This is a simple but handy script for quickly testing and validating your AWS Bedrock + Knowledge Base (KB) setup. Just update the AWS region if yours is different and plug in your Bedrock KB ID.
import boto3
import json
def test_kb_setup():
    """Test function to verify Bedrock Knowledge Base setup and queries"""
    # Initialize clients
    bedrock_agent = boto3.client('bedrock-agent-runtime', region_name='us-east-1')
    bedrock_runtime = boto3.client('bedrock-runtime', region_name='us-east-1')

    # Your Knowledge Base ID
    kb_id = "your-knowledge-base-id"  # Replace with your actual KB ID
    def test_kb_query(query_text):
        """Test a single knowledge base query"""
        print(f"\nTesting query: '{query_text}'")
        print("-" * 50)

        try:
            # Query the knowledge base
            response = bedrock_agent.retrieve(
                knowledgeBaseId=kb_id,
                retrievalQuery={'text': query_text},
                retrievalConfiguration={
                    'vectorSearchConfiguration': {
                        'numberOfResults': 3
                    }
                }
            )

            # Print raw response for debugging
            print("\nRaw Response:")
            print(json.dumps(response, indent=2, default=str))

            # Process and print retrieved results
            print("\nProcessed Results:")
            if 'retrievalResults' in response:
                for i, result in enumerate(response['retrievalResults'], 1):
                    print(f"\nResult {i}:")
                    print(f"Score: {result.get('score', 'N/A')}")
                    print(f"Content: {result.get('content', {}).get('text', 'N/A')}")
                    print(f"Location: {result.get('location', 'N/A')}")
            else:
                print("No results found in response")

            return True

        except Exception as e:
            print(f"Error during query: {str(e)}")
            return False
    def test_kb_with_bedrock(query_text):
        """Test knowledge base integration with Bedrock"""
        print(f"\nTesting KB + Bedrock integration for: '{query_text}'")
        print("-" * 50)

        try:
            # First get KB results
            kb_response = bedrock_agent.retrieve(
                knowledgeBaseId=kb_id,
                retrievalQuery={'text': query_text},
                retrievalConfiguration={
                    'vectorSearchConfiguration': {
                        'numberOfResults': 3
                    }
                }
            )

            # Format context from KB results
            context = ""
            if 'retrievalResults' in kb_response:
                context = "\n".join([
                    f"Reference {i+1}:\n{result.get('content', {}).get('text', '')}\n"
                    for i, result in enumerate(kb_response['retrievalResults'])
                ])

            # Prepare Bedrock prompt
            enhanced_prompt = (
                f"Using the following references:\n\n{context}\n\n"
                f"Please answer this question: {query_text}\n"
                "Base your response on the provided references and clearly cite them when used."
            )

            # Get Bedrock response
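            # NOTE: anthropic.claude-v2 uses the legacy Text Completions request body
            # shown below (prompt / max_tokens_to_sample, "completion" in the response).
            # Newer Claude models on Bedrock expect the Messages API body instead
            # ("anthropic_version", "max_tokens", "messages"), so adjust the payload
            # if you swap the modelId.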
            bedrock_response = bedrock_runtime.invoke_model(
                modelId="anthropic.claude-v2",
                body=json.dumps({
                    "prompt": f"\n\nHuman: {enhanced_prompt}\n\nAssistant:",
                    "max_tokens_to_sample": 500,
                    "temperature": 0.7,
                    "top_p": 1,
                }),
                contentType="application/json",
                accept="application/json",
            )

            response_body = json.loads(bedrock_response.get('body').read())
            final_response = response_body.get('completion', '').strip()

            print("\nBedrock Response:")
            print(final_response)

            return True

        except Exception as e:
            print(f"Error during KB + Bedrock integration: {str(e)}")
            return False
    # Run test queries
    test_queries = [
        "What are our company's remote work policies?",
        "Tell me about employee benefits",
        "What is the vacation policy?",
        "How does the performance review process work?",
        "What are the working hours?"
    ]
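    # These queries are only samples; replace them with questions that match the
    # documents actually ingested into your Knowledge Base.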
print("Starting Knowledge Base Tests")
print("=" * 50)
# Test 1: Basic KB Queries
print("\nTest 1: Basic Knowledge Base Queries")
for query in test_queries:
success = test_kb_query(query)
if not success:
print(f"Failed on query: {query}")
# Test 2: KB + Bedrock Integration
print("\nTest 2: Knowledge Base + Bedrock Integration")
for query in test_queries:
success = test_kb_with_bedrock(query)
if not success:
print(f"Failed on integration test: {query}")
if __name__ == "__main__":
test_kb_setup()
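If the two tests above pass, you can also exercise the managed RAG path in a single call: the Bedrock Agent Runtime exposes retrieve_and_generate, which runs the retrieval and the model invocation for you. The sketch below is a minimal example and is not part of the script above; quick_rag_check is just an illustrative helper name, and the model ARN is a placeholder for one you have access to in your region.

import boto3

def quick_rag_check(kb_id, query_text, region_name="us-east-1"):
    """Minimal sketch: one-call retrieval + generation against a Knowledge Base."""
    client = boto3.client('bedrock-agent-runtime', region_name=region_name)
    response = client.retrieve_and_generate(
        input={'text': query_text},
        retrieveAndGenerateConfiguration={
            'type': 'KNOWLEDGE_BASE',
            'knowledgeBaseConfiguration': {
                'knowledgeBaseId': kb_id,
                # Placeholder ARN; substitute a model you have enabled in your region
                'modelArn': 'arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-v2',
            }
        }
    )
    # Print the generated answer and where each citation came from
    print(response['output']['text'])
    for citation in response.get('citations', []):
        for ref in citation.get('retrievedReferences', []):
            print("Cited:", ref.get('location', 'N/A'))

For example, quick_rag_check("your-knowledge-base-id", "What is the vacation policy?") should return a grounded answer with citations if the Knowledge Base is wired up correctly.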