Source: https://github.com/aws-samples/amazon-bedrock-samples/tree/main
AWS official examples, with some good API call examples.
runtime_with_strands_and_bedrock_models.ipynb
%%writefile strands_claude.py
from strands import Agent, tool
from strands_tools import calculator  # Import the calculator tool
import argparse
import json
from bedrock_agentcore.runtime import BedrockAgentCoreApp
from strands.models import BedrockModel

app = BedrockAgentCoreApp()

# Create a custom tool
@tool
def weather():
    """Get weather"""  # Dummy implementation
    return "sunny"

model_id = "us.anthropic.claude-3-7-sonnet-20250219-v1:0"
model = BedrockModel(
    model_id=model_id,
)
agent = Agent(
    model=model,
    tools=[calculator, weather],
    system_prompt="You're a helpful assistant. You can do simple math calculation, and tell the weather."
)

@app.entrypoint
def strands_agent_bedrock(payload):
    """
    Invoke the agent with a payload
    """
    user_input = payload.get("prompt")
    print("User input:", user_input)
    response = agent(user_input)
    return response.message['content'][0]['text']

if __name__ == "__main__":
    app.run()
What happens behind the scenes?
When you use BedrockAgentCoreApp, it automatically:
- Creates an HTTP server that listens on port 8080
- Implements the required /invocations endpoint for processing the agent's requests
- Implements the /ping endpoint for health checks (very important for asynchronous agents)
- Handles proper content types and response formats
- Manages error handling according to AWS standards
A minimal local test is sketched below.
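A quick local check, as a minimal sketch assuming the agent was started locally with python strands_claude.py (so the server is listening on port 8080):

import requests

# Health check endpoint
print(requests.get("http://localhost:8080/ping").json())

# Invoke the agent through the /invocations endpoint
resp = requests.post(
    "http://localhost:8080/invocations",
    json={"prompt": "How much is 2+2?"},
)
print(resp.json())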
from bedrock_agentcore_starter_toolkit import Runtime
agentcore_runtime = Runtime()
# .configure() creates the Dockerfile
agent_name = "strands_claude_getting_started"
response = agentcore_runtime.configure(
    entrypoint="strands_claude.py",
    auto_create_execution_role=True,
    auto_create_ecr=True,
    requirements_file="requirements.txt",
    region=region,
    agent_name=agent_name
)
launch_result = agentcore_runtime.launch()
invoke_response = agentcore_runtime.invoke({"prompt": "How is the weather now?"})
# For streaming responses, we need to handle the EventStream
boto3_response = agentcore_client.invoke_agent_runtime(
    agentRuntimeArn=agent_arn,
    qualifier="DEFAULT",
    payload=json.dumps({"prompt": "How much is 2+1"})
)
# Check if the response is streaming
if "text/event-stream" in boto3_response.get("contentType", ""):
    print("Processing streaming response with boto3:")
    content = []
    for line in boto3_response["response"].iter_lines(chunk_size=1):
        if line:
            line = line.decode("utf-8")
            if line.startswith("data: "):
                data = line[6:]  # Remove the "data: " prefix
                print(f"Received streaming chunk: {data}")
                content.append(data.replace('"', ''))  # Strip surrounding quotes
    # Display the complete streamed response
    full_response = " ".join(content)
    display(Markdown(full_response))
else:
    # Handle non-streaming response
    ...
hosting_mcp_server_iam_auth.ipynb
from mcp.server.fastmcp import FastMCP

mcp = FastMCP(host="0.0.0.0", stateless_http=True)

@mcp.tool()
def add_numbers(a: int, b: int) -> int:
    """Add two numbers together"""
    return a + b

@mcp.tool()
def multiply_numbers(a: int, b: int) -> int:
    """Multiply two numbers together"""
    return a * b

@mcp.tool()
def greet_user(name: str) -> str:
    """Greet a user by name"""
    return f"Hello, {name}! Nice to meet you."

if __name__ == "__main__":
    mcp.run(transport="streamable-http")
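A minimal streamable-HTTP client sketch for exercising this server with the MCP Python SDK, shown against a local run on FastMCP's default port 8000 (the IAM-auth hosted endpoint additionally requires signed authorization headers):

import asyncio
from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client

async def main():
    # FastMCP's streamable-http transport mounts at /mcp by default
    async with streamablehttp_client("http://localhost:8000/mcp") as (read, write, _):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            print([t.name for t in tools.tools])
            result = await session.call_tool("add_numbers", {"a": 2, "b": 3})
            print(result.content)

asyncio.run(main())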
streamining_agent_response.ipynb
Server-Sent Events (SSE) Format
- The AgentCore SDK automatically converts your yielded data into SSE format
- Each yield becomes a data: event in the SSE stream
- The Content-Type is automatically set to text/event-stream
@app.entrypoint
async def strands_agent_bedrock_streaming(payload):
    """
    Invoke the agent with streaming capabilities

    This function demonstrates how to implement streaming responses
    with AgentCore Runtime using async generators
    """
    user_input = payload.get("prompt")
    print("User input:", user_input)
    try:
        # Stream each chunk as it becomes available
        async for event in agent.stream_async(user_input):
            if "data" in event:
                yield event["data"]
    except Exception as e:
        ...

if __name__ == "__main__":
    app.run()
handling_large_payloads.ipynb
# Create large payload
large_payload = {
    "prompt": "Analyze the sales data from the Excel file and correlate it with the chart image. Provide insights on sales performance and trends.",
    "excel_data": excel_base64,
    "image_data": image_base64
}
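The excel_base64 and image_base64 values are plain base64-encoded file contents, along these lines (file names are placeholders):

import base64

with open("sales_data.xlsx", "rb") as f:
    excel_base64 = base64.b64encode(f.read()).decode("utf-8")

with open("sales_chart.png", "rb") as f:
    image_base64 = base64.b64encode(f.read()).decode("utf-8")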
agent = Agent(
    model=model,
    system_prompt="""
    You are a data analysis assistant that can process large Excel files and images.
    When given multi-modal data, analyze both the structured data and visual content,
    then provide comprehensive insights combining both data sources.
    """
)
boto_session = Session()
region = boto_session.region_name

agentcore_runtime = Runtime()
response = agentcore_runtime.configure(
    entrypoint="multimodal_data_agent.py",
    auto_create_execution_role=True,
    auto_create_ecr=True,
    requirements_file="requirements.txt",
    region=region,
    agent_name="multimodal_data_agent"
)
launch_result = agentcore_runtime.launch()
invoke_response = agentcore_runtime.invoke(
    large_payload,
    session_id=session_id
)
@app.entrypoint
def multimodal_data_processor(payload, context):
    prompt = payload.get("prompt", "Analyze the provided data.")
    excel_data = payload.get("excel_data", "")
    image_data = payload.get("image_data", "")

    # Decode the base64 payloads back into raw bytes
    # (this step is elided in the notebook excerpt; requires import base64)
    excel_bytes = base64.b64decode(excel_data)
    image_bytes = base64.b64decode(image_data)

    # enhanced_prompt is built from prompt in the notebook (construction elided)
    response = agent(
        [{
            "document": {
                "format": "xlsx",
                "name": "excel_data",
                "source": {
                    "bytes": excel_bytes
                }
            }
        },
        {
            "image": {
                "format": "png",
                "source": {
                    "bytes": image_bytes
                }
            }
        },
        {
            "text": enhanced_prompt
        }]
    )
    return response.message['content'][0]['text']
01-gateway-target-lambda.ipynb
import utils

cognito = boto3.client("cognito-idp", region_name=REGION)
user_pool_id = utils.get_or_create_user_pool(cognito, USER_POOL_NAME)
utils.get_or_create_resource_server(cognito, user_pool_id, RESOURCE_SERVER_ID, RESOURCE_SERVER_NAME, SCOPES)

create_response = gateway_client.create_gateway(
    name='TestGWforLambda',
    roleArn=agentcore_gateway_iam_role['Role']['Arn'],  # The IAM role must have permissions to create/list/get/delete Gateways
    protocolType='MCP',
    authorizerType='CUSTOM_JWT',
    authorizerConfiguration=auth_config,
    description='AgentCore Gateway with AWS Lambda target type'
)
# Create an AWS Lambda target and transform it into MCP tools
lambda_target_config = {
    "mcp": {
        "lambda": {
            "lambdaArn": lambda_resp['lambda_function_arn'],  # Replace this with your AWS Lambda function ARN
            "toolSchema": {
                "inlinePayload": [
                    {
                        "name": "get_order_tool",
                        "description": "tool to get the order",
                        "inputSchema": {
                            "type": "object",
                            "properties": {
                                "orderId": {"type": "string"}
                            },
                            "required": ["orderId"]
                        }
                    },
                    {
                        "name": "update_order_tool",
                        "description": "tool to update the order",
                        "inputSchema": {
                            "type": "object",
                            "properties": {
                                "orderId": {"type": "string"}
                            },
                            "required": ["orderId"]
                        }
                    }
                ]
            }
        }
    }
}
response = gateway_client.create_gateway_target(
    gatewayIdentifier=gatewayID,
    name=targetname,
    description='Lambda Target using SDK',
    targetConfiguration=lambda_target_config,
    credentialProviderConfigurations=credential_config
)
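On the Lambda side, the Gateway delivers the tool arguments as the event payload and the resolved tool name in the client context. A handler sketch (the bedrockAgentCoreToolName key and the targetName___ prefix follow the AgentCore Gateway docs; the order data is dummy):

def lambda_handler(event, context):
    tool_name = context.client_context.custom["bedrockAgentCoreToolName"]
    if "___" in tool_name:
        tool_name = tool_name.split("___")[-1]  # strip the "<targetName>___" prefix

    if tool_name == "get_order_tool":
        return {"orderId": event.get("orderId"), "status": "shipped"}  # dummy response
    if tool_name == "update_order_tool":
        return {"orderId": event.get("orderId"), "updated": True}      # dummy response
    return {"error": f"unknown tool: {tool_name}"}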
01-openapis-into-mcp-api-key.ipynb
https://github.com/awslabs/amazon-bedrock-agentcore-samples/blob/main/01-tutorials/02-AgentCore-gateway/02-transform-apis-into-mcp-tools/01-transform-openapi-into-mcp-tools/openapi-specs/nasa_mars_insights_openapi.json
# Create an OpenAPI target
file_path = 'openapi-specs/nasa_mars_insights_openapi.json'
nasa_openapi_s3_target_config = {
    "mcp": {
        "openApiSchema": {
            "s3": {
                "uri": openapi_s3_uri
            }
        }
    }
}
gateway_client = boto3.client('bedrock-agentcore-control', region_name=os.environ['AWS_DEFAULT_REGION'])
response = gateway_client.create_gateway_target(
    gatewayIdentifier=gatewayID,
    name=targetname,
    description='OpenAPI Target with S3Uri using SDK',
    targetConfiguration=nasa_openapi_s3_target_config,
    credentialProviderConfigurations=api_key_credential_config
)
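The api_key_credential_config used above points at an API key credential provider. A sketch of creating one and wiring it in, assuming the NASA API key is passed as the api_key query parameter (provider name is hypothetical; field names as I recall them from the samples, so verify against the API):

acps = boto3.client("bedrock-agentcore-control", region_name=os.environ['AWS_DEFAULT_REGION'])
provider = acps.create_api_key_credential_provider(
    name="nasa-insight-api-key",  # hypothetical provider name
    apiKey="<your NASA API key>",
)
api_key_credential_config = [
    {
        "credentialProviderType": "API_KEY",
        "credentialProvider": {
            "apiKeyCredentialProvider": {
                "providerArn": provider["credentialProviderArn"],
                "credentialLocation": "QUERY_PARAMETER",
                "credentialParameterName": "api_key",
            }
        },
    }
]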
# Smithy APIs
smithy_s3_target_config = {
    "mcp": {
        "smithyModel": {
            "s3": {
                "uri": smithy_s3_uri
            }
        }
    }
}
01-gateway-search.ipynb
#### Create a sample AWS Lambda function that you want to convert into MCP tools
restaurant_lambda_resp = utils.create_gateway_lambda(
    "restaurant/lambda_function_code.zip",
    lambda_function_name="restaurant_lambda_gateway",
)

# Enable semantic search of tools
search_config = {
    "mcp": {"searchType": "SEMANTIC", "supportedVersions": ["2025-03-26"]}
}
# Create the gateway
response = agentcore_client.create_gateway(
    name=gateway_name,
    roleArn=gateway_role_arn,
    authorizerType="CUSTOM_JWT",
    description=gateway_desc,
    protocolType="MCP",
    authorizerConfiguration=auth_config,
    protocolConfiguration=search_config,
)

# Add a Lambda target to the gateway
response = agentcore_client.create_gateway_target(
    gatewayIdentifier=gateway_id,
    name=target_name,
    description=target_descr,
    targetConfiguration={
        "mcp": {
            "lambda": {
                "lambdaArn": lambda_arn,
                "toolSchema": {"inlinePayload": api_spec},
            }
        }
    },
    # Use IAM as the credential provider
    credentialProviderConfigurations=[
        {"credentialProviderType": "GATEWAY_IAM_ROLE"}
    ],
)
# List all agent tools from the MCP endpoint
requestBody = {"jsonrpc": "2.0", "id": 2, "method": "tools/list", "params": {}}
headers = {
    "Authorization": f"Bearer {jwt_token}",
    "Content-Type": "application/json",
}
response = requests.post(gateway_endpoint, json=requestBody, headers=headers)
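The invoke_gateway_tool helper used below presumably wraps the standard MCP tools/call JSON-RPC request; a minimal sketch:

def invoke_gateway_tool(gateway_endpoint, jwt_token, tool_params):
    request_body = {
        "jsonrpc": "2.0",
        "id": 3,
        "method": "tools/call",
        "params": tool_params,  # {"name": ..., "arguments": {...}}
    }
    headers = {
        "Authorization": f"Bearer {jwt_token}",
        "Content-Type": "application/json",
    }
    return requests.post(gateway_endpoint, json=request_body, headers=headers).json()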
# Built-in Gateway semantic search tool
def tool_search(gateway_endpoint, jwt_token, query):
    toolParams = {
        "name": "x_amz_bedrock_agentcore_search",
        "arguments": {"query": query},
    }
    toolResp = invoke_gateway_tool(
        gateway_endpoint=gateway_endpoint, jwt_token=jwt_token, tool_params=toolParams
    )
    tools = toolResp["result"]["structuredContent"]["tools"]
    return tools

tools_found = tool_search(
    gateway_endpoint=gatewayEndpoint,
    jwt_token=jwtToken,
    query="find me 3 credit research tools",
)
# Wrap Gateway tool descriptions as Strands MCP tools
# (import paths assumed: MCPTool is mcp.types.Tool)
from mcp.types import Tool as MCPTool
from strands.tools.mcp import MCPAgentTool

def tools_to_strands_mcp_tools(tools, top_n):
    strands_mcp_tools = []
    for tool in tools[:top_n]:
        mcp_tool = MCPTool(
            name=tool["name"],
            description=tool["description"],
            inputSchema=tool["inputSchema"],
        )
        strands_mcp_tools.append(MCPAgentTool(mcp_tool, client))
    return strands_mcp_tools
# Search for tools via the agent
simple_agent = Agent(
    model=bedrockmodel,
    tools=[get_search_tool(client)],
    callback_handler=null_callback_handler,
)
direct_result = simple_agent.tool.x_amz_bedrock_agentcore_search(
    query="find equity trading tools"
)

# Search for tools, advanced
tools_found = tool_search(
    gateway_endpoint=gatewayEndpoint,
    jwt_token=jwtToken,
    query="tools for multiplying two numbers",
)
agent = Agent(model=bedrockmodel, tools=tools_to_strands_mcp_tools(tools_found, 1))
result = agent("10 * 70")
inbound_auth_runtime_with_strands_and_bedrock_models.ipynb
from bedrock_agentcore_starter_toolkit import Runtime
from boto3.session import Session

boto_session = Session()
region = boto_session.region_name

discovery_url = cognito_config.get("discovery_url")
client_id = cognito_config.get("client_id")

agentcore_runtime = Runtime()
response = agentcore_runtime.configure(
    entrypoint="strands_claude.py",
    auto_create_execution_role=True,
    auto_create_ecr=True,
    requirements_file="requirements.txt",
    region=region,
    agent_name="strands_agent_inbound_identity",
    authorizer_configuration={
        "customJWTAuthorizer": {
            "discoveryUrl": discovery_url,
            "allowedClients": [client_id]
        }
    }
)
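With inbound JWT auth configured, invocation requires a bearer token from Cognito. A sketch (the USER_PASSWORD_AUTH flow and the bearer_token parameter follow the sample's pattern; user credentials are placeholders):

cognito_client = boto3.client("cognito-idp", region_name=region)
auth = cognito_client.initiate_auth(
    ClientId=client_id,
    AuthFlow="USER_PASSWORD_AUTH",
    AuthParameters={"USERNAME": "testuser", "PASSWORD": "<password>"},
)
bearer_token = auth["AuthenticationResult"]["AccessToken"]

launch_result = agentcore_runtime.launch()
invoke_response = agentcore_runtime.invoke(
    {"prompt": "How is the weather now?"},
    bearer_token=bearer_token,
)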
runtime_with_strands_and_openai_models.ipynb
import asyncio
from bedrock_agentcore.identity.auth import requires_access_token, requires_api_key
from strands import Agent, tool
from strands_tools import calculator
import argparse
import json
from strands.models.litellm import LiteLLMModel
import os
from bedrock_agentcore.runtime import BedrockAgentCoreApp

AZURE_API_KEY_FROM_CREDS_PROVIDER = ""
agent = None

@requires_api_key(
    provider_name="openai-apikey-provider"  # replace with your own credential provider name
)
async def need_api_key(*, api_key: str):
    global AZURE_API_KEY_FROM_CREDS_PROVIDER
    print(f'received api key for async func: {api_key}')
    AZURE_API_KEY_FROM_CREDS_PROVIDER = api_key

app = BedrockAgentCoreApp()

@app.entrypoint
async def strands_agent_open_ai(payload):
    global AZURE_API_KEY_FROM_CREDS_PROVIDER, agent
    # Get API key if not already retrieved
    if not AZURE_API_KEY_FROM_CREDS_PROVIDER:
        await need_api_key(api_key="")
        print(f"API key retrieved: '{AZURE_API_KEY_FROM_CREDS_PROVIDER}'")
        os.environ["AZURE_API_KEY"] = AZURE_API_KEY_FROM_CREDS_PROVIDER
        print("Environment variable AZURE_API_KEY set")
    # Lazily create the agent once the key is available
    if agent is None:
        model = "azure/gpt-4.1-mini"
        litellm_model = LiteLLMModel(
            model_id=model, params={"max_tokens": 32000, "temperature": 0.7}
        )
        agent = Agent(
            model=litellm_model,
            tools=[calculator, weather],
            system_prompt="You're a helpful assistant. You can do simple math calculation, and tell the weather."
        )
    # ... invoke the agent with payload["prompt"] and return the response (elided)

if __name__ == "__main__":
    app.run()
agentcore_runtime = Runtime()
agentcore_runtime.configure(entrypoint="strands_agents_openai.py", ...)
invoke_response = agentcore_runtime.invoke({"prompt": "Hello"}, user_id="userid_1234567890")
personal-agent-memory-manager.ipynb
# Import memory management modules
from bedrock_agentcore_starter_toolkit.operations.memory.manager import MemoryManager
from bedrock_agentcore.memory.constants import ConversationalMessage, MessageRole
from bedrock_agentcore.memory.session import MemorySession, MemorySessionManager

# Initialize Memory Manager
memory_manager = MemoryManager(region_name=REGION)
memory_name = "PersonalAgentMemoryManager"

memory = memory_manager.get_or_create_memory(
    name=memory_name,
    strategies=[],  # No strategies, so short-term memory only
    description="Short-term memory for personal agent",
    event_expiry_days=7,  # Retention period for short-term memory
    memory_execution_role_arn=None,  # Optional for short-term memory
)

# Initialize the session memory manager
session_manager = MemorySessionManager(memory_id=memory.id, region_name=REGION)

# Create a memory session for the specific actor/session combination
memory_session = session_manager.create_memory_session(
    actor_id=ACTOR_ID,
    session_id=SESSION_ID
)

# Inside the memory hook (see the sketch below): load recent turns, save new ones
recent_turns = self.memory_session.get_last_k_turns(k=5)
self.memory_session.add_turns(
    messages=[ConversationalMessage(message_text, message_role)]
)
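The two MemorySession calls above live inside a Strands hook provider; a condensed sketch of how such a hook might be wired (strands.hooks event names are real, but treat the exact callback shape as an assumption):

from bedrock_agentcore.memory.constants import ConversationalMessage, MessageRole
from strands.hooks import HookProvider, HookRegistry, MessageAddedEvent

class MemoryHookProvider(HookProvider):
    def __init__(self, memory_session):
        self.memory_session = memory_session

    def register_hooks(self, registry: HookRegistry):
        # Persist each new message into AgentCore Memory as it is added
        registry.add_callback(MessageAddedEvent, self.on_message_added)

    def on_message_added(self, event: MessageAddedEvent):
        message = event.agent.messages[-1]
        role = MessageRole.USER if message["role"] == "user" else MessageRole.ASSISTANT
        self.memory_session.add_turns(
            messages=[ConversationalMessage(message["content"][0]["text"], role)]
        )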
def create_personal_agent():
    """Create personal agent with memory and web search using MemorySession"""
    agent = Agent(
        name="PersonalAssistant",
        model="us.anthropic.claude-3-7-sonnet-20250219-v1:0",  # or your preferred model
        system_prompt=f"""You are a helpful personal assistant with web search capabilities.
        You can help with:
        - General questions and information lookup
        - Web searches for current information
        - Personal task management
        When you need current information, use the websearch function.
        Today's date: {datetime.today().strftime('%Y-%m-%d')}
        Be friendly and professional.""",
        hooks=[MemoryHookProvider(user_session)],
        tools=[websearch],
    )
    return agent

# Create agent
agent = create_personal_agent()
agent("My name is Alex and I'm interested in learning about AI.")
personal-agent.ipynb
AgentCore Memory (Short-Term Memory)
from bedrock_agentcore.memory import MemoryClient
client = MemoryClient(region_name=REGION)
memory_name = "PersonalAgentMemory"
# Create a memory resource without strategies (so only short-term memory is available)
memory = client.create_memory_and_wait(
    name=memory_name,
    strategies=[],  # No strategies for short-term memory
    description="Short-term memory for personal agent",
    event_expiry_days=7,  # Retention period for short-term memory; can be up to 365 days
)
memory_id = memory['id']
class MemoryHookProvider(HookProvider):
    # ...
    # Load the last 5 conversation turns from memory
    recent_turns = self.memory_client.get_last_k_turns(
        memory_id=self.memory_id,
        actor_id=actor_id,
        session_id=session_id,
        k=5
    )

    # ...
    # Save the latest message to memory
    self.memory_client.create_event(
        memory_id=self.memory_id,
        actor_id=actor_id,
        session_id=session_id,
        messages=[(messages[-1]["content"][0]["text"], messages[-1]["role"])]
    )
def create_personal_agent():
    """Create personal agent with memory and web search"""
    agent = Agent(
        name="PersonalAssistant",
        model="us.anthropic.claude-3-7-sonnet-20250219-v1:0",  # or your preferred model
        system_prompt=f"""You are a helpful personal assistant with web search capabilities.
        You can help with:
        - General questions and information lookup
        - Web searches for current information
        - Personal task management
        When you need current information, use the websearch function.
        Today's date: {datetime.today().strftime('%Y-%m-%d')}
        Be friendly and professional.""",
        hooks=[MemoryHookProvider(client, memory_id)],
        tools=[websearch],
        state={"actor_id": ACTOR_ID, "session_id": SESSION_ID}
    )
    return agent

# Create agent
agent = create_personal_agent()
agent("My name is Alex and I'm interested in learning about AI.")
nutrition-assistant-with-user-preference-saving.ipynb
store = AgentCoreMemoryStore(memory_id=memory_id, region_name=region)

# Save the last human message we see before LLM invocation
for msg in reversed(messages):
    if isinstance(msg, HumanMessage):
        store.put(namespace, str(uuid.uuid4()), {"message": msg})
        break

# Retrieve user preferences based on the last message and append to state
user_preferences_namespace = (actor_id, "preferences")
preferences = store.search(user_preferences_namespace, query=msg.content, limit=5)

# Construct another AI message to add context before the current message
if preferences:
    context_items = [pref.value for pref in preferences]
    context_message = AIMessage(
        content=f"[User Context: {', '.join(str(item) for item in context_items)}]"
    )
    # Insert the context message before the last human message
    return {"messages": messages[:-1] + [context_message, messages[-1]]}
return {"llm_input_messages": messages}
customer-support/customer-support-memory-manager.ipynb
# Define memory strategies using typed classes
strategies = [
    CustomUserPreferenceStrategy(
        name="CustomerPreferences",
        description="Captures customer preferences and behavior",
        extraction_config=ExtractionConfig(
            append_to_prompt="Extract customer preferences and behavior patterns",
            model_id="anthropic.claude-3-sonnet-20240229-v1:0"
        ),
        consolidation_config=ConsolidationConfig(
            append_to_prompt="Consolidate customer preferences",
            model_id="anthropic.claude-3-sonnet-20240229-v1:0"
        ),
        namespaces=["support/customer/{actorId}/preferences"]
    ),
    CustomSemanticStrategy(
        name="CustomerSupportSemantic",
        description="Stores facts from conversations",
        extraction_config=ExtractionConfig(
            append_to_prompt="Extract factual information from customer support conversations",
            model_id="anthropic.claude-3-sonnet-20240229-v1:0"
        ),
        consolidation_config=ConsolidationConfig(
            append_to_prompt="Consolidate semantic insights from support interactions",
            model_id="anthropic.claude-3-sonnet-20240229-v1:0"
        ),
        namespaces=["support/customer/{actorId}/semantic"]
    )
]
# Alternative: define the memory strategies for customer support as plain dicts
strategies = [
    {
        StrategyType.USER_PREFERENCE.value: {
            "name": "CustomerPreferences",
            "description": "Captures customer preferences and behavior",
            "namespaces": ["support/customer/{actorId}/preferences"]
        }
    },
    {
        StrategyType.SEMANTIC.value: {
            "name": "CustomerSupportSemantic",
            "description": "Stores facts from conversations",
            "namespaces": ["support/customer/{actorId}/semantic"],
        }
    }
]
# Creation
memory = memory_manager.get_or_create_memory(
    name=memory_name,
    strategies=strategies,  # Pass typed strategy objects
    description="Memory for customer support agent",
    event_expiry_days=90,  # Memories expire after 90 days
    memory_execution_role_arn=MEMORY_EXECUTION_ROLE_ARN,  # Required for custom strategies
)
class CustomerSupportMemoryHooks(HookProvider):
    def __init__(self, customer_session: MemorySession):
        self.customer_session = customer_session
        # Define retrieval configuration for different memory types
        self.retrieval_config = {
            "support/customer/{actorId}/preferences": RetrievalConfig(top_k=3, relevance_score=0.3),
            "support/customer/{actorId}/semantic": RetrievalConfig(top_k=5, relevance_score=0.2)
        }

    # ...
    # Use the MemorySession API; no need to pass actor_id/session_id
    memories = self.customer_session.search_long_term_memories(
        query=user_query,
        namespace_prefix=resolved_namespace,
        top_k=config.top_k
    )

    # ...
    interaction_messages = [
        ConversationalMessage(customer_query, USER),
        ConversationalMessage(agent_response, ASSISTANT)
    ]
    result = self.customer_session.add_turns(interaction_messages)
with-strands-agent/travel-booking-assistant.ipynb
- Set up a shared memory resource with a long-term memory strategy
- A coordinator agent delegates to specialized agents
# Create the memory resource with a single long-term memory strategy
# The {actorId} placeholder will be dynamically replaced with the actual actor ID
memory = client.create_memory_and_wait(
    name=memory_name,
    description="Travel Agent with Long-Term Memory",
    strategies=[{
        StrategyType.USER_PREFERENCE.value: {
            "name": "UserPreferences",
            "description": "Captures user preferences",
            "namespaces": ["travel/{actorId}/preferences"]
        }
    }],
    event_expiry_days=7,  # Short-term conversation expires after 7 days
    max_wait=300,
    poll_interval=10
)
# System prompt for the hotel booking specialist
HOTEL_BOOKING_PROMPT = f"""You are a hotel booking assistant. Help customers find hotels, make reservations, and answer questions about accommodations and amenities.
Provide clear information about availability, pricing, and booking procedures in a friendly, helpful manner. Ask max two questions per turn. Keep the messages short, don't overwhelm the customer."""

# System prompt for the flight booking specialist
FLIGHT_BOOKING_PROMPT = f"""You are a flight booking assistant. Help customers find flights, make reservations, and answer questions about airlines, routes, and travel policies.
Provide clear information about flight availability, pricing, schedules, and booking procedures in a friendly, helpful manner. Ask max two questions per turn. Keep the messages short, don't overwhelm the customer."""
# System prompt for the coordinator agent
TRAVEL_AGENT_SYSTEM_PROMPT = """
You are a comprehensive travel planning assistant that coordinates between specialized tools:
- For flight-related queries (bookings, schedules, airlines, routes) → Use the flight_booking_assistant tool
- For hotel-related queries (accommodations, amenities, reservations) → Use the hotel_booking_assistant tool
- For complete travel packages → Use both tools as needed to provide comprehensive information
- For general travel advice or simple travel questions → Answer directly
Each agent will have its own memory in case the user asks about historic data.
When handling complex travel requests, coordinate information from both tools to create a cohesive travel plan.
Provide clear organization when presenting information from multiple sources. \
Ask max two questions per turn. Keep the messages short, don't overwhelm the customer.
"""
travel_agent = Agent(
    system_prompt=TRAVEL_AGENT_SYSTEM_PROMPT,
    model=MODEL_ID,
    tools=[flight_booking_assistant, hotel_booking_assistant]
)

provider_flight = AgentCoreMemoryToolProvider(
    memory_id=memory_id,       # Required
    actor_id=flight_actor_id,  # Required
    session_id=session_id,     # Required
    max_results=10,            # Optional
    namespace=flight_namespace
)

flight_memory_hooks = MemoryHookProvider(memory_id, client)
flight_agent = Agent(
    tools=provider_flight.tools,
    hooks=[flight_memory_hooks],
    model=MODEL_ID,
    system_prompt=FLIGHT_BOOKING_PROMPT,
    state={"actor_id": flight_actor_id, "session_id": session_id}
)

# Call the agent and return its response
response = flight_agent(query)
langchain-agent-advanced-data-analysis-code-interpreter.ipynb
SYSTEM_PROMPT = """You are a helpful AI assistant that validates all answers through code execution using the tools provided. DO NOT Answer questions without using the tools
VALIDATION PRINCIPLES:
1. When making claims about code, algorithms, or calculations - write code to verify them
2. Use execute_python to test mathematical calculations, algorithms, and logic
3. Create test scripts to validate your understanding before giving answers
4. Always show your work with actual code execution
5. If uncertain, explicitly state limitations and validate what you can
APPROACH:
- If asked about a programming concept, implement it in code to demonstrate
- If asked for calculations, compute them programmatically AND show the code
- If implementing algorithms, include test cases to prove correctness
- Document your validation process for transparency
- The sandbox maintains state between executions, so you can refer to previous results
TOOL AVAILABLE:
- execute_python: Run Python code and see output
RESPONSE FORMAT: The execute_python tool returns a JSON response with:
- sessionId: The sandbox session ID
- id: Request ID
- isError: Boolean indicating if there was an error
- content: Array of content objects with type and text/data
- structuredContent: For code execution, includes stdout, stderr, exitCode, executionTime
For successful code execution, the output will be in content[0].text and also in structuredContent.stdout.
Check isError field to see if there was an error.
Be thorough, accurate, and always validate your answers when possible."""
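The execute_python tool the prompt refers to is built on the AgentCore Code Interpreter. A condensed sketch of wiring it up as a LangChain tool (module path and the executeCode invocation follow the AgentCore SDK samples; treat the exact names as assumptions):

import json
from langchain_core.tools import tool
from bedrock_agentcore.tools.code_interpreter_client import CodeInterpreter

# Start a sandbox session; state persists between executions
code_client = CodeInterpreter("us-west-2")
code_client.start()

@tool
def execute_python(code: str, description: str = "") -> str:
    """Run Python code in the sandbox and return the JSON result."""
    if description:
        code = f"# {description}\n{code}"
    response = code_client.invoke("executeCode", {
        "code": code,
        "language": "python",
        "clearContext": False,  # keep sandbox state across calls
    })
    # The response streams events; the result carries stdout/stderr/exitCode
    for event in response["stream"]:
        return json.dumps(event["result"])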
runtime_with_strands_and_bedrock_models.ipynb
Preparing your agent for deployment on AgentCore Runtime
Let's now deploy our agents to AgentCore Runtime. To do so we need to:
- Import the Runtime App with from bedrock_agentcore.runtime import BedrockAgentCoreApp
- Initialize the App in our code with app = BedrockAgentCoreApp()
- Decorate the invocation function with the @app.entrypoint decorator
- Let AgentCore Runtime control the running of the agent with app.run()
To summarize, follow these steps to enable observability for agents hosted on AgentCore Runtime:
- Enable Transaction Search in Amazon CloudWatch
- Instrument the runtime agent with the OpenTelemetry command: opentelemetry-instrument python any_runtime_agent.py
- List aws-opentelemetry-distro in the requirements.txt file when deploying the agent on Bedrock AgentCore Runtime (see the left sidebar on the console page: https://us-west-2.console.aws.amazon.com/bedrock-agentcore/memory?region=us-west-2)
Strands_Observability.ipynb
Not an AgentCore Runtime agent. Using a standalone Strands agent, so extra steps are needed:
- Create a log group in CloudWatch
- Create a log stream in CloudWatch
- Enable Transaction Search (you can do so in the AWS console)
- Create a .env file for configuring the environment variables; use Strands/.env.example as a template
- The AWS OpenTelemetry distro will automatically handle tracer provider setup when using the opentelemetry-instrument command
# Configure Strands logging
logging.getLogger("strands").setLevel(logging.INFO)

# trace_attributes attach metadata to every trace the agent emits
travel_agent = Agent(
    model=bedrock_model,
    system_prompt="""You are an experienced travel agent specializing in personalized travel recommendations
    with access to real-time web information. Your role is to find dream destinations matching user preferences
    using web search for current information. You should provide comprehensive recommendations with current
    information, brief descriptions, and practical travel details.""",
    tools=[web_search],
    trace_attributes={
        "user.id": "user@domain.com",
        "tags": ["Strands", "Observability"],
    }
)