diff --git a/libs/aws/langchain_aws/memory/__init__.py b/libs/aws/langchain_aws/memory/__init__.py new file mode 100644 index 00000000..b8a4c0e8 --- /dev/null +++ b/libs/aws/langchain_aws/memory/__init__.py @@ -0,0 +1,21 @@ +from langchain_aws.memory.bedrock_agentcore import ( + store_agentcore_memory_events, + list_agentcore_memory_events, + retrieve_agentcore_memories, + create_store_memory_events_tool, + create_list_memory_events_tool, + create_retrieve_memory_tool, + convert_langchain_messages_to_events, + convert_events_to_langchain_messages, +) + +__all__ = [ + "store_agentcore_memory_events", + "list_agentcore_memory_events", + "retrieve_agentcore_memories", + "create_store_memory_events_tool", + "create_list_memory_events_tool", + "create_retrieve_memory_tool", + "convert_langchain_messages_to_events", + "convert_events_to_langchain_messages", +] diff --git a/libs/aws/langchain_aws/memory/bedrock_agentcore.py b/libs/aws/langchain_aws/memory/bedrock_agentcore.py new file mode 100644 index 00000000..24f3fca2 --- /dev/null +++ b/libs/aws/langchain_aws/memory/bedrock_agentcore.py @@ -0,0 +1,385 @@ +"""Module for AWS Bedrock Agent Core memory integration. + +This module provides tools to allow agents to use the AWS Bedrock Agent Core +memory API to manage and search memories. +""" + +import logging +from typing import List, Any, Dict + +from bedrock_agentcore.memory import MemoryClient +from langchain_core.messages import ( + AIMessage, + BaseMessage, + HumanMessage, + ToolMessage, + SystemMessage, +) +from langchain_core.tools import StructuredTool +from pydantic import BaseModel, Field + +logger = logging.getLogger(__name__) + +# TODO: Once Bedrock AgentCore introduces metadata to store the tool call ID, +# implement logic to properly save and load for ToolCall messages +TOOL_CALL_ID_PLACEHOLDER = "unknown" + + +def convert_langchain_messages_to_events( + messages: List[BaseMessage], include_system_messages=False +) -> List[Dict[str, Any]]: + """Convert LangChain messages to Bedrock Agent Core events + + Args: + messages: List of Langchain messages (BaseMessage) + include_system_messages: Flag for whether to include system messages in the conversion (as OTHER) or skip them + + Returns: + List of AgentCore event tuples (text, role) + """ + converted_messages = [] + for msg in messages: + # Skip if event already saved + if msg.additional_kwargs.get("event_id") is not None: + continue + + text = msg.text() + if not text.strip(): + continue + + # Map LangChain roles to Bedrock Agent Core roles + if msg.type == "human": + role = "USER" + elif msg.type == "ai": + role = "ASSISTANT" + elif msg.type == "tool": + role = "TOOL" + elif msg.type == "system" and include_system_messages: + role = "OTHER" + else: + logger.warning(f"Skipping unsupported message type: {msg.type}") + continue + + converted_messages.append((text, role)) + + return converted_messages + + +def convert_events_to_langchain_messages( + events: List[Dict[str, Any]] +) -> List[BaseMessage]: + """Convert Bedrock Agent Core events back to LangChain messages. 
+ + Args: + events: List of event dictionaries with 'payload' containing conversational data + + Returns: + List of LangChain BaseMessage objects + """ + messages = [] + + for event in events: + if "payload" not in event: + continue + + for payload_item in event.get("payload", []): + if "conversational" not in payload_item: + continue + + conv = payload_item["conversational"] + role = conv.get("role", "") + content = conv.get("content", {}).get("text", "") + + if not content.strip(): + continue + + message = None + if role == "USER": + message = HumanMessage(content=content) + elif role == "ASSISTANT": + message = AIMessage(content=content) + elif role == "TOOL": + # As of now, the tool_call_id is not stored or returned by the Memory API + message = ToolMessage( + content=content, tool_call_id=TOOL_CALL_ID_PLACEHOLDER + ) + elif role == "OTHER": + message = SystemMessage(content=content) + else: + logger.warning(f"Skipping unknown message role: {role}") + continue + + # Preserve event metadata + if message and "eventId" in event: + message.additional_kwargs["event_id"] = event["eventId"] + + if message: + messages.append(message) + + return messages + + +def store_agentcore_memory_events( + memory_client: MemoryClient, + messages: List[BaseMessage], + memory_id: str, + actor_id: str, + session_id: str, + include_system_messages: bool = False, +) -> str: + """Stores Langchain Messages as Bedrock AgentCore Memory events in short term memory + + Args: + memory_client: Initialized MemoryClient instance + memory_id: Memory identifier (e.g., "test-memory-id") + actor_id: Actor identifier (e.g., "user") + session_id: Session identifier (e.g., "session-1") + include_system_messages: Flag for whether to save system messages (as OTHER) or skip them + + Returns: + The ID of the event that was created + """ + + if len(messages) == 0: + raise ValueError("The messages field cannot be empty.") + + if not memory_id or not memory_id.strip(): + raise ValueError("memory_id cannot be empty") + if not actor_id or not actor_id.strip(): + raise ValueError("actor_id cannot be empty") + if not session_id or not session_id.strip(): + raise ValueError("session_id cannot be empty") + + events_to_store = convert_langchain_messages_to_events( + messages, include_system_messages + ) + if not events_to_store: + raise ValueError( + "No valid messages to store. All messages were either empty, " + "already stored, or filtered out." 
+ ) + + response = memory_client.create_event( + memory_id=memory_id, + actor_id=actor_id, + session_id=session_id, + messages=events_to_store, + ) + event_id = response.get("eventId") + if not event_id: + raise RuntimeError("AgentCore did not return an event ID") + + return event_id + + +def list_agentcore_memory_events( + memory_client: MemoryClient, + memory_id: str, + actor_id: str, + session_id: str, + max_results: int = 100, +) -> List[BaseMessage]: + """Lists the events in short term memory from Bedrock Agentcore Memory as Langchain Messages + + Args: + memory_client: Initialized MemoryClient instance + memory_id: Memory identifier (e.g., "test-memory-id") + actor_id: Actor identifier (e.g., "user") + session_id: Session identifier (e.g., "session-1") + max_results: The maximum number of results to return + + Returns: + A list of LangChain messages of previous events saved in short term memory + """ + if not memory_id or not memory_id.strip(): + raise ValueError("memory_id cannot be empty") + if not actor_id or not actor_id.strip(): + raise ValueError("actor_id cannot be empty") + if not session_id or not session_id.strip(): + raise ValueError("session_id cannot be empty") + + events = memory_client.list_events( + memory_id=memory_id, + actor_id=actor_id, + session_id=session_id, + max_results=max_results, + include_payload=True, + ) + + return convert_events_to_langchain_messages(events) + + +def retrieve_agentcore_memories( + memory_client: MemoryClient, + memory_id: str, + namespace_str: str, + query: str, + limit: int = 3, +) -> List[Dict[str, Any]]: + """Search for memories in AWS Bedrock Agentcore Memory + + Args: + memory_client: The AgentCore memory client + memory_id: The memory identifier in AgentCore + namespace_str: The namespace to be searched + query: The query to be embedded and used in the semantic search for memories + limit: The limit for results to be retrieved + + Returns: + A list of memory results with content, score, and metadata + """ + if not memory_id or not memory_id.strip(): + raise ValueError("memory_id cannot be empty") + if not namespace_str or not namespace_str.strip(): + raise ValueError("actor_id cannot be empty") + if not query or not query.strip(): + raise ValueError("actor_id cannot be empty") + + memories = memory_client.retrieve_memories( + memory_id=memory_id, + namespace=namespace_str, + query=query, + top_k=limit, + ) + + results = [] + for item in memories: + content = item.get("content", {}).get("text", "") + result = { + "content": content, + "score": item.get("score", 0.0), + "metadata": item.get("metadata", {}), + } + + results.append(result) + + return results + + +class StoreMemoryEventsToolInput(BaseModel): + """Input schema for storing memory events.""" + + messages: List[BaseMessage] = Field( + description="List of messages to store in memory" + ) + + +class ListMemoryEventsToolInput(BaseModel): + """Input schema for listing memory events.""" + + max_results: int = Field( + default=100, description="Maximum number of events to retrieve" + ) + + +class SearchMemoryInput(BaseModel): + """Input schema for searching memories.""" + + query: str = Field(description="Search query to find relevant memories") + limit: int = Field( + default=3, description="Maximum number of search results to return" + ) + + +def create_store_memory_events_tool( + memory_client: MemoryClient, memory_id: str, actor_id: str, session_id: str +) -> StructuredTool: + """Factory function to create a memory storage tool with pre-configured connection details. 
+ + Args: + memory_client: Initialized MemoryClient instance + memory_id: Memory identifier (e.g., "test-memory-id") + actor_id: Actor identifier (e.g., "user") + session_id: Session identifier (e.g., "session-1") + + Returns: + StructuredTool to store events that only requires the 'messages' parameter + """ + + def _store_messages(messages: List[BaseMessage]) -> str: + """Internal function with pre-bound connection details.""" + return store_agentcore_memory_events( + memory_client=memory_client, + messages=messages, + memory_id=memory_id, + actor_id=actor_id, + session_id=session_id, + ) + + return StructuredTool.from_function( + func=_store_messages, + name="store_memory_events", + description="Store conversation messages in AgentCore memory for later retrieval", + args_schema=StoreMemoryEventsToolInput, + ) + + +def create_list_memory_events_tool( + memory_client: MemoryClient, memory_id: str, actor_id: str, session_id: str +) -> StructuredTool: + """Factory function to create a memory listing tool with pre-configured connection details. + + Args: + memory_client: Initialized MemoryClient instance + memory_id: Memory identifier (e.g., "test-memory-id") + actor_id: Actor identifier (e.g., "user") + session_id: Session identifier (e.g., "session-1") + + Returns: + StructuredTool for listing events that only requires 'max_results' parameter + """ + + def _list_events(max_results: int = 100) -> List[BaseMessage]: + """Internal function with pre-bound connection details.""" + return list_agentcore_memory_events( + memory_client=memory_client, + memory_id=memory_id, + actor_id=actor_id, + session_id=session_id, + max_results=max_results, + ) + + return StructuredTool.from_function( + func=_list_events, + name="list_memory_events", + description="Retrieve recent conversation messages from AgentCore memory", + args_schema=ListMemoryEventsToolInput, + ) + + +def create_retrieve_memory_tool( + memory_client: MemoryClient, + memory_id: str, + namespace: str, + tool_name: str = "retrieve_memory", + tool_description: str = "Search for relevant memories using semantic similarity", +) -> StructuredTool: + """Factory function to create a memory search tool with pre-configured connection details. + + Args: + memory_client: Initialized MemoryClient instance + memory_id: Memory identifier (e.g., "test-memory-id") + namespace: Namespace for search (e.g., "/summaries/user/session-1") + tool_name: Name of the tool, i.e. "search_user_preferences" + tool_description: Description of the tool's purpose, i.e. 
"Use this tool to search for user preferences" + + Returns: + StructuredTool to retrieve memories that only requires 'query' and 'limit' parameters + """ + + def _search_memories(query: str, limit: int = 3) -> List[Dict[str, Any]]: + """Internal function with pre-bound connection details.""" + return retrieve_agentcore_memories( + memory_client=memory_client, + memory_id=memory_id, + namespace_str=namespace, + query=query, + limit=limit, + ) + + return StructuredTool.from_function( + func=_search_memories, + name=tool_name, + description=tool_description, + args_schema=SearchMemoryInput, + ) diff --git a/libs/aws/tests/integration_tests/memory/__init__.py b/libs/aws/tests/integration_tests/memory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/libs/aws/tests/integration_tests/memory/test_agentcore_memory.py b/libs/aws/tests/integration_tests/memory/test_agentcore_memory.py new file mode 100644 index 00000000..3d70f284 --- /dev/null +++ b/libs/aws/tests/integration_tests/memory/test_agentcore_memory.py @@ -0,0 +1,99 @@ +from unittest.mock import Mock +import pytest +from langchain_core.messages import HumanMessage, AIMessage + +from langchain_aws.memory.bedrock_agentcore import ( + store_agentcore_memory_events, + list_agentcore_memory_events, + retrieve_agentcore_memories, + create_store_memory_events_tool, + create_list_memory_events_tool, + create_retrieve_memory_tool, +) + + +@pytest.fixture +def mock_memory_client(): + client = Mock() + # Set up realistic mock responses + client.create_event.return_value = {"eventId": "test-event-123"} + client.list_events.return_value = [ + { + "eventId": "event-1", + "payload": [ + {"conversational": {"role": "USER", "content": {"text": "Hello"}}} + ], + } + ] + client.retrieve_memories.return_value = [ + { + "content": {"text": "User likes coffee"}, + "score": 0.95, + "metadata": {"category": "preferences"}, + } + ] + return client + + +@pytest.mark.compile +def test_agentcore_memory_integration_workflow(mock_memory_client): + """Test the complete workflow of storing, listing, and retrieving memories.""" + # Test storing messages + messages = [HumanMessage("I love coffee"), AIMessage("Great! 
I'll remember that.")] + + event_id = store_agentcore_memory_events( + mock_memory_client, + messages=messages, + memory_id="test-memory", + actor_id="user-1", + session_id="session-1", + ) + + assert event_id == "test-event-123" + mock_memory_client.create_event.assert_called_once() + + # Test listing messages + retrieved_messages = list_agentcore_memory_events( + mock_memory_client, + memory_id="test-memory", + actor_id="user-1", + session_id="session-1", + ) + + assert len(retrieved_messages) == 1 + assert isinstance(retrieved_messages[0], HumanMessage) + + # Test memory search + memories = retrieve_agentcore_memories( + mock_memory_client, + memory_id="test-memory", + namespace_str="/preferences/user-1", + query="coffee preferences", + ) + + assert len(memories) == 1 + assert memories[0]["content"] == "User likes coffee" + + +@pytest.mark.compile +def test_tool_creation_integration(mock_memory_client): + """Test that the tool factory functions create working tools.""" + store_tool = create_store_memory_events_tool( + mock_memory_client, "test-memory", "user-1", "session-1" + ) + + list_tool = create_list_memory_events_tool( + mock_memory_client, "test-memory", "user-1", "session-1" + ) + + search_tool = create_retrieve_memory_tool( + mock_memory_client, "test-memory", "/preferences" + ) + + assert store_tool.name == "store_memory_events" + assert list_tool.name == "list_memory_events" + assert search_tool.name == "retrieve_memory" + + messages = [HumanMessage("Test message")] + result = store_tool.invoke({"messages": messages}) + assert result == "test-event-123" diff --git a/libs/aws/tests/unit_tests/memory/test_agentcore_memory_functions.py b/libs/aws/tests/unit_tests/memory/test_agentcore_memory_functions.py new file mode 100644 index 00000000..c800225d --- /dev/null +++ b/libs/aws/tests/unit_tests/memory/test_agentcore_memory_functions.py @@ -0,0 +1,394 @@ +from unittest.mock import MagicMock + +import pytest + +from langchain_aws.memory.bedrock_agentcore import ( + convert_langchain_messages_to_events, + convert_events_to_langchain_messages, + store_agentcore_memory_events, + list_agentcore_memory_events, + retrieve_agentcore_memories, +) + +from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, ToolMessage + + +@pytest.fixture +def mock_client() -> MagicMock: + return MagicMock() + + +@pytest.fixture +def mock_agentcore_memory_client() -> MagicMock: + memory_client = MagicMock() + memory_client.create_event.return_value = {"eventId": "12345"} + + return memory_client + + +def test_store_messages(mock_agentcore_memory_client) -> None: + messages = [ + SystemMessage("You are a friendly chatbot"), + HumanMessage("Hello, world!"), + AIMessage("Hello there! What can I help you with today"), + HumanMessage("Tell me a joke"), + ToolMessage("Joke of the day retrieved.", tool_call_id="test_tool_call"), + ] + + result = store_agentcore_memory_events( + mock_agentcore_memory_client, + messages=messages, + memory_id="1234", + actor_id="5678", + session_id="9101", + ) + mock_agentcore_memory_client.create_event.assert_called_once_with( + memory_id="1234", + actor_id="5678", + session_id="9101", + messages=[ + ("Hello, world!", "USER"), + ("Hello there! 
What can I help you with today", "ASSISTANT"), + ("Tell me a joke", "USER"), + ("Joke of the day retrieved.", "TOOL"), + ], + ) + + assert result == "12345" + + +def test_store_messages_with_system_messages(mock_agentcore_memory_client) -> None: + messages = [ + SystemMessage("You are a friendly chatbot"), + HumanMessage("Hello, world!"), + AIMessage("Hello there! What can I help you with today"), + HumanMessage("Tell me a joke"), + ToolMessage("Joke of the day retrieved.", tool_call_id="test_tool_call"), + ] + + result = store_agentcore_memory_events( + mock_agentcore_memory_client, + messages=messages, + memory_id="1234", + actor_id="5678", + session_id="9101", + include_system_messages=True, + ) + mock_agentcore_memory_client.create_event.assert_called_once_with( + memory_id="1234", + actor_id="5678", + session_id="9101", + messages=[ + ("You are a friendly chatbot", "OTHER"), + ("Hello, world!", "USER"), + ("Hello there! What can I help you with today", "ASSISTANT"), + ("Tell me a joke", "USER"), + ("Joke of the day retrieved.", "TOOL"), + ], + ) + + assert result == "12345" + + +def test_store_messages_empty_list_raises_error(mock_agentcore_memory_client): + with pytest.raises(ValueError, match="The messages field cannot be empty."): + store_agentcore_memory_events( + mock_agentcore_memory_client, + messages=[], + memory_id="1234", + actor_id="5678", + session_id="9101", + ) + + +def test_list_memory_events_success(mock_agentcore_memory_client): + # Mock the response from list_events + mock_agentcore_memory_client.list_events.return_value = [ + { + "eventId": "event-1", + "payload": [ + { + "conversational": { + "role": "USER", + "content": {"text": "Hello, world!"}, + } + } + ], + }, + { + "eventId": "event-2", + "payload": [ + { + "conversational": { + "role": "ASSISTANT", + "content": {"text": "Hi there!"}, + } + } + ], + }, + ] + + result = list_agentcore_memory_events( + mock_agentcore_memory_client, + memory_id="test-memory", + actor_id="test-actor", + session_id="test-session", + max_results=50, + ) + + # Assert the client was called correctly + mock_agentcore_memory_client.list_events.assert_called_once_with( + memory_id="test-memory", + actor_id="test-actor", + session_id="test-session", + max_results=50, + include_payload=True, + ) + + # Assert the return value is correct + assert len(result) == 2 + assert isinstance(result[0], HumanMessage) + assert result[0].content == "Hello, world!" + assert result[0].additional_kwargs["event_id"] == "event-1" + assert isinstance(result[1], AIMessage) + assert result[1].content == "Hi there!" 
+ + +def test_list_memory_events_tool_message(mock_agentcore_memory_client): + mock_agentcore_memory_client.list_events.return_value = [ + { + "eventId": "event-1", + "payload": [ + {"conversational": {"role": "TOOL", "content": {"text": "Tool result"}}} + ], + } + ] + + result = list_agentcore_memory_events( + mock_agentcore_memory_client, + memory_id="test-memory", + actor_id="test-actor", + session_id="test-session", + ) + + assert len(result) == 1 + assert isinstance(result[0], ToolMessage) + assert result[0].content == "Tool result" + assert result[0].tool_call_id == "unknown" # Default value + + +def test_retrieve_agentcore_memories_success(mock_agentcore_memory_client): + # Mock the response from retrieve_memories + mock_agentcore_memory_client.retrieve_memories.return_value = [ + { + "content": {"text": "User prefers coffee over tea"}, + "score": 0.95, + "metadata": {"category": "preferences", "timestamp": "2024-01-01"}, + }, + { + "content": {"text": "User lives in San Francisco"}, + "score": 0.87, + "metadata": {"category": "location"}, + }, + ] + + result = retrieve_agentcore_memories( + mock_agentcore_memory_client, + memory_id="test-memory", + namespace_str="/userPreferences/actor-1/session-1", + query="coffee preferences", + limit=5, + ) + + # Assert the client was called correctly + mock_agentcore_memory_client.retrieve_memories.assert_called_once_with( + memory_id="test-memory", + namespace="/userPreferences/actor-1/session-1", + query="coffee preferences", + top_k=5, + ) + + # Assert the return value structure + assert len(result) == 2 + assert result[0]["content"] == "User prefers coffee over tea" + assert result[0]["score"] == 0.95 + assert result[0]["metadata"] == { + "category": "preferences", + "timestamp": "2024-01-01", + } + assert result[1]["content"] == "User lives in San Francisco" + assert result[1]["score"] == 0.87 + + +def test_convert_langchain_messages_to_events_basic(): + """Test basic message conversion to events.""" + messages = [ + HumanMessage("Hello"), + AIMessage("Hi there"), + ToolMessage("Tool result", tool_call_id="123"), + SystemMessage("System prompt"), + ] + + # Without system messages + events = convert_langchain_messages_to_events( + messages, include_system_messages=False + ) + expected = [("Hello", "USER"), ("Hi there", "ASSISTANT"), ("Tool result", "TOOL")] + assert events == expected + + # With system messages + events = convert_langchain_messages_to_events( + messages, include_system_messages=True + ) + expected = [ + ("Hello", "USER"), + ("Hi there", "ASSISTANT"), + ("Tool result", "TOOL"), + ("System prompt", "OTHER"), + ] + assert events == expected + + +def test_convert_langchain_messages_skips_existing_event_ids(): + """Test that messages with event_id are skipped.""" + msg_with_id = HumanMessage("Already saved") + msg_with_id.additional_kwargs["event_id"] = "existing-123" + + messages = [msg_with_id, HumanMessage("New message")] + + events = convert_langchain_messages_to_events(messages) + assert events == [("New message", "USER")] + + +def test_convert_langchain_messages_filters_empty_content(): + """Test that empty/whitespace messages are filtered out.""" + messages = [ + HumanMessage(""), + HumanMessage(" "), + HumanMessage("Valid message"), + AIMessage("\n\t"), + ] + + events = convert_langchain_messages_to_events(messages) + assert events == [("Valid message", "USER")] + + +def test_convert_events_to_langchain_messages_basic(): + """Test basic event conversion to LangChain messages.""" + events = [ + { + "eventId": "event-1", + 
"payload": [ + {"conversational": {"role": "USER", "content": {"text": "Hello"}}} + ], + }, + { + "eventId": "event-2", + "payload": [ + { + "conversational": { + "role": "ASSISTANT", + "content": {"text": "Hi there"}, + } + } + ], + }, + ] + + messages = convert_events_to_langchain_messages(events) + + assert len(messages) == 2 + assert isinstance(messages[0], HumanMessage) + assert messages[0].content == "Hello" + assert messages[0].additional_kwargs["event_id"] == "event-1" + assert isinstance(messages[1], AIMessage) + assert messages[1].content == "Hi there" + + +def test_convert_events_handles_malformed_data(): + """Test handling of malformed event data.""" + events = [ + {"eventId": "event-1"}, # Missing payload + {"eventId": "event-2", "payload": [{}]}, # Missing conversational + { + "eventId": "event-3", + "payload": [ + { + "conversational": { + "role": "USER" + # Missing content + } + } + ], + }, + { + "eventId": "event-4", + "payload": [ + { + "conversational": { + "role": "UNKNOWN_ROLE", + "content": {"text": "Should be skipped"}, + } + } + ], + }, + ] + + messages = convert_events_to_langchain_messages(events) + assert len(messages) == 0 # All should be filtered out + + +# TODO: Delete test once AgentCore adds support for memory metadata +def test_convert_events_tool_message_hack(): + """Test the tool_call_id='unknown' hack.""" + events = [ + { + "eventId": "event-1", + "payload": [ + {"conversational": {"role": "TOOL", "content": {"text": "Tool result"}}} + ], + } + ] + + messages = convert_events_to_langchain_messages(events) + + assert len(messages) == 1 + assert isinstance(messages[0], ToolMessage) + assert messages[0].content == "Tool result" + assert messages[0].tool_call_id == "unknown" # The hack + + +def test_roundtrip_conversion(): + """Test that converting messages to events and back preserves data.""" + original_messages = [ + HumanMessage("Hello world"), + AIMessage("Hi there!"), + ToolMessage("Tool executed", tool_call_id="tool-123"), + ] + + # Convert to events + events_data = convert_langchain_messages_to_events(original_messages) + + # Simulate AgentCore storage format + mock_events = [] + for i, (text, role) in enumerate(events_data): + mock_events.append( + { + "eventId": f"event-{i}", + "payload": [ + {"conversational": {"role": role, "content": {"text": text}}} + ], + } + ) + + # Convert back to messages + recovered_messages = convert_events_to_langchain_messages(mock_events) + + # Verify content is preserved (note: tool_call_id will be "unknown") + assert len(recovered_messages) == 3 + assert recovered_messages[0].content == "Hello world" + assert recovered_messages[1].content == "Hi there!" 
+ assert recovered_messages[2].content == "Tool executed" + assert isinstance(recovered_messages[0], HumanMessage) + assert isinstance(recovered_messages[1], AIMessage) + assert isinstance(recovered_messages[2], ToolMessage) diff --git a/libs/aws/tests/unit_tests/memory/test_agentcore_memory_tools.py b/libs/aws/tests/unit_tests/memory/test_agentcore_memory_tools.py new file mode 100644 index 00000000..3eb67003 --- /dev/null +++ b/libs/aws/tests/unit_tests/memory/test_agentcore_memory_tools.py @@ -0,0 +1,199 @@ +from unittest.mock import MagicMock +import pytest +from langchain_core.messages import HumanMessage, AIMessage, ToolMessage +from langchain_aws.memory.bedrock_agentcore import ( + create_store_memory_events_tool, + create_list_memory_events_tool, + create_retrieve_memory_tool, +) + + +@pytest.fixture +def mock_memory_client(): + client = MagicMock() + client.create_event.return_value = {"eventId": "test-event-123"} + client.list_events.return_value = [ + { + "eventId": "event-1", + "payload": [ + {"conversational": {"role": "USER", "content": {"text": "Hello"}}} + ], + } + ] + client.retrieve_memories.return_value = [ + { + "content": {"text": "User likes coffee"}, + "score": 0.95, + "metadata": {"category": "preferences"}, + } + ] + return client + + +def test_create_store_memory_events_tool(mock_memory_client): + tool = create_store_memory_events_tool( + mock_memory_client, "test-memory", "user-1", "session-1" + ) + + assert tool.name == "store_memory_events" + assert "Store conversation messages" in tool.description + + messages = [HumanMessage("Test message")] + result = tool.invoke({"messages": messages}) + + assert result == "test-event-123" + mock_memory_client.create_event.assert_called_once_with( + memory_id="test-memory", + actor_id="user-1", + session_id="session-1", + messages=[("Test message", "USER")], + ) + + +def test_create_list_memory_events_tool(mock_memory_client): + tool = create_list_memory_events_tool( + mock_memory_client, "test-memory", "user-1", "session-1" + ) + + assert tool.name == "list_memory_events" + assert "Retrieve recent conversation messages" in tool.description + + result = tool.invoke({"max_results": 50}) + + assert len(result) == 1 + assert isinstance(result[0], HumanMessage) + assert result[0].content == "Hello" + + mock_memory_client.list_events.assert_called_once_with( + memory_id="test-memory", + actor_id="user-1", + session_id="session-1", + max_results=50, + include_payload=True, + ) + + +def test_create_list_memory_events_tool_default_max_results(mock_memory_client): + tool = create_list_memory_events_tool( + mock_memory_client, "test-memory", "user-1", "session-1" + ) + + tool.invoke({}) + + mock_memory_client.list_events.assert_called_once_with( + memory_id="test-memory", + actor_id="user-1", + session_id="session-1", + max_results=100, + include_payload=True, + ) + + +def test_create_retrieve_memory_tool_default_params(mock_memory_client): + tool = create_retrieve_memory_tool( + mock_memory_client, "test-memory", "/summaries/actor-1/session-1" + ) + + assert tool.name == "retrieve_memory" + assert "Search for relevant memories" in tool.description + + result = tool.invoke({"query": "coffee preferences", "limit": 5}) + + assert len(result) == 1 + assert result[0]["content"] == "User likes coffee" + assert result[0]["score"] == 0.95 + + mock_memory_client.retrieve_memories.assert_called_once_with( + memory_id="test-memory", + namespace="/summaries/actor-1/session-1", + query="coffee preferences", + top_k=5, + ) + + +def 
test_create_retrieve_memory_tool_custom_params(mock_memory_client): + tool = create_retrieve_memory_tool( + mock_memory_client, + "test-memory", + "/summaries/actor-1/session-1", + tool_name="search_user_preferences", + tool_description="Search for user preferences", + ) + + assert tool.name == "search_user_preferences" + assert tool.description == "Search for user preferences" + + tool.invoke({"query": "food preferences", "limit": 3}) + + mock_memory_client.retrieve_memories.assert_called_once_with( + memory_id="test-memory", + namespace="/summaries/actor-1/session-1", + query="food preferences", + top_k=3, + ) + + +def test_create_retrieve_memory_tool_default_limit(mock_memory_client): + tool = create_retrieve_memory_tool( + mock_memory_client, "test-memory", "/summaries/actor-1/session-1" + ) + + tool.invoke({"query": "test query"}) + + mock_memory_client.retrieve_memories.assert_called_once_with( + memory_id="test-memory", + namespace="/summaries/actor-1/session-1", + query="test query", + top_k=3, + ) + + +def test_store_tool_with_multiple_messages(mock_memory_client): + tool = create_store_memory_events_tool( + mock_memory_client, "test-memory", "user-1", "session-1" + ) + + messages = [ + HumanMessage("Hello"), + AIMessage("Hi there"), + ToolMessage("Tool result", tool_call_id="123"), + ] + + result = tool.invoke({"messages": messages}) + + assert result == "test-event-123" + mock_memory_client.create_event.assert_called_once_with( + memory_id="test-memory", + actor_id="user-1", + session_id="session-1", + messages=[ + ("Hello", "USER"), + ("Hi there", "ASSISTANT"), + ("Tool result", "TOOL"), + ], + ) + + +def test_tools_handle_client_errors(mock_memory_client): + mock_memory_client.create_event.side_effect = Exception("AWS Error") + mock_memory_client.list_events.side_effect = Exception("AWS Error") + mock_memory_client.retrieve_memories.side_effect = Exception("AWS Error") + + store_tool = create_store_memory_events_tool( + mock_memory_client, "test-memory", "user-1", "session-1" + ) + list_tool = create_list_memory_events_tool( + mock_memory_client, "test-memory", "user-1", "session-1" + ) + retrieve_tool = create_retrieve_memory_tool( + mock_memory_client, "test-memory", "/summaries/actor-1/session-1" + ) + + with pytest.raises(Exception, match="AWS Error"): + store_tool.invoke({"messages": [HumanMessage("test")]}) + + with pytest.raises(Exception, match="AWS Error"): + list_tool.invoke({}) + + with pytest.raises(Exception, match="AWS Error"): + retrieve_tool.invoke({"query": "test", "limit": 3}) diff --git a/samples/memory/langgraph_agent_with_memory_search.ipynb b/samples/memory/langgraph_agent_with_memory_search.ipynb new file mode 100644 index 00000000..19e41558 --- /dev/null +++ b/samples/memory/langgraph_agent_with_memory_search.ipynb @@ -0,0 +1,314 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "06c698fd-5655-4b0c-a10b-486fe27ee869", + "metadata": {}, + "source": [ + "# LangGraph Agent with Bedrock AgentCore Memory User Preference Retrieval\n", + "\n", + "This notebook walks through creating a simple LangGraph agent with a chatbot (llm) node and a memory retrieval node.\n", + "\n", + "This agent integrates with Amazon Bedrock Agentcore Memory to retrieve messages using semantic search so that the agent can use context from previous conversations and user preferences to help the user.\n", + "\n", + "For this example, an Agentcore Memory was created with two strategies\n", + "- `Summarization` - Summarizes past conversations, then embeds them for later 
retrieval\n", + "- `User Preferences` - Extracts user preferences from past conversations\n", + "\n", + "A tool is created for each namespace using the `create_retrieve_memory_tool` tool factory, then the agent is instructed to search the long term memories in the system prompt before clarifying preferences or past conversations with the user. This way, the agent can learn the user's preferences and more information to more accurately assist them.\n", + "\n", + "### Pre-requisites for this sample\n", + "- Amazon Web Services account\n", + "- Amazon Bedrock Agentcore Memory configured - https://docs.aws.amazon.com/bedrock-agentcore/latest/devguide/memory-getting-started.html#memory-getting-started-create-memory\n", + "- Amazon Bedrock model access - https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "b801204e-fdfb-402c-bf29-4a4c6759f811", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_aws import ChatBedrockConverse\n", + "\n", + "from typing import Annotated\n", + "from typing_extensions import TypedDict\n", + "\n", + "from langgraph.graph import StateGraph, START, END\n", + "from langgraph.graph.message import add_messages\n", + "\n", + "from langgraph.checkpoint.memory import InMemorySaver\n", + "from langgraph.prebuilt import ToolNode, tools_condition\n", + "\n", + "from langchain_core.messages import SystemMessage\n", + "\n", + "from langchain_aws.memory.bedrock_agentcore import (\n", + " store_agentcore_memory_events,\n", + " list_agentcore_memory_events,\n", + " create_retrieve_memory_tool,\n", + ")\n", + "\n", + "from bedrock_agentcore.memory import MemoryClient\n", + "\n", + "config = {\"configurable\": {\"thread_id\": \"1\"}}\n", + "memory = InMemorySaver()\n", + "\n", + "llm = ChatBedrockConverse(\n", + " model=\"us.anthropic.claude-3-7-sonnet-20250219-v1:0\",\n", + " max_tokens=5000,\n", + " region_name=\"us-west-2\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "4e8f3d89-8698-4fab-8224-d0a4d0f78b8a", + "metadata": {}, + "source": [ + "## Configure Agentcore Memory\n", + "\n", + "AgentCore short-term memories are organized by a Memory ID (overall memory store) and then categorized by Actor ID (i.e which user) and Session ID (which chat session). \n", + "\n", + "Long term memories are stored in namespaces. By configuring different strategies (i.e. Summarization, User Preferences) these short term memories are processed async as long term memories in the specified namespace. We will create a separate search tool for each namespace to differentiate between `UserPreferences` and `Summaries`. The tool factories ensure that the LLM will only have to worry about sending the query and limit for searching memories and not worry about any of the configuration IDs." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "56d202fb-7cb0-4a1b-b7cd-458369d4b691", + "metadata": {}, + "outputs": [], + "source": [ + "REGION = \"us-west-2\"\n", + "MEMORY_ID = \"MEMORY_ID\"\n", + "SESSION_ID = \"session-5\"\n", + "ACTOR_ID = \"user-1\"\n", + "\n", + "SUMMARY_NAMESPACE = f\"/summaries/{ACTOR_ID}/{SESSION_ID}\"\n", + "USER_PREFERENCES_NAMESPACE = f\"/userPreferences/{ACTOR_ID}/{SESSION_ID}\"\n", + "\n", + "# Initialize the memory client\n", + "memory_client = MemoryClient(region_name=REGION)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0d7f6172-b13a-45c7-9e97-753509a9ef6a", + "metadata": {}, + "outputs": [], + "source": [ + "summary_search_tool = create_retrieve_memory_tool(\n", + " memory_client=memory_client,\n", + " memory_id=MEMORY_ID,\n", + " namespace=SUMMARY_NAMESPACE,\n", + " tool_name=\"retrieve_summary_memory\",\n", + " tool_description=\"Search for summaries of past interactions to get information relevant to a query\",\n", + ")\n", + "\n", + "user_preferences_search_tool = create_retrieve_memory_tool(\n", + " memory_client=memory_client,\n", + " memory_id=MEMORY_ID,\n", + " namespace=USER_PREFERENCES_NAMESPACE,\n", + " tool_name=\"retrieve_user_preferences\",\n", + " tool_description=\"Search for past user preferences related to a current query\",\n", + ")\n", + "\n", + "memory_search_tools = [summary_search_tool, user_preferences_search_tool]" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "960e113b-ccfd-473a-9be4-eec5e1eac004", + "metadata": {}, + "outputs": [], + "source": [ + "config = {\"configurable\": {\"thread_id\": \"1\"}}\n", + "memory = InMemorySaver()\n", + "\n", + "class State(TypedDict):\n", + " messages: Annotated[list, add_messages]" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "9eb49712-e25b-4c35-ab4b-8196c7842623", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAS4AAAF0CAIAAAASN0OFAAAQAElEQVR4nOydB1wT5xvH30vCCHsIKkvEvcVVtRYnLtxW66jiqru2zn+Hq1rrtrbuWbdWqyK1irN14MKBoy6QIQ5QQdmQkNz/SQ5jgEBJSODG8xXzuXvvvfcuufvd+z7Pc+/7SmiaJgiClDUSgiAIC0ApIggrQCkiCCtAKSIIK0ApIggrQCkiCCtAKZY2mank9vmk+Lis7EylIlspz1aFkyhKvU1EEyVFU+pVJaFEhFaqkmlK9Y+S0HSOKh9NVFk0BTLZKBFNKz9sFUmIMie3QCYbFCERU4qc97uJVIdQ7S4mSkWeAmFfhUJ1RE2KmSUlMhNZWohc3C19WztYOYoIYmwojCuWGn/8+uL1s0xFDi0xpyytJGbmlEhM5FlKlQqYiyCmCGiAUumCVtIaKTIZROYipUy1rlFdLiKK5MkMO9NiiUiRo2QKZHJBuWJQXQ6tvRdRS5HOIVpKJGIztWK1bgxzqRhOG54dsixljlwJxypX0aLHGDepjZggRgKlWBrsXBibkpgDN26NRnYf93AiHOfK0aQH11PSkuV2DmaBc7wJYgxQiqbl/OHEOxfeOpY3H/w/L8I7fl8e9+pZVnVf205DKxCkZKAUTcjepXFp73L6T/K0L89bm1yWSbbNj7a0Fg/9nofPmtIEpWgqQrYnvH6ePeQ7Qdygvy9/IZEo+37lQRBDQSmahB0/xlIiSiA6ZNi79Flmes6Iud4EMQj0ShufQ6tfwPNNUDoEBk73sLKR/L7sGUEMAqVoZB5dT0+IzQycVYkIjwHTPN6+ll098ZYg+oNSNDJn98c383cmQqVdP9frpxIJoj8oRWNyatcribmocUcHIlSqN7GRWkuOrH9BED1BKRqTyDupdZoLV4cMLbqUex6ZSRA9QSkajQfX0mkFadnNkZQi+/fvnzNnDtGfb7755siRI8QE1GpuQygSFoIWo36gFI3GnQtJdk5mpHS5f/8+MQiDdywOzhUsHt1MJYg+YFzRaGz8Lrp6Q9s2/csRExATE7N+/fobN27A9apfv/7QoUMbNmw4evTomzdvMhl27dpVs2bN33///cKFC/fu3bOwsGjUqNGECRM8PFRh9xkzZojF4ooVK+7YsWPJkiWwyuxlY2Pzzz//EGMTdvLdzbNJYxb5EKTYYK1oNHJkyiq+1sQEyGQyUB1oadWqVevWrZNIJJMnT87Kytq4cWPdunUDAgKuX78OOgwPD1+6dGmDBg2WLVv2ww8/JCUlzZw5kynBzMwsUs2KFSt8fX1DQ0MhcdasWabQIVC3mb0iBx/x+oH9FY2DTKbq5+RZTUpMQGxsLOhq4MCBoDdYXbRoEVSGOTk5+bLVq1cPTEcvLy/QKqzK5XJQbHJysr29PUVRL1682Llzp6WlJWzKzs4mpkTqoOpz9SpW5lrJnCDFA6VoHFITZKZrYIC6HB0d586d27Vr18aNG0O916RJk4LZoNp89uzZ8uXLoYGanp7OJIKGQYqwULlyZUaHpUZifDZKsfhgA9U4QHPMdA0yMPw2bdrUqlWrPXv2jBw5slevXseOHSuY7dy5c1OmTKlduzZkDgsLW716db5CSClCiTSdlpFigVI0DnYO5qYUI/H29v7666+PHj0Kxl7VqlVnz5798OHDfHkOHz4Mvhxw1VSvXh1apKmpZenDVCqJozNWiXqAUjQOUjuIpVGvnsqICQD3aXBwMCxAC9PPz2/x4sVgDT548CBfNjALXV1dNatnz54lZUSOTDUeh3u1Uq2HuQ5K0WhQYvLohkkqItDYvHnzVq5cGRcXBy6c3377DXw2YDHCJk9PT7AMoTkKNiFUhleuXAFvKmzdvXs3s+/Lly8LFgiNVRCtJjMxNv9eTqZEFEH0AaVoNKxtJTH304gJANV99913x48f7927d9++fW/dugUxRh8fVdSuT58+0BaFRmlERMT48eNbtmwJ5mKLFi3i4+MhngF246RJk0JCQgqWOWLECBDw1KlTMzON/5Lak9vp1jgClZ5giN9ohJ16e+1E4oRlVYngWTfjSa1mdm0+dSFIscFa0Wg09XekleRuaAoRNi+jsiC+jzrUF4wrGhP3KtKrxxPrfWxXWIZRo0ZFRkYWTFcoFNA8YULzBQkKCnJwMEmHj/DwcHDM6twEpyQSiShKt8l35swZCGPq3HRqT4KLOzps9AYbqEZm9ZTIgEC3yg2sdG59/fq1XC7XuSk7O7uw0J+bmxsxGS9eGNK3sLBTSn6ds3NhzMQV2ErXG6wVjUwDP8cTe16ObVBF51YXF9Y124yr8wO/PqvSwI4g+oO2opH5pJezjYPZ/hVCHG3p6KYEiRnVJdCVIPqDUjQ+n3/rlZwoO7Y1gQiJv/9Iev4kfdhsIY6vZRTQVjQVOxbE2jpKeo93JwLg5M5XTx9njJrvTRBDQSmakK2zY8TmVOBMnlcUexY/TUvOGf0TdhQuEShF0/LHymcJcVnVfO07fs7DONvf+18/CEtxdDEbOAMnzCgpKEWT8/RhVsi2F/IcpYuHtH3f8s6enPdav30lP/fH6xfRmWIx1ebTCjWaWBGkxKAUS4m7oalhIYmZGQoRRSxtJTZ2Eit7MYT0ZdlK7WxiCWEmBoboulJJM3MD5y6rOwECzJSmUA5NUbSSfr8jpRrDQp0f4vLMZRWJVJ2VcqclVidCCk2YWYpV5cHuHwpnMohVM6XCXsz8qsy+ZhYiWkEyUpUpSbLM9Bw4kLWtxLeto29boY80aURQiqXNrbMpMQ/SwLiSZYECaHlWnt9fJAIZqF5wYTTATA2snkdYtZVSiYPKXVatqGf7ViMWE4VClQi6oolSRIk1hbzfnVbLTbVAq6c2zk17P/0w3AsqcYvUc4i/Pym1PCkzcwrKMzMXW9uKvWrZNPW3J4ixQSnyjS1btshksnHjxhGEU+DbNnwjJyensHdZETaDIX6+gVLkKHjN+AZKkaPgNeMbcrnczKy05wtASg5KkW9grchR8JrxDZQiR8FrxjdQihwFrxnfQFuRo6AU+QbWihwFrxnfQClyFLxmfAOlyFHwmvENlCJHwWvGN1CKHAWvGd9AKXIUvGZ8A6XIUfCa8Q2UIkfBa8Y3MMTPUVCKfANrRY6C14xvoBQ5Cl4zvoFS5Ch4zfgGSpGj4DXjG+i24SgoRb6BtSJHwWvGN9zc3EQiHMiPe+A14xsJCQlQMRKEa2CtyDegdYpS5CIoRb6BUuQoKEW+gVLkKChFvoFS5CgoRb6BUuQoKEW+gVLkKChFvoFS5CgoRb6BUuQoKEW+gVLkKChFviEWi1GKXASlyDewVuQoKEW+gVLkKChFvoFS5CgoRb6BUuQoKEW+gVLkKChFvoFS5CgUTdME4T4dOnRISkrSrFIUpVQqa9SosW/fPoJwAezFzxP8/PxAfqL3wLJUKu3fvz9BOAJKkScMGzbM3d1dO8XLy6tPnz4E4QgoRZ4AwmvTpo1mVSwW9+3blyDcAaXIH4YMGaKpGD08PHr06EEQ7oBS5A8uLi6dOnUiap9Nt2
7dLCwsCMId0IP6gYgbmTGP0rLS5dqJFAX/Ca3MXRWJiFKpSiHMz6ZZIOrHmvJ9Bs3u4ECBvZUfVmklnS9P7t4SSpmT/1qIJZQiR8cF0lkCJSIKheJ62A1YbtSoUb6BiXXvIqZoBc18zXw3ApSm9a3BH0trvoKSpqnCM2sv64T5EYrIYCGVeFazqfWRFRESKEUV6Wlk3+KYHJlSYi6SZeW9j0BsoDaayl1T32ewRhUixfw3okidV6lVGp2bMx8iMVEqCibSSgWVL1FJ0WKK0nG7UyDyXOlSVP69dCuEev/VtL9Ivk35lEYR1Q9A5y1fe3etHQscTpWtoOzzYWEpkslpeJL0nejuWMGcCAOUIpHJyJaZ0TWaOjbt6EAQ1nD3QvLt84mfTfFwEoYaUYpkw/+i2/Z3r1hVKE9fDpH2jgStiRq3xIcIAKG7bY7/lmAhFaMO2YmNA7GxNzu0Jp4IAKFL8dXzLHsXnAKNvThVtEh+lUUEgNBfB5dlF/SVICxCIqHyO9J4itClqJDRVI4grjRHUSqVCoUg3BnYSQpBWAFKEWE3FBPa5T8oRYTd0AXePeApKEWE1VAUobBWFAKqWevxlXgWQ9NEIC+hCF2KSsFcaa4igloRPahCgCbCuNCcRQnPSkG0UNFWRFgNJaLQg4ogZY+qZyPaikIA3TYsR1UrCuMCCV2KNBFK2IqjqGpFYbyYiG4blCLCCoTeOCvDSMYP8745dvwIERIC/MrFB+2kMuPRo/tEYBjylVUOVEG0W4QuRQPcNleuhk6eMqZLQKvBQ3otXDwnMfFNZmYmrO7avVWTR6FQ9OjVbsPGX3Xmh8S27Zu8jH+xdNn87j3bMLuEnPhz/MRhkA0+/zi4RzPQCdQk8+Z/e+rUsY6dW8BWKCo5+d32HZvadWjaq0+HdetX/ueQKPsP7IKcFy/+0+fTjrDX50N7nzz5F7NpztwZUDicJ5zP+QtnISUjI+PHn2Z+2r9zpy4tx4z9POjIASbn4aD9sHtk5OPPBgZ06PjRyC8G3L9/99Kl8917tIGzmj1n+rt3byHb44iHTFGQARagnDVrVzAlaL7y2HFDSLFRBTKE8eYb1or6Abfat9995evbdNvWPyZ9OePJk8eLl8yVSqVt23Q8fea4Jtut8OupqSmdO3XXmR8yhBwLhc/p02b9eeQfWDh9JmTxkh+qV6u5Z1fwqJETQIqr1y5nipJIJPf+vQ1/B34/vn7tTlj4avIXSqXiaPC5ObMXgcyuXg0t+pzFYkl6etqZsyG7dx4JOnymfbtOi5bMjYuLhU1mZmZR0ZHwt2D+ivr1fCHlm+8mvXjxbP685fv3HfPza//Lr4sfPPyXyZmWlrptx4ZlS9bCOcvl8p8WzT4eErx50z4o9u698N/371SdrVjlfdi1a8uP81ecOH5pwvipR4IP/HUsSPsrr1+3kxQb4bz4JnhbkWkBFZt7d8MtLS0/HzyifPkKHzVruXzpuoEDh0F6QNdesbHREZGPmGznzp2uWaN2pUqVC8ufj2PHgurX9/36q28cHZ0a+TYdHjg2KGj/27e5M0PJZLKJE6bZ2ztAgT6Vq4rF4uHDxlpZWfk2bOLg4PgkKuI/TzsnJ6dP7wHwyLCztRsWOMbayvrM2RNEPUZjfPyLH+YsadnSD4qCCvzu3fDpU2fVqlkHDjd40PB69Rpu37GRKQTkFzh0tKdnJSjno2Yfv3z5fPLX38L3cnJybtigMTxlNIf75JN2FSu4mZubt23j37RpizNnQoihCONNGxWCl6KSFD08bj7q1muYlZX17fdfH/hj97PncXC/gh4gvU6d+h4eXqdPqypGaDGeO3/G3z+giPzaKJVKqOuaNmmhSYFaFBLv3L3FrLq7e0KlxCxLray8K30YAQ1EBZUVKQbVq9diFkB+bm4eT59GM6uVvCrDw4JZjo6OhOXKlat82KtaLW0DT3NoeBDAUwNEWsB0OgAAEABJREFUmHtWUqu09DRNtmpVa2iW3d08Y2KjiKEI57VEbKDqB7QhFy38tZyzy8ZNq4YM7T1t+vh7924zm3r16Hfy1F+gQ2idZmZmdOjQpej8GqDSgwpny9a1YE0xf2CPQbqmVhSJ8lymfKvFRHvcfgtLy/T3yjHXSgc71tJSqr0XSA6+i2ZVe6RjqnATTrsQS61jGYD6KPg6uAAwwG0D7Uz4gybijRtXDx7a+933Xx86eAosOv+OAes3/nL9xtXLVy60bOEHTcEi8msXCDcr3PEd/QPANtNOd6voQYxHenq6tbU1s5ydleXo4FQwD2TIysrMs1dGOjxHiJ5oV9TQKMgnb/2gaArdNkJBn2duePiNq9cuwUK5ci6dOnUDt0RqWmp8wktIAe21ad0BrMSzZ0/4d+j6n/m1qVKlOqRD25X5q1ungbNTOVfX8sR43AoPYxays7OfxsVot0I11KheG5SjsXiBBw/ueevKWTTht29oliMjH4F9SwxFZUGg20YQ6HmZwaib+8OMP48eAt/9/Qf3Dh3eBxqrUL4is7Vr116MH7V581ZF54fmoouL6/XrV6A1Cz6VL0ZODA39B8LfYCKC4wQCDFOmjYWGKzES0KY9dGjf06cxEGXZ+ts6UGP7dp0LZmvWrCWYkStWLHj46H5SUiK0mUGKn/XTI/bAEHb9MvMAuhj6D3xBpq2u+cpQJkEKgF2H9VNj/36fg6hWr1m24uefwEPYrm2nn1ds1MzZBBWaqqXaoasmpYj8gweN+G3b+mthl/buOQqOyo3rd+/e8xuE+KCJWKd2fQgGGHFWNmjjwZmAvMEaBP/nNzPmgiO0YDY4sR/nLV+/YeX4CYFwtj4+1ebPWwbnRvRk0IBhW7as+ebbSfAI6NNnALiXmXTmKz9/Hrdh/S6C5EXoc2as/1+Ui5tFx2HuxBg8evxg3PihO7YdBG8qYQ0HD+1bu27FmVPXiOmJioqE4P4vP2+C2AwxBhcPJ0TfSxu/TO9GMudAt41xGumRkY8TEl5u3Lxq4IBAVumQ60B9TmEnKSGg6n9jjGbBxk2/hl2/4u/fdcTwcaTUgbjlvbvhOjeB+erqWoFwFmi10cLoJIUNVGM2UMsKsABlct0+Hiuplb09h+eNvBiUEH0XG6gIR3B2Lkf4imBqCrQVMaCDsAKsFYXSB4ejCOfiCN5tAy4BYcwZxlFUg6Di4IsIwgJw8EUEQUoRwbttJDQ+jtgMJcKZpISBMociOQRhLcLpmYE1AoKwApQigrACoUvRQiqWWGCMn71IzM0spIK4QEKXoqWVODNNGK8bc5PUpGyBSFHoFULDNo7Jb7IJwlYSX2ZVa2BHBIDQpVirmbWds/nBFXEEYR+Hfo2DZstHAY5EAAi9kxTDyd2vnz5Md/OxdqtmTSvyBDdU713l/YVUUS4RpWP0VIh/wY+Zd6xAWp2qlUX1g+cmFSxAdShdl4PKLatgOkTdlDqvIBxJqWvmbJH6JGld+Zmz1y6Ner+qOrPcc8t/ksz3pah8MQemPLrACefuWnBBCzEljo/OjHuSVs7NoufYikQYoBRzOX8w6cmdlOxsZU52XtNR9
zCcNCnszch8UlTdwlS+rTTR/WJlbqqOwzES1nEsqpDJsAo7BClsVFG1cChK91QxqtI0m/KVQL3/MYpzH73PVvjPp0JkTiwtJZVr2bQdwN/OXwVAKfKNrVu3ZmdnjxtXBoMJICUB44p8IycnRywWE4RrYEiNb4AUNUM/IhwCpcg35HK5Zq4bhEPg45NvYK3IUfCa8Q2UIkfBa8Y3UIocBa8Z30ApchS8ZnwD3TYcBaXIN7BW5Ch4zfgGSpGj4DXjGyhFjoLXjG+grchRUIp8A2tFjoLXjG8oFAqUIhfBa8Y3sFbkKHjN+AZKkaPgNeMb4LZBKXIRvGZ8A2tFjoLXjG+gFDkKXjO+gVLkKHjN+AaG+DkKSpFvYK3IUfCa8Q2UIkfBa8Y3atWqhVLkIjjiG994+PAhmIsE4Rr4+OQbUCVCG5UgXAOlyDdQihwFpcg3UIocBaXIN1CKHAWlyDdQihwFpcg3UIocBaXIN1CKHAWlyDdQihwFpcg3UIocBaXIN1CKHAWlyDdAigqFgiBcA6XIN7BW5CgoRb6BUuQoKEW+AVLEnhlcBKXIN7BW5CgoRb6BUuQoFE3TBOE+DRs2FIlUHcEpSnVNAVjw8PAIDg4mCBfAXvw8oU2bNvAJagQFwqdYLDYzM+vTpw9BOAJKkSd88cUXFSpU0E7x8vLq27cvQTgCSpEn1KlTx9fXV7MKVWKnTp1sbW0JwhFQivxh5MiRbm5uzLK7u3vPnj0Jwh1QivzBx8enZcuWRO258fPzc3FxIQh3wGCGyYmPlSXGZ9FKXdsoQuj3n4aRd9+P6w98+q9Ki75Ve967nKJJZw5S6I6FLWuv6jpJcBI5V7AoX8mcICUGgxkm5Oz+N5G3UpQ5BGSoyNGhRYoi8PPrVCKjnEJ0+kFZBqs4j/oodexD1+GYMySFHEgM/loxOGwp79pWnYaWJ0gJwFrRVNy5kBJ5K7VRe5caTXnuO3lyOyMs5FXoX28/DnAkiKFgrWgSTu15E3c/rd90byIYDvwc6+pu0e2LCgQxCHTbmISo2ylNu7gSIdF+gEfc4wyCGApK0fg8Cc+CloZ3XSsiJJwqisVm1M2/UwhiEGgrGp+3rzKJIKEVypREGUEMAqVofJRKOkeuJMJDoSA4lofBoBQRhBWgFBGEFaAUTQBFEUEiEkPQHx2BBoJSNAFCDdUqwVZUCtFINgooRQRhBShFEyDQ9ilSIlCKiPGg8DFkOChFEyDYt3ppAX/3EoNSRBBWgFI0PuDPp9Clj+gJStH4gD+fFqZLnyIYVjQY/OVMgPFC/At+mvnlVyNJCTh4aF97/2aktMDerwaDUjQRZelJPBy0f+HiOaQEREc/GTCoG9EXGqVoONhANQF0GXsSHz26T0rGo8clLQHRF5Si8VH5bPSvFC9fvvDLqsWvX7+qWqV6r179u3TuwaSbSczCw28sWDjz3bu3sOnLL2fUrlUX0tPS0g78seta2OWYmCfOTuVatmw9Yvg4S0vLr6eMvn37JmQ4efKvDet3EfVYjC9ePt+6de3Va6HlyrkO/CywY8cApvCnT2NW/rLoccQDsVji7e0zLHCMb8Mmv21bv2PnZtjatn2TXTuD3N08ivsdKFokwsCigWAD1SRQemoRdDhrzrSRIyYsWvhrq1Ztlyydd/pMCLMp4VV88J9/fPftfNgkk8uWLpvHDEd06PC+PXu3fdZ/yE8LVo4Z89U/505t37ER0leu2FirVl0Q299nrlevVpMpZOGi2f7+AfN+WFa3TgNou8bFxULi27dJE78c7upaYeOGPWtW/ebo4DT/x+8yMjKGDxs74LOh5ctXgBL00CFA41BJhoO1ovFRN0/1uyOhIvL7pJ1/hy6w3LRJ8/T0tIyMdGbT69cJ69fttLVRDRvXp/eAZct/TElJtrd36N/v89Z+7StVqsxku3fv9rWwS2NGTypYuEKhgB0/aqYarbhq1RohJ/48c/bEsMDRB/7YbW5hMW3qTIlEdRtMnzb70/6djgQfGDggkBgKKtFgUIomQKmfqQg1yZOoiA5qHTKMHfOVZrlKleqMDgF7Owf4zMrKsrdXzYoRdv3yosVzIp88ZiZUdHR0KuwQHzX7mFmAoip7V3kZ/xyWo6Ijq1WryegQsLa29vSo9PjxA4KUBdhANQF6BjNkMplSqbSwsNS5VSMVdcEfSt64adX27RsDAnrv2hEELcnBg4aTwrGy+jDmlaVUCvUqLCQlvrHMe1DYlJGJo7aVDShFE0DRepmKUL+JRCJolBZ/F6hI/zx6sHfvz7oF9Aajjqi8OKlF5IeKVLMMTV87O3tYsLK2zsrO0s6WmZEBHiBiMBQtRreNoaAUTQBN6dVABR3WqFH77r1wTcqmzavXrF1RxC5yuTwzMxPcocwq1KuXLp8vIn9ExENmAbwysbHR7m6esFyjeu0HD+5BUcymlNSU2KfRlStXIYZDKdFYNBSUovGhKL1vx57dPw0Lu/z7/p23wq8fCf5j777tRUvC3Nzcy8v7eEjw8xfPkpPfLVk2r17dhqmpKenpKmePu7snaOzmrTDwkRJ1ExfcQhC3AJNyy29r4bNd246Q3r17X6iKl69YkJAQHxMTBV5WaK927dILNnl4eCUmvrl48R8QPCk+GOIvAShF42PAC6idOnUD5+fOXZunTB0Ln6O/+LJrl/+YHXHW9z+BcoYN//Tzob0aN2o2atREWO3dt8PL+BfdA/qAVTl9xgTwBikUOVZW1uBuhXijf6fm4eHXZ36/AJQGJXi4e86ZvSg6OnLAoG6wFVJ+WbkZnDew0PyjVqBtiK+8efOKIKUCBoKMz7UTb6+dSAqcU5KWHifZOf9JjaY27T/DKaUMAYMZpkC4XWgp7MZvKChF40OJKEqojkQau/EbCkrR+NAKmlbiHYnoB0rRBFCUYBuoiMGgFE2AKsSPJhOiHyhF40PhIISI/qAUjY/KUBRkiIgSERE2BwwFpWgShOlIpGn0oBoOStEEUEINr+GLbyUApWgCRBQ20xB9QSmaACWh0W2D6AlK0figxYQYAErR+Kj7SKEWEf1AKRofypySCPJ3NTMXmZuJCWIQKEXj41rBUpgdQZVKYu9qThCDwK7DxqdSbalIQj28qsdYNTwgPlIGoYz6rewIYhAoRZNQv6XDzb9fEyHxz8EX1X1Rh4aDvfhNxaUzj8JPmFWtb9+4k7M5f1ttCgW5cTop4mZy6z4utZrZEMRQ0FY0CU+ePFm37cfxA5Y+uJb65PY7MKKUevZghEek9osCtGoYOe3VfIHLDwn5OmjlWaU/dBkptB+XdtG0dhcTrUOoT46o3mVQ9ZM2txT7tnZAHZYQrBWNzPbt2wMDA1+9euXq6qpJzExTVx/aUOr/zI/P9G9krgOllahSguptAebWpzRXipGHeqtaIJT2S6+/H/g9OzN7aGBgbjki9WjlDCKRyrXC7MUs50nPLU7rrD6oMfdZkFsmNWfW7FvhN23sbM0s6QoVKnh6etasWdPNza1x48YEMQisFY3J9OnTfXx8YEFbh4BUVWGUkpc/h2TQZjlSu8K8AOJiLP83
E6eMnjBhQtyLJ0qlEpoAkGJhYWFvb29mZhYcHEwQ/cFa0QhERETcv3+/Z8+eKSkpdnZl7LpQqKtfsdjkyp8/f/6RI0fyJUL1ePjwYYLoD3pQS8rz589nz57drJlqku0y1yFRi7AUdAgMHz7c3d1dO8XGxgZ1aDAoRcPZtWtXZmamVCrdu3dvxYoVCTvYuHEj2KvE9Hh4eLRu3VqzCs2ryZMnE8RQUIoG8tNPP7158wZ06OTkRNhEdnY2KS0GDx7MPIPAYrxx48adO3dmzpxJEINAW1E/oDkaGhrav3//pKQktomQIScnB7ytpdNGBX755RdoHYCBesXDxOIAABAASURBVPOmatbxkJCQ5cuXr169ukaNGgTRB5SiHiQnJ0OgAm61KlUENwh/EXTs2PHkyZOa1Xfv3oFztVOnTkOHDiVIsUEpFotDhw61aNHC1tYWPBOE3cCTonLlyn369CFlyq+//vr48WOoHglSPNBW/G82bNjw6NEjMIrYr0NSurZiEUyaNOnzzz9v2rTptWvXCFIMsFYsFPDKQLtr0KBBr1+/dnFxIRyh1OKKxWT8+PG1a9eeOHEiQYoEa0XdZGVlwUO9QYMGsMwhHZJSjCsWk7Vr10JrYsiQIWBpE6RwsFbMz19//QXeP09PTwsLC8JBfvzxx8aNG3fp0oWwiYcPH4Iv53//+x/4eAiiC6wV87Bv3z6wbXx8fDiqQ6Kuz1k49mPNmjXPnDnzzz//wJOCILrAWlFFamrq4cOHwfmekJBQvjy3J82FuKJIDWElQUFBW7ZsAc9qpUqVCKIF1ooqevTowYSkua5DQCKRsFaHQK9evTZt2jRlypT9+/cTRAtB14qnTp1ydnZu1KgR4RFgj3Xv3r1Vq1aE3SxZsuTVq1fLli0jiBrh1orHjx8/e/Zs/fr1Cb9gp61YkBkzZnTr1u2TTz65ffs2QQRYK8pksp07d44cOZIHZqFOWG4r5gMeHOBZ/eijj0aPHk2EjeBqRTALq1WrRnhhFuqE5bZiPiwtLcGLAwtffPEFS94TKiuEUiueP38e7tGWLVsSvjNp0qTAwEDOjTETHh4O1ePChQv9/PyIIBFErXjx4kXwoQtkBCSu2Ir5aNiwYWhoKFwmwTpyeF4rbt68edSoUXw1C3Uil8uh/ufuDI/79u07ePAgBB6Fc8kY+Fwr9u3b183NjfDXLNSJmZkZp2daHTBgwNKlS0eMGCG0keN4WCuGhYW9e/fO398/36i+XAGqNaVmhFL9WbFiRf/+/T08PIihsOSlv3nz5oG7WzgvyvFNinfu3Fm3bt3ixYvZMPiaYcBzBAISxFDevn1ra2srKcG0ck5OTizxwQpqeA7+SHHTpk3gEOdW30KdlFCKJW8LsEeKREjDc/DEVoQAsbW1NeFa30JTwGlDsSAODg67d+8GQfK+8zG3a8Xbt2/HxcV169YNosPc7daUj5I3UKFxXpLew6yqFTVcuXLlyy+/XLNmDTP6M//gcK345MmTVatWtWjRgrDG08AG+Bqdat68OTjktm3bxtehqzgpxQ0bNsCno6MjhA2dnZ0JogX8LAWrxM8++2zPnj2E+/B4eA7uSfHbb79lmk/sHBHYFCxYsODEiRPFzMwzW7Egw4YN+/777/v06aM9+CoP4IwUHz16xHQ2nTlzJnhKiZCIiIgofuakpKSShCU5AS+H5+CG2yYhIWHq1KlLly5lzywxpiOf26Zz587MAriIDx48CAvQ1Dx16lRiYiK4i+vXrw/ODI2XBTZB/Qmem3yboIHas2fPQYMGweUOCgqC3Z8/f+7p6dm4cWMIEuRr0LLTbaMTPg3PwfZffMeOHWAVWFpa7tq1Swg6LAgzh+HkyZMZHcIP8ueff0K7AFQXGBh4/vz5Q4cOMTmZTRDXKbhJu7R9+/b17t17+/btAQEBEEM/cOAA4Sx8Gp6D1VJcsmQJ6NBeDUEISUtLA+UMHDiwZcuW4L3w8/Pr0aPH3r175XK5ZtPHH3+cb5N2CXfv3q1WrZq/vz/E67p06fLzzz83bdqUcJkKFSrAQyomJmbatGmEy7BUiqDAjIyMMWPGQBOLIO959uwZSAssJU0K6Co9Pf3FixeaTfDTMUaHZpN2CbVr175169aKFSvA55GSkuLm5saPqXiY4TkmTZpEOIvhbyqalPXr1/v4+PTr148gWoBLhuQNokqlUvjMzMzUbIIWBNSQUDFqNmmXAE1TKyury5cvgxolEglUniNHjuRHQAicVebm5oSzsFSK4DkA+5AgeWFe7svKytKkQNuBqH8uZjQKZhMzzQ5UiaRAyAf8MV3UxMbGhoeHgwUO2X744QfCff7+++/27dsTzsJSKQotXFFMoKUA3s779+9reipAjAeEV65cOagD8226ffs2s0m7BPCdQsPV29u7khqoP48fP054AUgR4o2Es7DUVgQzhnmoI9DmBDnduHEDpAV6a9euHbhAr1y5kpqaevr06eDgYAh2Q11na2ubbxOornv37vnCEhCLmz9/PuSBX/jatWuhoaFgPRLuA9+oYcOGnG5JsbRWBFc72Dw4bS3DgAEDdu7cef36dQhXjB07FtS1aNEiiD1CdAcChhqLWucmsKC0Q8dfffUV2OFz584l6lfkoKXat29fwn2gSmzbti3hMiwN8cOdB084YbptStgzoyDg0QHVFf+FOA6F+DV07NgRWgScfheSpbXikCFDCGIk4AaVyWRcH/OmCKDp7unpyfV3kln68ANrBzwKBDES4OVXKBT5wv28AQzgNm3aEI7DUilCY2P37t0EMR4QRQRPGC97M/LAUCSsbaCCz4avj/AyxMHBAepGaKZyzhQsgoiICHAsl2SEO5bAUin279+fICYAYo9yNbwZ94AfVSJhrRShKQXPb+4OoFgSrK2t4bsTUwKxosGDB5dkgEb2AIYiE5vhOiy9GEFBQa9evZo8eTIRHmZqiCkZM2ZMVlbW06dPq1evTrjM8+fPMzIyuP4tGFhqM9iqIYjJgLDtixcv/vrrL8JloHXKA98pA0ul2KNHj1GjRhHElMBN/PDhQ8JloHXKD0ORsFaKYCvyb0gvFjJ16lT4vHTpEuEgb9++jY2NbdCgAeEFLJXiiRMn1qxZQ5BSAR58XJy2CVqn7dq1I3yBpVK0sbERpvu0TPD395fJZIRr8MlQJMKZABwpDgcPHuRKR43s7Oz27dtfvHiR8AWW1oqZmZnv3r0jSOkCIc0///yTcIGzZ8/yxmHDwFIpnjt3TrBzspchnTt3dnR0JFyAT75TBpZK0crKCgdcLBNatWoFnytXriTshmeGIkFbEdFJSEiIWCwGdw5hJefPnz9y5Mjy5csJj2BprZiVlQVRI4KUEdBS9fLyMu5gAkaEf4YiYa0UL1++/NNPPxGk7KhRo4ZIJGLeAWAb/DMUCWulCK48tBXLHJBijx49QkNDCZsICwurVasWMyQsn0BbEfkPEhISFAqFm5sbYQdLly6tVKkS/3q0srRWhAAuM/I8UuaUL18eIhwjR44k7ICXhiJhrRTDw8NnzZpFEHYglUonTZr077//krI
GzgEeDS4uLoR3sDeu6ODgQBDW0KBBA29v75s3b2pSOnXqNGjQIFK68C+cqIGlUqxXr96CBQsIwibAUwJu1U8//RSWu3fvnqgG2i+kFOHNSDYFYakUZTIZXGaCsAxQIwTWu3Tp8vLlS1h9/fo1RNtJaREVFSUWi3kw17dOWCrFBw8ezJgxgyDsY8KECaBAzWppSpGX4UQNLJWipaUl18dd5yUBAQHx8fGaVQg8QuPl6tWrpFTgsaFIWCtFsEkgfEQQliFRox2LTk5OPnPmDDE98Ah4+/YtBPcJT2Hp4ItyuRziiuC2JgibOHLkyLFjx06fPh0dHf3mzZv09HSoGK9fvw7Xy9QDRvLYYcPA0rdtIiIiZs+evXfvXqKePv7w4cME0eLIuviEuMwcuVKRU1qXD45TOvNQ8exAFDQlKAtLiW9rJ9/2RY0nyq5acf78+YcOHYIHLTwgKIpq1KgRLJt6qGzOsXtxnDybbuDnVLWuvUKp/nEoijCPVGbaNljWTmFWmXR1kvo2BOuEIkpae3danYvKm0u1RDFlalKYA9Ekf06idcS8iXlOMm852pkp9f/3J0PRWpk1u6tRbSV5UvIfUfPtCp4Aef9DffhNCp6krrPNdzAq3wlQpGDFZk5y0sndS++unXwjtaVqNrMhhcCuWjE2Nvbrr7+Oi4vTpCiVyhYtWuDobxq2zom1dbDsPAKb7txj35KYynWsOwzS/aoQu9w2EDLy8/PTTnFwcBg8eDBB1IQGvVMoadQhR+k40O1JeGphW1nnQR0wYIC3tzezDDV2tWrVWrZsSRA1Uf+mOLnyZAYoAeLkaU5JSFiI7j7xrJNixYoVP/nkE6bZjFViPrKzFFZ2pnVUIiZFLCaJr3QPOcvGuOLAgQMrV64MavTx8cnXXhU48mylPJt7YwcjGsDfliPT7YYsqQf1ZbQ88mbq6/jM7AwlmDG04oMTSCSmYPXDOji8KIrWcmpRIlUaTX/wHIlEjPeObu0z07dcarly5XYujFV5qkR5d6RyP5XKPCcD5RNwvhb4pmbmcDISSyuRi7tF/U/sre3FBEHKAriTqUKqPwOl+Oh6+rWTiSlvc0AhYrFaZCLwL1MibW1QtLYXWO2XpgtEc+i862ovNqHNRQ7lHBxIDklPVjnYaUpB0fm/ga6wEMRAVP/ypaqUTBS0UhYXkXH9TKJILHKuYN5xSEWn8nyY6xPhFoWFLPS+Fx9cTbsQ/BrqWUtbc/ca5RzcuTfEyKvIlORXKbsXx9jYS/p97WnDoUpS/dAjCGdRNe6MIsWdPz1NSZLbl7f1qOtMOItrVTv4g4WYG/G/zY32rGbTa3wFwglogmMRcRqw2uBP9yZSbNbNiJJni+q09+a0DrXxblyhXsfKr15kb/4+hiCI6VGCnaTQ/TAtrhTXTI0sV9nJp3lFwjuqf+xhJrXcMiuWsB8REYuxgcphKHGhbptiSXHN1Cc+jb1cvG0JT6nU2MXMynzDt1GE5SiJQoENVA6jiikodW/6bymu/yaqQg0XqSPPAwBeDV2ldpa/zY0hbIYi6LXhNODML6w7yH9IceePT82lFs6efBuJWSdeDctnZ9FHN8cT1kIT9NpwGqYXik6KkuK/l1NT3uX4NOOId9EY1PzYK+Z+GkEQU1GoC7woKYYGv3GsaEMEhYRIbcx3LuCCCwfhIoU3agqV4r+XUuUypVttnsQtik+Vj9yTE+WEnahsRZYOR4QUC6pQc7/Q6xp2+q2FDXv744TfPT1t1kdp6SaYg1FEJObio5teEhaishWVpNSJiops277JnTu3CFJC6ELN/UKlmJ4sd60i0OEPbZ2t42OziMA4HLR/4eI5Ojc5ODgOHTLK1VVAXgMToXdcMfZ+JnzauQi0l6qTt0N2ZhlUPmXLo0f3C9vk5OQ8fNjYChV4+IJHKaN3XPHxrRSxmQltkpindzZunzRrQYfFK/sFH/8lKyudSQ+9cmDu4i4Jr2OWrhoI7c/lqweH3Tyq2etoyCrYuvDnviFnNiqVJpybWmojhkdXzL1swnGYhuWVKxc/7d951OiBTGLIiT/HTxzWJaAVfP5xcA/j0ft6yugTJ4+ePPkX5H8c8fDgoX19+3W6GPpPe/9mq9Ysy9dA1VnC5i1rArr7yeUfzOx9v+/w79Q8IyOjsF2KplefDkFHDqxesxwO3buv/5Kl86CombOnwurQYX3hVDU5//33zoz/TezRs+2QwD5r1/2cnp57O/0w75t58789depYx84t4NDY4YQ0AAALwUlEQVSTp4xJTn63fcemdh2aQuHr1q/UnMbTpzFTpo7t1qN1z97tv5r8xa3w60y69u/w8y+LoJBdu7dqjqtQKHr0anfw4F5SbNTvoBaySWdqcmKOSGIqKb5JjNuw7Uu5PHvi6M2Bgxa/TIhYt3WcQqGSllhilpmZGvTXsv69vls670r9uu32B/349p0q0Hfp2sFL1/7oEzD9qzG/OTu6nfp7CzEl0Ix4HsW6qIZIpJ/XhhmbdMeuzZ/1HzJ1ykxYPn0mZPGSH6pXq7lnV/CokRNAFavXLof0lSs21qpVt2PHgL/PXIet5ubmGRnpwcF/fPvNvN4980wqWlgJbdt0BKlcu3ZJk/PCxb9bNP/EysqqsF3+8+T3/b7dy8v7xPFLsNfxkODJU0a3b9f51Ikrbdv4L10+PzVNNU7Ms+dx02aMz8rOWr3qt/k/wFMjArLl5KhuJ4lEcu/f2/B34Pfj69fuhAWQmVKpOBp8bs7sRfsP7Lp6VTWh8tu3SRO/HA7N740b9qxZ9Zujg9P8H79jniDav0O/voPgO54+c1xzhqDY1NSUFi316N2ufgdV9ybdF1aWrTTdWx03b4dIxGbDBi4u7+JdwdWnX8/vn798dO/BOWarQiH3bzuqkmc9iqKaNAyA59bzl48h/eLl/fXrtAdxWlnZNW3UrapPE2JSKJKRzLr+8qoONko9LgzTo6ppk+b9Ph1cq2YdWD52LKh+fd+vv/rG0dGpkW/T4YFjg4L2w71YcMesrKwBAwI7tO/s4eGlvamwEqpUqebm5gHyY7IlJr65f/9uu3adin/QglSrWrNH976ghzat/WG1Tp36IEIQGEgCxPY0NhoST58+biYxAxGCaL29faZNnRUR+QjqMaYEmUw2ccI0e3uHSpUq+1SuKhaLoaUNTwffhk3AAH4SFQF5Dvyx29zCYtrUmW4V3eHLTp82OzMz40jwgYK/Q0DXXrGx0VA+U/i5c6dr1qgNexFjUMgzFhq0tKm0CK1TT4/a1ta50yc6OVZ0dvKIjv0wN5iXex1mwUqq6sqUmZUKgnyTFFfetbImj4dbTWJSVE161oUN4JxoovfrNtWr5Q5ur1QqoWZo2qSFZpOvb1NIvHNXt2u0Zo06+VKKLsG/Q5cLF88y49aev3BWKpW2+riNvgfVBtTFLFhbq9738vauwqxKpVbwCTUSUbVOb9esWQfExmwCgxaeCJrC3d09NcOWS62svCv5aAq3trJOU9erUdGR1arVBIVrjuXpUenx4wcFfwd4FoAgQfxEHao/d/6Mv38A0Qv10B
U60d1fUSIRUbSpBgLOzEqLe34fTEHtxJTUD1O4Fewdm5WdDu0KCwsrTYq5uZSYEjAiLK148totPPKZBagiwJbbsnUt/GlnKKyCguooX0rRJXRo3wUssZu3wqAevnjx708+aQf3N9Qqeh1Um3x3gkik4+EIcnr46D4YkHkKT0rUuYvOEpIS34BitVMspdKMzAzNqvbv0KtHv117to4d8xW0TqHy7NChC9EL9dNUJ7qlaO9s/vZ1BjENtrbOlSs17NRutHaitbV9EbtYWliLRGK5/EOAIVtmqtNjgGeei4cl4ReWlpbQNuvoH+Dn11473a2ih1FKgBoDmqmhof9Ur14r/PaNRQt/NcpBi8bJuVy9eg2h2amdaG+nx5TVVtbWYGpqp2RmZHi4e+nM7N8xYP3GX67fuHr5yoWWLfzsbO2IPqjGttGrVvSoaRVxJ4WYBrfy1W7cPubj7at5RMW/inJx9ipiFzh9R4eKMU/vtv44N+XBo1BiMsCwBvO69kfse+mPUo8/XwKqVKkO3g6wlJhVqK9evnzu6qrHGMdFlwBW3NGjhypV8rGzswez0FgHLep8fKqdPPVXg/qNNLdTTExUPvu2aGpUrw3eY80MPCmpKbFPo8GDpTMzaK9N6w5gJYI5Ok3tCdMLMPz0ewe1DtyFSpKVZhK/hV/LgWAqBB//WSbLevU69uiJ1ctXD3qZEFn0Xg3qdrh7/+/wu6dh+eyFHbHP7hGTkRCRJJawsTMSnBOlLFHXjC9GToRa69jxI3AJ7t4NB1//lGljodlJ1GbVgwf3oHlZdNOxiBKANm384xNehoQEt23bEXwkxdmlhHz66WAoFlyy0BKOi4vdsPHXEaM+A/Ov+CV07943PT1t+YoFCQnxIOOFi2ZbWlh27dKrsPxdu/Zi/KjNm7cixqNQz4S5pejlw/9uzRsAuECnTdxjbiZduT5wya/9o2Ju9uv1/X+6YTq0Hv5R455Bx5aDkQlVYo8uXxNiqoFeUl6nO1c0J+yDNsBpkxdoy21cvxsihBCpgxgA3II/zl9hoTYmuwf0gdbH9BkTGL+iASUA7m4eNarXgshk+7adirlLCYFqasvm36WW0jHjPod4IzSMp0+bBYGT4pfg4e4JsY3o6MgBg7pBfBVSflm5mXEU6QSqd7CB/Tt01Xh6ik8Rb9sUOn3N+cOJ9y69q93OmwiPuyejBk6rVM6ddWpcN+OJe1Vp28/cCFJ2PHr8YNz4oTu2HdSrGcywe8ETj+rSbqN0XMFCa0W/3s4g0qSnguu8F30j3sxCzEIdvge78ZcZkZGPQ0PP/bRw1sABgQbosGiKqmGr+9pEhL9x8tLtvXiXnLBs9SCdm6QWNpnZujVcwcVn4uhNxHjMXNC+sE0KRY5YrOMLenvWGzV0ZWF7pSdldh9pnKCt0VH3sOFPN/7uPdoUtul//5sLMUnCMjZu+jXs+hV//64jho8jBqH2oOp+mP7H/IrQIrJxsfGsW67gJrCVs7PTde4lz5GZSQqpVShKamlMz2RmZqGzZBUmRYiLaIcotYkIfWZuSQJnViKsZN3/nrhX4U8DlXlzTSdg+xlgibGfIhqo//Ftxy2ssnp6pE4pgu9YKtU9Bpxpo+/5jiU12jh0b2JT5Vk5X/xYhbAWfg1sY2vD2zEEC0N1AfXtOpyLmLTpU/7B2RjCexQQw0gcv4zFOkS4j0qF+nYd1lC3lW2fiR73TsUQ/pL+Jvvfv6MnLGG7DkWqByq6bThMETNJFeuN5/KVLNoPLH/vVHRCxDvCO57eehV968WE5VUJ6985VdI03xqpAkP1to3BQxIz1GpqM2p+laRnyY8vxGWm8aSH+7vn6ff/jpVlZE1cUZVwAZUHFWtFnqKHk8rSmhq3xOfw2hdRV5+amYsd3O1cfewJN3l+Lyn1dZpCqajRxL7DABfCEWicR4rjQANVVMhbxHr7i3uPV/lhj6x78TL23auoJLFEbG4pEZuJRPAHLTzqfYUJD/APkwlT0LRSz3ya5yTU68V4xlM6GmX55zkV5e1TK1JNL/GhALFIqVBNvCzLkMuz5bCzmZmoSgM7/8HlCLdQxaRQjBwGGqjKQt4iNjB003OcSpAJsdm3z71LjJelJmfnJKuGINd0csyjRLGqr0NBKeamawE7wUMj37mqsinzqJHxBmtXEPAYyJ0MXH3gfCVLzFS3sMRcZGMnKu9l90lvFwspN5t5NAqRt5QoigrunI5DjdPVBUEEDk5GzyVEZpTYZMN/IaWASCISSYxkKyJliKW5RCFDKXIYCCva2OruHYbXlUu4eFokJghu2HI+IZMp/frqHnQfpcgluo4oL89URN3h/FjJwuTI6jiXwrukUxip4hwbpke517Bt3Y8z4VAkNVF2Ylu8i7tZtzGFTnaAUuQk2+Y9zUzLgRBOjqx4l+99bJYJMtG63mTVjj/p3BegKdU/8l9Hyb/8IUP+gJb2vpR2gEo7UEwV8rafOl1z2povVZwvkrvM7EAXmVOTojkGXWS2vCngpIGYPoQTXb0s+0wsqncbSpGrpCWTiJvJmWnFmwry/e0JAVa44sxngTy673jwNCg/xIjzvDtR4CCF3E6aoxfxBm1eKeaJS4sonWFx5nB5To9JV9/6RZ9e7u8AOWmdXSUKnKm6w69qd+0zK5irwKEpscje2bw4oweiFBGEFWAwA0FYAUoRQVgBShFBWAFKEUFYAUoRQVgBShFBWMH/AQAA///AFYj6AAAABklEQVQDAD0PKj4Rdd2sAAAAAElFTkSuQmCC", + "text/plain": [ + "" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Bind tools to LLM\n", + "llm_with_tools = llm.bind_tools(memory_search_tools)\n", + "\n", + "def chatbot(state: State):\n", + " response = llm_with_tools.invoke(state[\"messages\"])\n", + " return {\"messages\": state[\"messages\"] + [response]}\n", + "\n", + "def starting_system_prompt(state: State):\n", + " system_prompt = \"\"\"You are a helpful chatbot. 
You have a built-up knowledge store of user preferences from past conversations, \n", + "so if the user asks something that requires prerequisite knowledge, check your user preferences to see if that\n", + "information is present before clarifying with the user or asking them for it. \n", + "\n", + "Similarly, if the user asks about a previous conversation topic, check the summary tool to see if that information is\n", + "stored before asking the user for clarification.\n", + "\n", + "As a rule of thumb, if the user asks about something you don't know, check memory before asking.\n", + "\"\"\" \n", + " return {\"messages\": SystemMessage(system_prompt)}\n", + " \n", + "\n", + "# Build graph\n", + "graph_builder = StateGraph(State)\n", + "graph_builder.add_node(\"system_prompt\", starting_system_prompt)\n", + "graph_builder.add_node(\"chatbot\", chatbot)\n", + "\n", + "retrieve_memory_node = ToolNode(tools=memory_search_tools)\n", + "graph_builder.add_node(\"retrieve_memory\", retrieve_memory_node)\n", + "\n", + "graph_builder.add_edge(\"retrieve_memory\", \"chatbot\")\n", + "graph_builder.add_edge(START, \"system_prompt\")\n", + "graph_builder.add_edge(\"system_prompt\", \"chatbot\")\n", + "graph_builder.add_conditional_edges(\n", + " \"chatbot\", tools_condition, {\"tools\": \"retrieve_memory\", \"__end__\": \"__end__\"}\n", + ")\n", + "\n", + "graph = graph_builder.compile(checkpointer=memory)\n", + "graph" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "b29573af-9fd0-460a-a5e9-52c72cc1663f", + "metadata": {}, + "outputs": [], + "source": [ + "# Helper function to invoke the chatbot\n", + "def chat(user_input: str):\n", + " \"\"\"Send a message to the chatbot and display the response\"\"\"\n", + " events = graph.stream(\n", + " {\"messages\": [{\"role\": \"user\", \"content\": user_input}]},\n", + " config,\n", + " stream_mode=\"values\",\n", + " )\n", + " \n", + " for event in events:\n", + " # Print the last message (which will be the response)\n", + " event[\"messages\"][-1].pretty_print()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "eda04678-0584-449c-8a10-24cbcff4ea49", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "Hey claude, I'm off to my favorite coffee shop. Can you give me the latest updates on my favorite team?\n", + "================================\u001b[1m System Message \u001b[0m================================\n", + "\n", + "You are a helpful chatbot. You have a built-up knowledge store of user preferences from past conversations, \n", + "so if the user asks something that requires prerequisite knowledge, check your user preferences to see if that\n", + "information is present before clarifying with the user or asking them for it. \n", + "\n", + "Similarly, if the user asks about a previous conversation topic, check the summary tool to see if that information is\n", + "stored before asking the user for clarification.\n", + "\n", + "As a rule of thumb, if the user asks about something you don't know, check memory before asking.\n", + "\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "[{'type': 'text', 'text': \"I'll help you get updates on your favorite team, but I need to check which team you're referring to. 
Let me check your previous preferences.\"}, {'type': 'tool_use', 'name': 'retrieve_user_preferences', 'input': {'query': 'favorite sports team'}, 'id': 'tooluse_bilPJfyYSsij3tjo9m3COw'}]\n", + "Tool Calls:\n", + " retrieve_user_preferences (tooluse_bilPJfyYSsij3tjo9m3COw)\n", + " Call ID: tooluse_bilPJfyYSsij3tjo9m3COw\n", + " Args:\n", + " query: favorite sports team\n", + "=================================\u001b[1m Tool Message \u001b[0m=================================\n", + "Name: retrieve_user_preferences\n", + "\n", + "[{\"content\": \"{\\\"context\\\":\\\"User directly mentioned being a huge fan of Sunderland premier league team and expressed interest in playing soccer but is currently prevented by a knee injury\\\",\\\"preference\\\":\\\"Strong support for Sunderland football team and enjoys playing soccer, wants to be physically active\\\",\\\"categories\\\":[\\\"sports\\\",\\\"football\\\",\\\"soccer\\\",\\\"physical activity\\\"]}\", \"score\": 0.35400036, \"metadata\": {}}, {\"content\": \"{\\\"context\\\":\\\"User explicitly mentioned enjoying pumpkin spice latte and coffee during fall weather\\\",\\\"preference\\\":\\\"Enjoys pumpkin spice latte and hot coffee, especially during cold weather\\\",\\\"categories\\\":[\\\"food\\\",\\\"beverages\\\",\\\"coffee\\\",\\\"seasonal preferences\\\"]}\", \"score\": 0.3535085, \"metadata\": {}}, {\"content\": \"{\\\"context\\\":\\\"User mentioned recently making slow cooker meals\\\",\\\"preference\\\":\\\"Enjoys cooking, particularly slow cooker meals\\\",\\\"categories\\\":[\\\"cooking\\\",\\\"food preparation\\\"]}\", \"score\": 0.3531025, \"metadata\": {}}]\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Based on your preferences, I can see you're a big fan of Sunderland football team! Let me get you the latest updates on Sunderland FC:\n", + "\n", + "Sunderland AFC currently plays in the EFL Championship (the second tier of English football) after being relegated from the Premier League a few years ago. Recent developments would include their current position in the league table, recent match results, upcoming fixtures, transfer news, and any updates about key players or the manager.\n", + "\n", + "For the most current and detailed information about Sunderland's recent performances, standings, and upcoming matches, I'd recommend checking:\n", + "- The official Sunderland AFC website\n", + "- BBC Sport's Sunderland page\n", + "- Sky Sports' Championship coverage\n", + "- The Sunderland Echo newspaper\n", + "\n", + "Would you like me to find some specific information about Sunderland, such as their most recent match result, upcoming fixtures, or current league position?\n" + ] + } + ], + "source": [ + "chat(\"Hey claude, I'm off to my favorite coffee shop. Can you give me the latest updates on my favorite team?\")" + ] + }, + { + "cell_type": "markdown", + "id": "46ca86e9-96b5-40cc-a08d-b67fa64941dd", + "metadata": {}, + "source": [ + "## Conclusion\n", + "\n", + "The AgentCore Memory Retrieval tool works great for helping give the chatbot context into previous conversations and user preferences to learn the user and assist them more accurately. \n", + "\n", + "This notebook is not a one size fits all approach. You can add tools at different Nodes or provide different system instructions on when to use the long term memory search. 
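For example, here is a minimal sketch of a more deterministic variant - a node that searches user preferences on every turn before the LLM runs. This is hypothetical: it assumes the `retrieve_agentcore_memories` helper from `langchain_aws.memory`, a signature along the lines of `(memory_client, memory_id, namespace, query)`, and a `memory_client`, `MEMORY_ID`, and preferences namespace like those configured earlier in this notebook.\n", + "\n", + "```python\n", + "def inject_preferences(state: State):\n", + "    # Hypothetical deterministic hook: search long-term memory on every turn\n", + "    # instead of letting the model decide when to call the retrieval tool.\n", + "    query = state[\"messages\"][-1].text()\n", + "    memories = retrieve_agentcore_memories(\n", + "        memory_client, MEMORY_ID, \"/preferences/user-1\", query  # assumed namespace\n", + "    )\n", + "    notes = \"; \".join(str(m) for m in memories)\n", + "    return {\"messages\": [SystemMessage(f\"Relevant user preferences: {notes}\")]}\n", + "```\n", + "\n", + "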
If you want preferences or summaries to be added to the LLM context automatically, along the lines of the sketch above, see the other sample notebook, which implements pre- and post-model hooks with a more deterministic approach, such as semantically searching for user preferences every time before the LLM is invoked.\n", + "\n", + "For more documentation, please see: https://docs.aws.amazon.com/bedrock-agentcore/latest/devguide/memory.html" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f5a2937e-67f4-4a6d-a299-61cf2059dcb8", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.15" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/samples/memory/langgraph_stateful_agent_with_memories.ipynb b/samples/memory/langgraph_stateful_agent_with_memories.ipynb new file mode 100644 index 00000000..e174eb33 --- /dev/null +++ b/samples/memory/langgraph_stateful_agent_with_memories.ipynb @@ -0,0 +1,542 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ac2f5e94-92be-4358-ba5a-33a0009b46d0", + "metadata": {}, + "source": [ + "# LangGraph Agent with Bedrock AgentCore Memories\n", + "\n", + "This notebook walks through creating a simple LangGraph agent with a chatbot (LLM) node and a tool-calling node. \n", + "\n", + "This agent integrates with Amazon Bedrock AgentCore Memory to store messages so that the agent can pick back up where it left off if the conversation is interrupted. \n", + "\n", + "Before and after each time the LLM node is invoked, the previous `User` message and the generated `LLM` message are saved to Bedrock AgentCore Memory. If the session is interrupted, the agent detects that no messages are present in the state and then lists the past 10 events for that session from AgentCore Memory. These messages are loaded into the state so that the agent has the previous context.\n", + "\n",
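+ "For reference, a quick sketch of the listing call this flow relies on - the same `list_agentcore_memory_events` helper is invoked directly later in this notebook:\n", + "\n", + "```python\n", + "recent_messages = list_agentcore_memory_events(\n", + "    memory_client, MEMORY_ID, ACTOR_ID, SESSION_ID, 10\n", + ")\n", + "```\n", + "\n",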
+ "### Prerequisites for this sample\n", + "- Amazon Web Services account\n", + "- Amazon Bedrock AgentCore Memory configured - https://docs.aws.amazon.com/bedrock-agentcore/latest/devguide/memory-getting-started.html#memory-getting-started-create-memory\n", + "- Amazon Bedrock model access - https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "b51ead6e", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_aws import ChatBedrockConverse\n", + "\n", + "from typing import Annotated\n", + "from typing_extensions import TypedDict\n", + "\n", + "from langgraph.graph import StateGraph, START, END\n", + "from langgraph.graph.message import add_messages\n", + "\n", + "from langgraph.checkpoint.memory import InMemorySaver\n", + "from langgraph.prebuilt import ToolNode, tools_condition\n", + "\n", + "from langchain_core.messages import SystemMessage\n", + "\n", + "from langchain_aws.memory.bedrock_agentcore import (\n", + " store_agentcore_memory_events,\n", + " list_agentcore_memory_events,\n", + ")\n", + "\n", + "from bedrock_agentcore.memory import MemoryClient\n", + "\n", + "config = {\"configurable\": {\"thread_id\": \"1\"}}\n", + "memory = InMemorySaver()\n", + "\n", + "llm = ChatBedrockConverse(\n", + " model=\"us.anthropic.claude-3-7-sonnet-20250219-v1:0\",\n", + " max_tokens=5000,\n", + " region_name=\"us-west-2\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "826c8a58-b1ec-43e8-8103-fab8ad85e422", + "metadata": {}, + "source": [ + "## Configure AgentCore Memory\n", + "\n", + "AgentCore short-term memories are organized by a Memory ID (the overall memory store) and then categorized by Actor ID (i.e., which user) and Session ID (which chat session). \n", + "\n", + "Long-term memories are stored in namespaces. Please see the long-term memory search example notebook for more information. By configuring different strategies (e.g., Summarization, User Preferences), these short-term memories are asynchronously processed into long-term memories in the specified namespace." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "e2d189d3", + "metadata": {}, + "outputs": [], + "source": [ + "REGION = \"us-west-2\"\n", + "MEMORY_ID = \"YOUR_MEMORY_ID\"\n", + "SESSION_ID = \"session-10\"\n", + "ACTOR_ID = \"user-1\"\n", + "\n", + "# Initialize the memory client\n", + "memory_client = MemoryClient(region_name=REGION)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "1cfdcff5-19c2-4b68-a371-9cadb70907af", + "metadata": {}, + "outputs": [], + "source": [ + "config = {\"configurable\": {\"thread_id\": \"1\"}}\n", + "memory = InMemorySaver()\n", + "\n", + "class State(TypedDict):\n", + " messages: Annotated[list, add_messages]" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "189596c2-f725-4c11-bb59-8e58ceed51b2", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.tools import tool\n", + "\n", + "@tool\n", + "def multiply(a: int, b: int) -> int:\n", + " \"\"\"Multiply two numbers.\"\"\"\n", + " return a * b" + ] + }, + { + "cell_type": "markdown", + "id": "820010bb-6c4e-4911-aeea-e7aa4b496109", + "metadata": {}, + "source": [ + "## Pre/Post Model Hooks\n", + "\n", + "For this implementation, short-term memories (message events) are stored before and after each model invocation. Before the model runs, the previous user message is saved. 
After the model runs, the LLM message is saved. \n", + "\n", + "If the conversation is just starting (only the new user message is present in the state), AgentCore Memory is checked for previous events under that actor/session combination. If any exist, those messages are folded into a special system message so that the LLM has the context from the previous, interrupted conversation." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e17dc78a-8f7a-476c-9399-b97f6d9357b6", + "metadata": {}, + "outputs": [], + "source": [ + "def pre_model_hook(state):\n", + " \"\"\"Store the previous user or tool message before the LLM responds, or load recent history when a session is resumed\"\"\"\n", + " try:\n", + " last_message = state[\"messages\"][-1]\n", + " # Only the new user message is present - this may be a resumed session\n", + " if len(state[\"messages\"]) == 1:\n", + " # Load the last 10 events from memory\n", + " recent_messages = list_agentcore_memory_events(\n", + " memory_client, MEMORY_ID, ACTOR_ID, SESSION_ID, 10\n", + " )\n", + " print(\"No messages in history, attempting to load from AgentCore memory\")\n", + " \n", + " if recent_messages:\n", + " print(f\"{len(recent_messages)} recent messages found in AgentCore memory, loading them into context.\")\n", + "\n", + " # Build a context block summarizing the recovered history\n", + " context_content = \"No messages in history, attempting to load from AgentCore memory\"\n", + " for i, msg in enumerate(recent_messages, 1):\n", + " # Extract message details\n", + " if hasattr(msg, 'content'):\n", + " content = msg.content\n", + " role = getattr(msg, 'type', 'unknown')\n", + " elif isinstance(msg, dict):\n", + " content = msg.get('content', str(msg))\n", + " role = msg.get('role', msg.get('type', 'unknown'))\n", + " else:\n", + " content = str(msg)\n", + " role = 'unknown'\n", + " \n", + " context_content += f\"\"\"\n", + " Message {i}:\n", + " Role: {role.capitalize()}\n", + " Content: {content}\n", + " ---\"\"\"\n", + " \n", + " context_content += \"\"\"\n", + " \n", + " === END HISTORY ===\n", + " \n", + " Please use this context to continue our conversation naturally. 
You should reference relevant parts of this history when appropriate, but don't explicitly mention that you're loading from memory unless asked.\n", + " \"\"\"\n", + " # Create a special system message with the previous context\n", + " ai_context_msg = SystemMessage(content=context_content)\n", + " state['messages'] = [ai_context_msg] + [last_message]\n", + " # Note: in this branch the resumed user message itself is not persisted to memory\n", + " return state\n", + " else:\n", + " print(\"No past agentcore messages found.\")\n", + "\n", + " # Store the last message (user or tool message) as before\n", + " print(f\"Storing event pre-model: {last_message}\")\n", + " store_agentcore_memory_events(\n", + " memory_client=memory_client,\n", + " memory_id=MEMORY_ID,\n", + " actor_id=ACTOR_ID,\n", + " session_id=SESSION_ID,\n", + " messages=[last_message]\n", + " )\n", + " except Exception as e:\n", + " print(f\"Memory operation failed: {e}\")\n", + " \n", + " return state # Return state unchanged if messages exist or if there was an error\n", + "\n", + "\n", + "\n", + "def post_model_hook(state):\n", + " \"\"\"Store the LLM response after it's generated\"\"\"\n", + " try:\n", + " # Get the last message (the LLM response)\n", + " if state[\"messages\"]:\n", + " last_message = state[\"messages\"][-1]\n", + " print(f\"Storing event post-model: {last_message}\")\n", + " store_agentcore_memory_events(\n", + " memory_client=memory_client,\n", + " memory_id=MEMORY_ID,\n", + " actor_id=ACTOR_ID,\n", + " session_id=SESSION_ID,\n", + " messages=[last_message]\n", + " )\n", + " except Exception as e:\n", + " print(f\"Memory storage failed: {e}\")\n", + "\n", + "\n", + "def chatbot_with_hooks(state: State):\n", + " # Pre-hook: store the incoming user/tool message OR load recent history if this is a resumed session\n", + " modified_state = pre_model_hook(state)\n", + " \n", + " # LLM call\n", + " response = llm_with_tools.invoke(modified_state[\"messages\"])\n", + " \n", + " # Create the new state with all messages including the response\n", + " new_state = {\"messages\": modified_state[\"messages\"] + [response]}\n", + " \n", + " # Post-hook: store the LLM response\n", + " post_model_hook(new_state)\n", + " \n", + " return new_state" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "8022ef56-6f71-4bf8-9bf3-a9ac254f6b7b", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAANgAAAD5CAIAAADKsmwpAAAAAXNSR0IArs4c6QAAIABJREFUeJztnXlcVNXfx8+dnVlhFnaQRQQBFRSjyBXM3QRzr1+av9K0RUqzrEzTFn20tEwlTCvJFBX3JXNJVAwVEBQQQZF9h2FmmGH2ef6YHuLBAUHnzj3DPe8Xf9y55845n5n5cO73nhUzmUwAgSAaCtECEAiAjIiABWREBBQgIyKgABkRAQXIiAgooBEtADq0akNDpValMKgUeoPepNPaQfMW04FCY2BsHo3No7h4OxAt50nAUDuiGVWLviizpThX2VSjcXRmsHlUNo/GF9J0Gjv4fugsirRGq1LoaQys9K7KL5TrN5DjP5BLtK4egIwITCbTtRONNSWtEi+WXyjHM4BNtKKnQqs2Fue2lN9rrbzfGjVF1G8wj2hF3YLsRrx7XX5hf13UFNHgaCeitVgZhVR37USjSqEf+x9XDh/2GIzURrx8uJ5KB89PkRAtBEeaajVHt1WNmeviHQR1TU9eI/51sE7owhg0wpFoIbbgWELlsxNFLt4sooV0CkmNeCKxyiuQHTaSFC40c2xHZdBQfmAEpCEjGdsRr51ocPd3IJULAQBTF3tkXZQ2VGmIFmIZ0hmx6JYCADAkprc9mnSHOSu8Lx+uNxlhvAeSzoipKfXho8noQjN+A7hXjzUQrcIC5DLirUvSoAi+A5dKtBDCCBvpWHSrRSnXEy2kI+QyYkme8rkpQqJVEMyIaeLs1GaiVXSEREYsyVfS6BQqlUQf2SLeQZzcNBnRKjpCol/l4R2l7wCOjQv96KOPjh079gRvfOGFFyorK3FQBBgsisSTWXm/FY/MnxgSGbGpTutvcyPm5+c/wbuqq6ulUikOcv6hXzi34r4Kv/yfALIYUas2NlRqHLh4dbmmpaUtWrRo2LBhsbGxq1evbmhoAABERERUVVWtW7du1KhRAICWlpaEhIR58+aZL9u8ebNarTa/PSYmZt++fW+88UZERERqauqUKVMAAFOnTl22bBkeajkCen0FZA2KJnLQVKtJ+rIEp8zv3r07ZMiQnTt3VldXp6WlzZ49+6233jKZTGq1esiQIUePHjVftnPnzsjIyHPnzt28efPixYsTJkz47rvvzEnjxo2bMWPGxo0b09PTdTrdlStXhgwZUlFRgZPg2tLW/d+U4ZT5kwH7oAxroZTpOQK8Pmx2djaLxVqwYAGFQnF1dQ0ODr5///6jl73yyisxMTG+vr7mlzk5OdeuXXv33XcBABiGCQSC5cuX46SwAxwBTSmDqwWHLEY0GgHDAa84JCwsTK1Wx8fHR0ZGjhgxwsvLKyIi4tHL6HT633//vXr16sLCQr1eDwAQCv9tSwoODsZJ3qNQaBiDBVdUBpca/ODwqbJ6HU6ZBwUFff/99xKJZOvWrXFxcUuWLMnJyXn0sq1btyYmJsbFxR09ejQjI+O1115rn8pgMHCS9yjKZj2VhtmsuO5AFiOy+TQVnt0JUVFRq1atOnHixJo1a2QyWXx8vLnOa8NkMqWkpMyaNSsuLs7V1RUAoFAo8NPTNUq5HrahsmQxogOHKvZg6nVGPDLPzMy8du0aAEAikUyePHnZsmUKhaK6urr9NTqdrrW11dnZ2fxSq9VevnwZDzHdQaMyOnsxiSrdImQxIgDAgUstvqPEI+ecnJwVK1YcPnxYKpXm5ubu379fIpG4ubkxmUxnZ+f09PSMjAwKheLj43P8+PGKiorm5ua1a9eGhYXJ5XKl0oIkHx8fAMC5c+dyc3PxEFyYpXDpA9cgWRIZ0TeU8zAXFyO+8sorcXFxmzZteuGFFxYuXMjhcBITE2k0GgBgwYIFN2/eXLZsWWtr61dffcVisaZPnx4bG/vMM8+8/fbbLBZrzJgxVVVVHTL09PScMmVKQkLC1q1b8RBckq/yDbF1237XkGiEtlZjPLWrOm6JB9FCCKbsnqr4Tsuo6c5EC/l/kKhGZDApzp7MrIs4dp3ZBdeON4Q8JyBaRUfgenTCm6jJom3LH3Q2c9RoNEZHR1tM0mq1dDodwyw0efj5+e3evdvaSv8hOzs7Pj6+p5L69euXmJho8V2FWQonF4bEA64nFXLdms3kXG42Gk3hoyx7sbMmFY1Gw2Ra/vEwDONycVxT4QkkUSgUDsdyCHhqV9XwOAlfSLeqRitAOiMCAE7vrg6M4NnXihxWAeYPTqIYsY2JC9z+PtlYV64mWohNSU2pF7kx4HQhSWvEf/o5vqt4dpLI3le66SapKfXO3sz+Q/lEC+kUMtaI5sBuerzXzT+leenQDZq3LiaT6diOSr6QBrMLyVsjtvH3qYaHeaqoySKfYLgaeK1CxrmmvHT56JnO3oGwV/xkNyIAoLFKc+1kI9OB4hHg4BvCYfPsvkmrvkJTeleZeUE6cLhj5AQhhQLXQBuLICP+Q+WD1ns3FQ/zlE4udKELgyOgcfg0joBqMBCtrBtgmEnRpFfKDSajqTCrhcWh9B3EHTjcEbZBh12AjNiRmpLW+kqtUqZXyvUUCqZSWNOJra2txcXFISEhVswTAMB1ogET4PCpPCeau78Dzwm6ZsLHgoxoUx48eLBy5coDBw4QLQQ67KbqRvRukBERUICMiIACZEQEFCAjIqAAGREBBciICChARkRAATIiAgqQERFQgIyIgAJkRAQUICMioAAZEQEFyIgIKEBGREABMiICCpAREVCAjIiAAmREBBQgIyKgABkRAQXIiAgoQEa0KRiGte1wgWgPMqJNMZlMdXV1RKuAEWREBBQgIyKgABkRAQXIiAgoQEZEQAEyIgIKkBERUICMiIACZEQEFCAjIqAAGREBBciICChARkRAATIiAgqQERFQgDb8sQWzZ89WqVQAAK1W29jY6ObmZt6C/uzZs0RLgwVUI9qCqVOn1tTUVFVVNTQ0mEymqqqqqqoqHo9HtC6IQEa0BbNnz/b29m5/BsOwYcOGEacIOpARbQGGYdOmTaNSqW1n+vTpM2vWLEJFwQUyoo2YOXOml5eX+RjDsJEjR5ojRYQZZEQbQaPRZs+ezWQyAQCenp7Tp08nWhFcICPajmnTpnl6egIAoqKiUHXYARrRAghGpzVKa7QtchvtUz8l5vVzxnOjnplVnKu0QXEUCnByZgjEdrCPOKnbEdNPNxbdaqEzKTwh3aDrhd8D15FWXqgUiOmDo528A9lEy+kK8hoxNaUewyjhMSKiheCOTmM8l1Q5bKrIoy+8XiRpjJh2vIFCJYULAQB0JmXi616XDjXUV2qI1tIpZDSiollXW6oOG00KF7bx3BRJ5nkp0So6hYxGbKrWYlTSfXCBmFFWoCJaRaeQ7vcAAMileqELk2gVtobBovJEdLXKRu0DPYWMRgRGoNMaiRZBAIomHYZhRKuwDCmNiIAPZEQEFCAjIqAAGREBBciICChARkRAATIiAgqQERFQgIyIgAJkRAQUICMioAAZ8amYMWvCT7u2PU0Oq9esWLZ8sfUU2SvIiARw5OiBrzesfpocHj58MHvuZOspIh5kRAK4dy//aX
MofNocYIPss/i6icFgOHho7697EgEAwf0HzJ+3aMCAMHMSjUY/fCQ54cctDAYjNDRs5UdrBXyBudI6fuJQ1q2bNTVVPn38Jk6MnfridABA/PsLc3KyAAB//nnqx4TfzPPtMzKvJyfvyc3L8ffv9+47K/oFBJkzT0tL/XVPYmnZQ4HAsW/fwKXvfOji4vrzLwl7kn4CAIyOiThz6iqLxSL0u7EOqEbsFok7tx47dnDt55s+/fhLicTlw5XvlJWVmJNSL59XKls2rN/6wfLPcnOzf/55h/n8tu3f3Lz599J3P1z/9fcTJ8Z+9/2G9OtpAIAt3yb27x86duykvy5kmA1XWvbw6LEDc+e+9tWXW4xG46er3jfPaMvIvP7Zmg/Gjp10YP/p1avW19ZWb/l+PQDgtflvzp71qouL618XMnqHC1GN2C0ULYoDB3+LX/rR0IhnAQCRkc+rVMrGpgZvbx8AAJvN+c8r/zVfmXYt9fadW+bjVau+VqmUbq7uAIDwsIg//jh+4+a1ZyOffzR/qbQp/t2PxGIJAODV/7yx8uOlOTlZYWFDdv+8Y8Tw6OkvzQUACASOSxa/v/yDJQX38oMCg237BdgCZMTHU15WAgAICgoxv6TRaGs/39iWOiA0rO1YwHfUav5vppzJdPjw/us30srLS80n3Nw8LObv7xdgdiEAIDRkEACgqroiLGxIcXHRyBExbZcF9gsGABQU5CEjkpQWZQsAgMW0fBOk0f79DtsG4huNxo8+XqrTad94/e2wsAgel/fO0v92lj+Hw207ZrPZAAC5XNbS0qLRaJjtCjUnqVS2WCLC9qAY8fFw2JyeOqCwqKCgIG/xm+8NHzaax+UBAFpaFJ1d3KpubTs2m57PF5iDP3W7JKVKCQAQCcVP8VHgBRnx8fj4+NNotJzbWeaXJpPpo4+Xnj17sou3yGTNAACJ2Nn8sqSkuKSkuLOLy8oeqtVq87G5ZcfTw5tGowX265+Xd7vtMvOxn3+AlT4WXCAjPh4Oh/PCmInHjh0888fxW9kZW3/YmJl5vX//0C7e4tPHj0ajJR9IkivkZWUlW3/YODTi2ZraanOqh4fX3bu5WbduSqVNAAAWy2HTN+vkCnlzs3Tv77udnV3MbUNxsbOupl1KSdknV8hvZWds3/Ht4PChAX0DAQCent6NjQ1Xr14yGCCdHtpTkBG7xdJ3PwwLi/jm2y/fX/bmnTvZa9dsND8yd4aLi+snH3+Rf/fO1Njojz997/X/vvXii9Pv3s2d99p0AMCUSdMwDPtgxVsPiot0el1oyCBvb98ZM8fPmDXBYDB8se5bc6w5duyk/y5YknwwaWps9Ib/WTNwQPhnq7425/9s5LABoWGrVi/XarW2+g7whYyLMN25Kqst10ZOlBAtxNbs21A8b5UP0wHG2gdGTQgSgoyIgAJkRAQUICMioAAZEQEFyIgIKEBGREABMiICCpAREVCAjIiAAmREBBQgIyKgABkRAQVkNCKdQWGyyPjBRW5MCrUb1xEBGX8PoRu94j68W9/ghKxRq5Lr6QxIf3FIZeGKsxeLwcQ0rb1kbHM3qStr7RvO7caFxEBGIwIAhsWKz++tIlqF7agqVhVclz03Ed7tB8k4QttMY7Xm0JaKiPESgZjOFdB75deAYaCpRqNo0j7IUcz+wItCgXTbKVIbEQCgVRtv/tl491YtFWNRTLaY4m00mXQ6HZPBwCl/pUqFYRiVSqVQKBQKRezBwjDgHcgeNMIRpxKtBakn2FPpJnFgk6E67fVFi2xT4oMHD1au/PTAgQM45b9y5cqzZ89iGObk5MTlcpkFTHd39376foNGwL4EI3lrxD179kyaNInD4dhyHSOFQpGZmTlq1Cic8i8oKIiPj29oaGh/0mg0urm5nTp1CqdCrQJJH1ZSUlKkUqlIJLLxalo8Hg8/FwIAgoKC+vfv3+Ekh8OB3IVkNOLFixcBAM8///zSpUttX3p9ff327dtxLWLu3LlOTk5tLykUypUrV3At0SqQy4jr168vLi4GALi6uhIiQC6XX7p0Cdcihg4d6u/vb464jEajn5/fsWPHcC3RKlDXrFlDtAZbcP/+faFQyOFwJk2aRKAMOp3u6enp49PVKhFPD5vNvnHjhkaj8fT0TElJOXDgQFpa2vDhw3Et9CkhxcPKypUrY2JixowZQ7QQ2/Hyyy/X1taeP3/e/DIlJeXIkSO//fYb0bo6x9SrUSgU5eXlZ8+eJVrIP9TV1W3bto2QovPz84cMGZKbm0tI6Y+lN8eI69ata2ho8PT0HDt2LNFa/sEGMWJn9O/fPyMjY8OGDYcOHSJEQNf0WiOmpKQMGDAA72ispzg7Oy9ZsoRAAXv27CkqKvr8888J1GCRXhgjJiYmLly4UKvVMnDrSbN3jh8/vnfv3qSkJHi+ot5WI3722WeOjo4AAHi+4vbYoB2xO7z44otffvnlyJEjs7OzidbyfxAdpFqNS5cumUym+vp6ooV0xf3792fMmEG0in9ZsGDB3r17iVZh6j0PKy+//LJ5lVWxGOq1zgmPETuwa9eu6urqTz/9lGgh9h8jVlRUODs7FxcXBwUFEa3FXjlz5szOnTuTkpI4HA5RGuy4RtTr9W+88YZarWYwGPbiQkhixA5MmDBh8+bNEyZMuHnzJlEa7NWIJpMpLS1t8eLFffv2JVpLDyCwHbFr+vTpc/ny5V27dv3666+ECLA/IxqNxvfee89kMo0cOXLw4MFEy+kZsMWIHUhISJDJZCtWrLB90fYXI65evTomJmbEiBFEC+m1XLhwYcuWLUlJSeaGMBtB9GN7D/jll1+IlvC0ENjX3CMqKyujo6OvXr1qsxLt5tY8fvz40NCuNnuyC6CNETvg7u5+4cKF5OTkn376yTYl2sGtOSsra/DgwWq1uhdsko33nBWrs2PHjsLCws2bN+NdENQ1olKpHDduHJ/PBwD0AhfaYM6K1Vm8eHFcXNy4cePq6urwLclmQUBPUSgUhYWFkHfZ9RR7iRE7UF9fP378+OzsbPyKgLRGPHz4cFZWVkBAAORddj2FxWLdunWLaBU9RiwWnzlzZtu2bZWVlTgVAekE+6KiIp1OR7QK68Pj8bZv397a2ophmN0FG1lZWe7u7jhlDmmN+Oabb06ePJloFbhAp9MdHBySk5Orq6uJ1tIDCgoKAgMDzSNL8ABSIwoEAgI74G3AvHnz4uPjiVbRA+7evfvo1H0rAqkRf/zxx5MnTxKtAl+Sk5MBAOXl5UQL6Rb5+fnBwcH45Q+pEWUymVKpJFqFLUhNTc3MzCRaxePBu0aEtEFbJpPRaLTefXdu44svvoBhaGrXREREZGRk4Jc/pDVir48R22N2YXp6OtFCOiU/Px/X6hBeI5IhRuxARUXF2bNniVZhGbzvy/AakTwxYhvTp0+Xy+VEq7AM3k8q8Bpx0aJFvbUdsQtmzJgBANi3bx/RQjpC3hqRVDFiB0QiEVSrghiNxqKiosDAQFxLgdSIJIwR2xg7dixUK6XY4L4MrxFJGCO2JyIiwrxqBdFCgG3uy/AakZwxYgfi4uL27
t1LtAobGRHS0TcCgYBoCcQTHh7u4uJCtAqQn58/Z84cvEuBtEYkc4zYHvOwq7i4OKIE6PX6hw8fBgQE4F0QpEYkeYzYgYSEhKSkpPZnbLb0qG2eVFBfs92g1Wq1Wi2VSnVwcJg4cWJtbe24ceO++uorvMtNTk4uLS21wZR7FCPaBwwGg8FgDBs2zNHRsa6uDsOwvLy8pqYmoVCIa7n5+flDhw7FtQgzkN6aUYxoEZFIVFNTYz5uamqywU4+tnlkhteIKEZ8lJdeeqn93CWlUnnu3DlcS9RqteXl5f7+/riWYgbSW/OiRYtoNEi1EUJcXFxpaal5SzPzGQqFUlpaWlxc7Ofnh1OhNntSgbdGJHNfs0WOHDkSFxfn4+NjXhjJaDQCAGpra3G9O9vsvgxvjfjjjz96eHigzpX2rFq1CgBw+/btK1euXLlypbGxUSZVpV64Me3Fl3Eq8V5eWXh4uEKqf+IcTCbAF3bLY3A130RHR8tksjZJGIaZTCZXV9fTp08TLQ0uMs413b4qNWJ6vcbkgNv8aL1eT6XRnmYCqZMbs7JI1XcQJ3KiiC+kd3ElXDViVFTU6dOn28IgcyQ0ZcoUQkVBxx+/1nCF9AkLvLmOXf20kKDXGZvrtAe/q5j2loeTc6d7jsAVI86ZM6fDWgKenp426Oi0I878UuPkyhw0QmQXLgQA0OgUsQdr5vu+R7ZVyps6Xb0DLiOGhIS0XwQRw7Dx48fbdN1SuCnJVzIcqMHPOnXjWugYPcst/XRTZ6lwGREA8Oqrr7YtvOTp6Tlz5kyiFUFEXbmGzoTuJ+smTi7M+9mKzlKh+1TBwcEDBw40H0+YMMHJyS7/+3FCozKI3ZhEq3hCqDTMO5DTXK+1mAqdEQEA8+fPF4lErq6uqDrsgFJu0NvzGmlNtdrOlnF62qfmqgcqWYNeqdCr5AajAej1xqfMEAAAgGhY4GIOh5NxRgNA7dNnx3SgYABj86lsPlXkzpS422ul0ot5QiOW3lUWZrUU5yqdXB1MJoxKp1LoVAqVaq1WydCBowAACiv1NreoMKPBYKjUG7RqnVqmUxv8B3KCIngufexshcJeTI+NWP2w9fKRRjqbgdGY/s850ehUfIThiLZV39igTD0qdWCD4bEiRwmMG+qSjZ4Z8fy++qpitchXyHGy47qE4UATegkAAPI6ZcrWqv7P8KImi4gWRXa6+7Ci1xl/WVuqNjC9B7vbtQvbw3fm+D/nVVdDObINr6WhEd2kW0Y06E2JK4vdgl24ol44IsbRg08X8Pdvso8FM3srjzei0WjaseJBcIwvk2MffUpPAFfE5nsIf/2ilGgh5OXxRtz7dVlAlIdNxBAJ25El9HI8tcueFljvTTzGiJdSGhy9HJkcUjxX8py5OsDMTm0mWggZ6cqIjVWah7lKnoRrQz0E4+guuHq0AaoxmiShKyNePtoo9sV3tiKEuPZzunK0kWgVpKNTI9aUtOoNFJ6EbVs93SX7zvnlqyJblFKr5yz2caws1mhaDVbP2U6JnTZmTxLum+V2asT7OUqM2msfkx8DRinJUxEtwjp8vvaj02eOEa3i8XRqxAe3lTxnSKtDvGELOUXZLUSrsA737uUTLaFbWO7ik9ZpHXh0/B6WS8pu//nXT+UV+VyOU//AYWNHv85icQAAaekHz6XuXrxgx579K2vrit1c+o6ImjN08D9z+U7+sTUj5zSTwQ4fOM5Z7I2TNgAA35ldnQfpuuo9YnRMBABg46Z1OxI2nzh2CQCQlpb6657E0rKHAoFj376BS9/50MXF1XxxF0ltpF9PS07eU3AvTygUh4YOWvj6OyKRdbaPtVwjtjTr1a1WGdBlgYbG8h9/eUen07y98Kd5czdU1xbt2L3YYNADAKg0emur4uipTTNjP964Nn1gaPSBo19Im2sAANdupFy7cWjapA+WLvpZ5OR+7q9dOMkzT1FokeqU8iefRgkJf5xOAwB8sHyV2YUZmdc/W/PB2LGTDuw/vXrV+tra6i3frzdf2UVSG4VFBSs/XhoePvSX3YfefWfFgweFG/5njbWkWjaiSm6g4jasJivnDxqVPn/OBheJj6uz34ypn1RW38u9m2pONRh0L4x+vY/XAAzDIsImmUymyupCAMDVvw8MDIkZGBrNZvOHDp7c1y8CJ3lmGCyqUmb3RuzA7p93jBgePf2luQKBY0jIwCWL309Pv1pwL7/rpDZy72SzWKxXXl7g4uIa+UzUNxt3zJkz31raOjGiQk9l4DXTtKTstpdnMIfzz5QooZObSOj5sDS77QJvjxDzAduBDwBoVStMJlNDU7mLs2/bNZ7uQTjJM0N3oKrsv0bsQHFxUVBQSNvLwH7BAICCgryuk9oIHRCmVqtXfhJ/8NDeispygcAxPMxq1UGnbsMAXo26reqW8sr85asi25+UK/5tunt0NLlaozQaDUzmvw9PDIYDTvLMGA0A4LY3MSG0tLRoNBom89+RU2w2GwCgUim7SGqfQ7+AoPVff3/58oXEnVu379g8ZPAz8+ctCg0dZBV5lo3I5tMMOrVVCngUHk/k2ydsXPTC9ic5nK4WRGQxORQKVddOkkaLb/OKQWvg8OFafeApYbFYAAC1urXtjFKlBACIhOIukjpkEvlMVOQzUa/NfzMz83rK4X0ffxJ/5PB5KtUKUZzlWzObRzXo8GrRdXcJaJbV+PmE9/UbYv7jcp2cxV3tLIJhmJOjW0nZnbYzd++l4STPjFZtYPPtb/B5F9BotMB+/fPybredMR/7+Qd0kdQ+h+zszOs3rgEAxGLJuHGT31qyTNGiaGiot4o8y0bkC2l0Bl43phFRc4xG4/Ezm7VadV196cmzP3zzw9zq2vtdv2tQ6Jg7+X9l3zkPALh4ZU9pRS5O8swj37iOtF5QIzKZTInEOSMj/VZ2hl6vj4uddTXtUkrKPrlCfis7Y/uObweHDw3oGwgA6CKpjdy8nDWfrzhx8nBzszT/bu7hI/vFYolYLLGKVMvftUDM0KsNaoWWxbN+UyKbzV/+9u9/XUnakjCvrr7E2zNkRuwnj334GDPyNaVSevT0N78d+MS3T9iLE+J/P/gZTqMT5LVKJ+de0qv08twFP/+ScOPmtX2/nxw7dlJ9Q13ywaQftn/j4uIaMeTZN15/23xZF0ltzJzxSnOz9Idtm77d/BWDwYgePW7zt4lWuS93tRrY36caK0pMEj8yzm+vyqsbGsMNCOcRLaQjf/xa4+7P9R1gr+Ohjmwtnfqmu0Bs4Z+80y6+voM4Jn1va7/oJhhm8A3phZMiYKbTMEjiyXJgm2S1SoGL5Z+kWVa36QfL63Q5MLmtGst9ta4Sv7cX7nxStRb49MuYzpIMBj2VauEDenuGLJz3fWfvqi+W+gY70BgwroHRi+kqHh8xTXxoS2VnRuRxhe8vSbKYpNWqGQzLM/0oFCs/AXSmAQCg1WkYdAuLOtBonQa+RoOx/qFsxlu2WL4c0Z6ubCEQ0ftHchvrFTyJhWiJSqUJndwtvc+mWFeDvFo2aoZ1evERPeIxN6CoyWJVQ4uqGa/GbaiQVcu5
HGNwJNpriAAeHwnNet+z7FaNTt3LH1yaa1pam1rGzHUmWghJ6VZIvmiDX1FaeS+uF2U1LUCtnL3ci2gh5KVbRsQwbMmmvvLKJnltpyt+2i/ScikDa41dTHy8S2Z60Egxe7mXSGQoTq+Q1/WSzcmklfKCS6W+gbQJ8zsORUbYmJ41pjw/RRQcybt8pLHhgcpEpfMlHHtch6RVrlHUq4wajdidPnFNH6ZDrxrcYKf0uFXPyZkxdZFbTYm6KLvlwe1aJptmNGJUBpVKp1JoVIDbKManAcMwvc5g1Or1WoO2Vcd0oASEcfsNlqCVEeHhCZuXXX1Yrj6s4bFLafUMAAABBUlEQVTiphqtrEGnlOuVMr1BbzToYTQig4VRqBQOn83mU8UeDK7A/mrxXs/T9nMIXRlCV1SvIJ4W1KNqT3AENLte9EDoyuwseENGtCccOJSGSg3RKp4QndZYUagUiC3fP5ER7QmXPiydxl4X5Wmq0XQxxBMZ0Z7w6sfGMHDrol0uVnbx96rnX+x00Xy49mtGdIfLh+t1OpP/QL7I3Q5W1VfK9bJ6zV/7a/7ziTen8/YKZES7JPdvWd41uVpl0OC2MoxVkHgwm+u0vgM4z08Rd72dJTKiHWMyAa0aaiOajCYWp1sdV8iICChADysIKEBGREABMiICCpAREVCAjIiAAmREBBT8LxNhB/DtPHnJAAAAAElFTkSuQmCC", + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Bind tools to LLM\n", + "llm_with_tools = llm.bind_tools([multiply])\n", + "\n", + "# Build graph\n", + "graph_builder = StateGraph(State)\n", + "graph_builder.add_node(\"chatbot\", chatbot_with_hooks)\n", + "\n", + "tool_node = ToolNode(tools=[multiply])\n", + "graph_builder.add_node(\"tools\", tool_node)\n", + "\n", + "graph_builder.add_edge(\"tools\", \"chatbot\")\n", + "graph_builder.add_edge(START, \"chatbot\")\n", + "graph_builder.add_conditional_edges(\n", + " \"chatbot\", tools_condition, {\"tools\": \"tools\", \"__end__\": \"__end__\"}\n", + ")\n", + "\n", + "graph = graph_builder.compile(checkpointer=memory)\n", + "graph" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a77465a6-1005-402a-b911-cbde002b6732", + "metadata": {}, + "outputs": [], + "source": [ + "# Helper function to invoke the chatbot\n", + "def chat(user_input: str):\n", + " \"\"\"Send a message to the chatbot and display the response\"\"\"\n", + " events = graph.stream(\n", + " {\"messages\": [{\"role\": \"user\", \"content\": user_input}]},\n", + " config,\n", + " stream_mode=\"values\",\n", + " )\n", + " \n", + " for event in events:\n", + " # Print the last message (which will be the response)\n", + " event[\"messages\"][-1].pretty_print()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "3b02dd91-a304-457b-b308-0bfa9d6ec8f3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "Hello, world!\n", + "No messages in history, attempting to load from AgentCore memory\n", + "No past agentcore messages found.\n", + "Storing event pre-model: content='Hello, world!' additional_kwargs={} response_metadata={} id='68931e20-11b2-423d-bbef-d4a1a7508ec1'\n", + "Storing event post-model: content=\"Hello! Welcome! How can I assist you today? I have a function available that can multiply two numbers. Would you like me to help you with a multiplication calculation? 
If so, please provide me with the two numbers you'd like to multiply.\" additional_kwargs={} response_metadata={'ResponseMetadata': {'RequestId': '1280187f-a22d-4ec8-97d6-74b84b42437d', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Fri, 29 Aug 2025 21:39:47 GMT', 'content-type': 'application/json', 'content-length': '531', 'connection': 'keep-alive', 'x-amzn-requestid': '1280187f-a22d-4ec8-97d6-74b84b42437d'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': [2574]}, 'model_name': 'us.anthropic.claude-3-7-sonnet-20250219-v1:0'} id='run--76244606-9264-45c1-b380-7afe009e2170-0' usage_metadata={'input_tokens': 387, 'output_tokens': 53, 'total_tokens': 440, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}}\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Hello! Welcome! How can I assist you today? I have a function available that can multiply two numbers. Would you like me to help you with a multiplication calculation? If so, please provide me with the two numbers you'd like to multiply.\n" + ] + } + ], + "source": [ + "chat(\"Hello, world!\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "80436df7-d4c9-41a2-bff7-37cded3e4aea", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[AIMessage(content=\"Hello! Welcome! How can I assist you today? I have a function available that can multiply two numbers. Would you like me to help you with a multiplication calculation? If so, please provide me with the two numbers you'd like to multiply.\", additional_kwargs={'event_id': '0000001756503587000#f06f3cff'}, response_metadata={}),\n", + " HumanMessage(content='Hello, world!', additional_kwargs={'event_id': '0000001756503584000#5a553d5c'}, response_metadata={})]" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list_agentcore_memory_events(memory_client, MEMORY_ID, ACTOR_ID, SESSION_ID, 10)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "b8a14359-2fd6-45c2-a7a0-a5977c258dbc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "What's the result of 1337 multiplied by 78?\n", + "Storing event pre-model: content=\"What's the result of 1337 multiplied by 78?\" additional_kwargs={} response_metadata={} id='d81510bb-c9a8-474a-9264-0d3e87bbd2fd'\n", + "Storing event post-model: content=[{'type': 'text', 'text': \"I'll calculate the result of 1337 multiplied by 78 for you.\"}, {'type': 'tool_use', 'name': 'multiply', 'input': {'a': 1337, 'b': 78}, 'id': 'tooluse_7ku3y30NS1-Vb-99T2r1Fw'}] additional_kwargs={} response_metadata={'ResponseMetadata': {'RequestId': 'b813c905-eabd-46f7-901d-7f30daead916', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Fri, 29 Aug 2025 21:40:00 GMT', 'content-type': 'application/json', 'content-length': '456', 'connection': 'keep-alive', 'x-amzn-requestid': 'b813c905-eabd-46f7-901d-7f30daead916'}, 'RetryAttempts': 0}, 'stopReason': 'tool_use', 'metrics': {'latencyMs': [2296]}, 'model_name': 'us.anthropic.claude-3-7-sonnet-20250219-v1:0'} id='run--9ca724ca-8c14-46b3-9236-34a075737ed0-0' tool_calls=[{'name': 'multiply', 'args': {'a': 1337, 'b': 78}, 'id': 'tooluse_7ku3y30NS1-Vb-99T2r1Fw', 'type': 'tool_call'}] usage_metadata={'input_tokens': 458, 'output_tokens': 89, 'total_tokens': 547, 
'input_token_details': {'cache_creation': 0, 'cache_read': 0}}\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "[{'type': 'text', 'text': \"I'll calculate the result of 1337 multiplied by 78 for you.\"}, {'type': 'tool_use', 'name': 'multiply', 'input': {'a': 1337, 'b': 78}, 'id': 'tooluse_7ku3y30NS1-Vb-99T2r1Fw'}]\n", + "Tool Calls:\n", + " multiply (tooluse_7ku3y30NS1-Vb-99T2r1Fw)\n", + " Call ID: tooluse_7ku3y30NS1-Vb-99T2r1Fw\n", + " Args:\n", + " a: 1337\n", + " b: 78\n", + "=================================\u001b[1m Tool Message \u001b[0m=================================\n", + "Name: multiply\n", + "\n", + "104286\n", + "Storing event pre-model: content='104286' name='multiply' id='656db2b6-f80e-42a6-8d96-f9da85d6a4d3' tool_call_id='tooluse_7ku3y30NS1-Vb-99T2r1Fw'\n", + "Storing event post-model: content='The result of 1337 multiplied by 78 equals 104,286.' additional_kwargs={} response_metadata={'ResponseMetadata': {'RequestId': '494b7af6-139d-4ef9-ae37-e0ff5f12700e', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Fri, 29 Aug 2025 21:40:01 GMT', 'content-type': 'application/json', 'content-length': '344', 'connection': 'keep-alive', 'x-amzn-requestid': '494b7af6-139d-4ef9-ae37-e0ff5f12700e'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': [996]}, 'model_name': 'us.anthropic.claude-3-7-sonnet-20250219-v1:0'} id='run--ab7051b2-cb8c-4e9f-b931-34b1af1944b7-0' usage_metadata={'input_tokens': 560, 'output_tokens': 23, 'total_tokens': 583, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}}\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "The result of 1337 multiplied by 78 equals 104,286.\n" + ] + } + ], + "source": [ + "chat(\"What's the result of 1337 multiplied by 78?\")" + ] + }, + { + "cell_type": "markdown", + "id": "8e910ec4-9757-4ed8-bdcd-aef2461c25fa", + "metadata": {}, + "source": [ + "## Clearing and loading the previous memories\n", + "\n", + "For this sample, we will clear the in-memory state so that no messages are present. The messages from the conversation will be loaded from Bedrock Agentcore memory so that the agent can continue where the user left off." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "cfd5a83e-c1be-478e-aac0-2f0b6f5297f9", + "metadata": {}, + "outputs": [], + "source": [ + "def clear_memory(memory, thread_id: str) -> None:\n", + " \"\"\" Clear the memory for a given thread_id. 
\"\"\"\n", + " try:\n", + " # If it's an InMemorySaver (which MemorySaver is an alias for),\n", + " # we can directly clear the storage and writes\n", + " if hasattr(memory, 'storage') and hasattr(memory, 'writes'):\n", + " # Clear all checkpoints for this thread_id (all namespaces)\n", + " memory.storage.pop(thread_id, None)\n", + "\n", + " # Clear all writes for this thread_id (for all namespaces)\n", + " keys_to_remove = [key for key in memory.writes.keys() if key[0] == thread_id]\n", + " for key in keys_to_remove:\n", + " memory.writes.pop(key, None)\n", + "\n", + " print(f\"Memory cleared for thread_id: {thread_id}\")\n", + " return\n", + "\n", + " except Exception as e:\n", + " print(f\"Error clearing InMemorySaver storage for thread_id {thread_id}: {e}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "011f536e-f9bc-4a18-be8a-39d758e003e6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Thread ID: 1\n", + "✅ No conversation state found - memory is clear!\n" + ] + } + ], + "source": [ + "# Clear the in-memory messages\n", + "thread_id = config.get(\"configurable\").get(\"thread_id\")\n", + "print(f\"Thread ID: {thread_id}\")\n", + "memory.delete_thread(config.get(\"configurable\").get(\"thread_id\"))\n", + "\n", + "state = graph.get_state(config)\n", + " \n", + "if state and state.values and 'messages' in state.values:\n", + " messages = state.values['messages']\n", + " print(f\"📝 Found {len(messages)} messages in conversation state:\")\n", + " for i, msg in enumerate(messages):\n", + " print(f\" {i+1}. {msg.type}: {msg.content[:100]}...\")\n", + "else:\n", + " print(\"✅ No conversation state found - memory is clear!\")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "c99a2c72-d83f-4746-b0ee-bb902017f466", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "What numbers was I multiplying earlier?\n", + "No messages in history, attempting to load from AgentCore memory\n", + "6 recent messages found in AgentCore memory, loading them into context.\n", + "Storing event post-model: content='Based on our earlier conversation, you were asking about multiplying 1337 by 78. I calculated that for you, and the result was 104,286.' additional_kwargs={} response_metadata={'ResponseMetadata': {'RequestId': 'b2504f9f-0c3b-494d-b478-41cc44bca118', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Fri, 29 Aug 2025 21:40:12 GMT', 'content-type': 'application/json', 'content-length': '429', 'connection': 'keep-alive', 'x-amzn-requestid': 'b2504f9f-0c3b-494d-b478-41cc44bca118'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': [1470]}, 'model_name': 'us.anthropic.claude-3-7-sonnet-20250219-v1:0'} id='run--8f0bb0ac-588a-479d-aaa9-7392329cb1b6-0' usage_metadata={'input_tokens': 675, 'output_tokens': 38, 'total_tokens': 713, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}}\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Based on our earlier conversation, you were asking about multiplying 1337 by 78. 
I calculated that for you, and the result was 104,286.\n" + ] + } + ], + "source": [ + "chat(\"What numbers was I multiplying earlier?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "2e8d9f1e-7ac0-4099-994b-49da28f405e0", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[HumanMessage(content='What numbers was I multiplying earlier?', additional_kwargs={}, response_metadata={}, id='012b3c53-725c-4588-a0d4-46489bf80fdf'),\n", + " SystemMessage(content=\"No messages in history, attempting to load from AgentCore memory\\n Message 1:\\n Role: Ai\\n Content: The result of 1337 multiplied by 78 equals 104,286.\\n ---\\n Message 2:\\n Role: Ai\\n Content: I'll calculate the result of 1337 multiplied by 78 for you.\\n ---\\n Message 3:\\n Role: Tool\\n Content: 104286\\n ---\\n Message 4:\\n Role: Human\\n Content: What's the result of 1337 multiplied by 78?\\n ---\\n Message 5:\\n Role: Ai\\n Content: Hello! Welcome! How can I assist you today? I have a function available that can multiply two numbers. Would you like me to help you with a multiplication calculation? If so, please provide me with the two numbers you'd like to multiply.\\n ---\\n Message 6:\\n Role: Human\\n Content: Hello, world!\\n ---\\n \\n === END HISTORY ===\\n \\n Please use this context to continue our conversation naturally. You should reference relevant parts of this history when appropriate, but don't explicitly mention that you're loading from memory unless asked.\\n \", additional_kwargs={}, response_metadata={}, id='07cc8cda-0e33-4a83-88eb-3d429444b283'),\n", + " AIMessage(content='Based on our earlier conversation, you were asking about multiplying 1337 by 78. I calculated that for you, and the result was 104,286.', additional_kwargs={}, response_metadata={'ResponseMetadata': {'RequestId': 'b2504f9f-0c3b-494d-b478-41cc44bca118', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Fri, 29 Aug 2025 21:40:12 GMT', 'content-type': 'application/json', 'content-length': '429', 'connection': 'keep-alive', 'x-amzn-requestid': 'b2504f9f-0c3b-494d-b478-41cc44bca118'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': [1470]}, 'model_name': 'us.anthropic.claude-3-7-sonnet-20250219-v1:0'}, id='run--8f0bb0ac-588a-479d-aaa9-7392329cb1b6-0', usage_metadata={'input_tokens': 675, 'output_tokens': 38, 'total_tokens': 713, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}})]" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "state = graph.get_state(config)\n", + "state.values[\"messages\"]" + ] + }, + { + "cell_type": "markdown", + "id": "1cfe5d85-b047-4143-a57c-93f7de7ba3d2", + "metadata": {}, + "source": [ + "## Conclusion\n", + "As you can see, Bedrock AgentCore memories can be saved and loaded easily using the short-term memory API and the helper functions implemented in the hooks. \n", + "\n", + "This is not a one-size-fits-all approach, and developers can use the storing/listing/searching memory functions in their own nodes, in pre/post model hooks, or as tools themselves; a sketch of the tool-based variant follows. Check out the other examples for various implementations.\n",
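+ "\n", + "A minimal sketch of exposing the same memory operations as agent tools. The factory helpers below are exported from `langchain_aws.memory`, but their exact signatures are assumed here - check the module for the real API:\n", + "\n", + "```python\n", + "from langchain_aws.memory import (\n", + "    create_list_memory_events_tool,\n", + "    create_store_memory_events_tool,\n", + ")\n", + "\n", + "# Assumed factory signatures: the client plus the identifiers used above.\n", + "memory_tools = [\n", + "    create_store_memory_events_tool(memory_client, MEMORY_ID, ACTOR_ID, SESSION_ID),\n", + "    create_list_memory_events_tool(memory_client, MEMORY_ID, ACTOR_ID, SESSION_ID),\n", + "]\n", + "llm_with_memory_tools = llm.bind_tools(memory_tools)\n", + "```"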
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7b3aca3-9308-4524-bd64-df6262a8831d", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.15" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}