diff --git a/README.md b/README.md
index bdbadf90..8f0972f4 100644
--- a/README.md
+++ b/README.md
@@ -48,16 +48,15 @@ pip install langgraph-checkpoint-aws
 Here's a simple example of how to use the `langchain-aws` package.
 
 ```python
-from langchain_aws import ChatBedrock
+from langchain_aws import ChatBedrockConverse
 
 # Initialize the Bedrock chat model
-llm = ChatBedrock(
-    model="anthropic.claude-3-sonnet-20240229-v1:0",
-    beta_use_converse_api=True
+model = ChatBedrockConverse(
+    model="us.anthropic.claude-sonnet-4-5-20250929-v1:0"
 )
 
-# Invoke the llm
-response = llm.invoke("Hello! How are you today?")
+# Invoke the model
+response = model.invoke("Hello! How are you today?")
 
 print(response)
 ```
diff --git a/libs/aws/README.md b/libs/aws/README.md
index b0715e47..330dc28e 100644
--- a/libs/aws/README.md
+++ b/libs/aws/README.md
@@ -17,13 +17,13 @@ Alternatively, set the `AWS_BEARER_TOKEN_BEDROCK` environment variable locally f
 
 ## Chat Models
 
-`ChatBedrock` class exposes chat models from Bedrock.
+`ChatBedrockConverse` class exposes chat models from Bedrock.
 
 ```python
-from langchain_aws import ChatBedrock
+from langchain_aws import ChatBedrockConverse
 
-llm = ChatBedrock()
-llm.invoke("Sing a ballad of LangChain.")
+model = ChatBedrockConverse(model="us.anthropic.claude-sonnet-4-5-20250929-v1:0")
+model.invoke("Sing a ballad of LangChain.")
 ```
 
 ## Embeddings
diff --git a/libs/aws/langchain_aws/chains/graph_qa/neptune_cypher.py b/libs/aws/langchain_aws/chains/graph_qa/neptune_cypher.py
index 795a661d..8349ce0e 100644
--- a/libs/aws/langchain_aws/chains/graph_qa/neptune_cypher.py
+++ b/libs/aws/langchain_aws/chains/graph_qa/neptune_cypher.py
@@ -106,13 +106,13 @@ def create_neptune_opencypher_qa_chain(
     See https://python.langchain.com/docs/security for more information.
 
     Example:
-        .. code-block:: python
-
+        ```python
         chain = create_neptune_opencypher_qa_chain(
             llm=llm,
             graph=graph
         )
 
         response = chain.invoke({"query": "your_query_here"})
+        ```
     """
diff --git a/libs/aws/langchain_aws/chains/graph_qa/neptune_sparql.py b/libs/aws/langchain_aws/chains/graph_qa/neptune_sparql.py
index d27dae99..3798f9ba 100644
--- a/libs/aws/langchain_aws/chains/graph_qa/neptune_sparql.py
+++ b/libs/aws/langchain_aws/chains/graph_qa/neptune_sparql.py
@@ -80,13 +80,13 @@ def create_neptune_sparql_qa_chain(
     See https://python.langchain.com/docs/security for more information.
 
     Example:
-        .. code-block:: python
-
+        ```python
         chain = create_neptune_sparql_qa_chain(
             llm=llm,
             graph=graph
         )
 
         response = chain.invoke({"query": "your_query_here"})
+        ```
     """
 
     if allow_dangerous_requests is not True:
diff --git a/libs/aws/langchain_aws/chat_models/bedrock.py b/libs/aws/langchain_aws/chat_models/bedrock.py
index 4320d332..186b2b7b 100644
--- a/libs/aws/langchain_aws/chat_models/bedrock.py
+++ b/libs/aws/langchain_aws/chat_models/bedrock.py
@@ -761,11 +761,11 @@ class ChatBedrock(BaseChatModel, BedrockBase):
     system_prompt_with_tools: str = ""
     beta_use_converse_api: bool = False
-    """Use the new Bedrock ``converse`` API which provides a standardized interface to
+    """Use the new Bedrock `converse` API which provides a standardized interface to
     all Bedrock models. Support still in beta. See ChatBedrockConverse docs for more."""
 
     stop_sequences: Optional[List[str]] = Field(default=None, alias="stop")
-    """Stop sequence inference parameter from new Bedrock ``converse`` API providing
+    """Stop sequence inference parameter from new Bedrock `converse` API providing
    a sequence of characters that causes a model to stop generating a response.
See https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_InferenceConfiguration.html
    for more.
@@ -1216,7 +1216,7 @@ def bind_tools(
                 (if any), or a dict of the form:
                 {"type": "function", "function": {"name": <>}}.
             **kwargs: Any additional parameters to pass to the
-                :class:`~langchain.runnable.Runnable` constructor.
+                [Runnable][langchain_core.runnables.Runnable] constructor.
         """
         if self.beta_use_converse_api:
@@ -1315,81 +1315,81 @@ def with_structured_output(
             outputs a BaseModel.
 
         Example: Pydantic schema (include_raw=False):
-            .. code-block:: python
+            ```python
+            from langchain_aws.chat_models.bedrock import ChatBedrock
+            from pydantic import BaseModel
 
-                from langchain_aws.chat_models.bedrock import ChatBedrock
-                from pydantic import BaseModel
+            class AnswerWithJustification(BaseModel):
+                '''An answer to the user question along with justification for the answer.'''
+                answer: str
+                justification: str
 
-                class AnswerWithJustification(BaseModel):
-                    '''An answer to the user question along with justification for the answer.'''
-                    answer: str
-                    justification: str
+            model = ChatBedrock(
+                model_id="anthropic.claude-3-sonnet-20240229-v1:0",
+                model_kwargs={"temperature": 0.001},
+            ) # type: ignore[call-arg]
+            structured_model = model.with_structured_output(AnswerWithJustification)
 
-                llm =ChatBedrock(
-                    model_id="anthropic.claude-3-sonnet-20240229-v1:0",
-                    model_kwargs={"temperature": 0.001},
-                ) # type: ignore[call-arg]
-                structured_llm = llm.with_structured_output(AnswerWithJustification)
+            structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
 
-                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
-
-                # -> AnswerWithJustification(
-                #     answer='They weigh the same',
-                #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
-                # )
+            # -> AnswerWithJustification(
+            #     answer='They weigh the same',
+            #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
+            # )
+            ```
 
         Example: Pydantic schema (include_raw=True):
-            .. code-block:: python
-
-                from langchain_aws.chat_models.bedrock import ChatBedrock
-                from pydantic import BaseModel
-
-                class AnswerWithJustification(BaseModel):
-                    '''An answer to the user question along with justification for the answer.'''
-                    answer: str
-                    justification: str
-
-                llm =ChatBedrock(
-                    model_id="anthropic.claude-3-sonnet-20240229-v1:0",
-                    model_kwargs={"temperature": 0.001},
-                ) # type: ignore[call-arg]
-                structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
-
-                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
-                # -> {
-                #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
-                #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. 
The weight is the same, but the volume or density of the objects may differ.'), - # 'parsing_error': None - # } + ```python + from langchain_aws.chat_models.bedrock import ChatBedrock + from pydantic import BaseModel + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + answer: str + justification: str + + model = ChatBedrock( + model_id="anthropic.claude-3-sonnet-20240229-v1:0", + model_kwargs={"temperature": 0.001}, + ) # type: ignore[call-arg] + structured_model = model.with_structured_output(AnswerWithJustification, include_raw=True) + + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") + # -> { + # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}), + # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'), + # 'parsing_error': None + # } + ``` Example: Dict schema (include_raw=False): - .. code-block:: python - - from langchain_aws.chat_models.bedrock import ChatBedrock - - schema = { - "name": "AnswerWithJustification", - "description": "An answer to the user question along with justification for the answer.", - "input_schema": { - "type": "object", - "properties": { - "answer": {"type": "string"}, - "justification": {"type": "string"}, - }, - "required": ["answer", "justification"] - } + ```python + from langchain_aws.chat_models.bedrock import ChatBedrock + + schema = { + "name": "AnswerWithJustification", + "description": "An answer to the user question along with justification for the answer.", + "input_schema": { + "type": "object", + "properties": { + "answer": {"type": "string"}, + "justification": {"type": "string"}, + }, + "required": ["answer", "justification"] } - llm =ChatBedrock( - model_id="anthropic.claude-3-sonnet-20240229-v1:0", - model_kwargs={"temperature": 0.001}, - ) # type: ignore[call-arg] - structured_llm = llm.with_structured_output(schema) - - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") - # -> { - # 'answer': 'They weigh the same', - # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' - # } + } + model = ChatBedrock( + model_id="anthropic.claude-3-sonnet-20240229-v1:0", + model_kwargs={"temperature": 0.001}, + ) # type: ignore[call-arg] + structured_model = model.with_structured_output(schema) + + structured_model.invoke("What weighs more a pound of bricks or a pound of feathers") + # -> { + # 'answer': 'They weigh the same', + # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' 
+ # } + ``` """ # noqa: E501 if self.beta_use_converse_api: diff --git a/libs/aws/langchain_aws/chat_models/bedrock_converse.py b/libs/aws/langchain_aws/chat_models/bedrock_converse.py index 519f7ce8..4db4e868 100644 --- a/libs/aws/langchain_aws/chat_models/bedrock_converse.py +++ b/libs/aws/langchain_aws/chat_models/bedrock_converse.py @@ -113,9 +113,9 @@ class ChatBedrockConverse(BaseChatModel): Once that's completed, install the LangChain integration: - .. code-block:: bash - - pip install -U langchain-aws + ```bash + pip install -U langchain-aws + ``` Key init args — completion params: model: str @@ -137,214 +137,214 @@ class ChatBedrockConverse(BaseChatModel): See full list of supported init args and their descriptions in the params section. Instantiate: - .. code-block:: python - - from langchain_aws import ChatBedrockConverse - - llm = ChatBedrockConverse( - model="anthropic.claude-3-sonnet-20240229-v1:0", - temperature=0, - max_tokens=None, - # other params... - ) + ```python + from langchain_aws import ChatBedrockConverse + + model = ChatBedrockConverse( + model="anthropic.claude-3-sonnet-20240229-v1:0", + temperature=0, + max_tokens=None, + # other params... + ) + ``` Invoke: - .. code-block:: python - - messages = [ - ("system", "You are a helpful translator. Translate the user sentence to French."), - ("human", "I love programming."), - ] - llm.invoke(messages) - - .. code-block:: python - - AIMessage(content=[{'type': 'text', 'text': "J'aime la programmation."}], response_metadata={'ResponseMetadata': {'RequestId': '9ef1e313-a4c1-4f79-b631-171f658d3c0e', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Sat, 15 Jun 2024 01:19:24 GMT', 'content-type': 'application/json', 'content-length': '205', 'connection': 'keep-alive', 'x-amzn-requestid': '9ef1e313-a4c1-4f79-b631-171f658d3c0e'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': 609}}, id='run-754e152b-2b41-4784-9538-d40d71a5c3bc-0', usage_metadata={'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36}) + ```python + messages = [ + ("system", "You are a helpful translator. Translate the user sentence to French."), + ("human", "I love programming."), + ] + model.invoke(messages) + ``` + + ```python + AIMessage(content=[{'type': 'text', 'text': "J'aime la programmation."}], response_metadata={'ResponseMetadata': {'RequestId': '9ef1e313-a4c1-4f79-b631-171f658d3c0e', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Sat, 15 Jun 2024 01:19:24 GMT', 'content-type': 'application/json', 'content-length': '205', 'connection': 'keep-alive', 'x-amzn-requestid': '9ef1e313-a4c1-4f79-b631-171f658d3c0e'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': 609}}, id='run-754e152b-2b41-4784-9538-d40d71a5c3bc-0', usage_metadata={'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36}) + ``` Stream: - .. code-block:: python - - for chunk in llm.stream(messages): - print(chunk) - - .. 
code-block:: python - - AIMessageChunk(content=[], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') - AIMessageChunk(content=[{'type': 'text', 'text': 'J', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') - AIMessageChunk(content=[{'text': "'", 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') - AIMessageChunk(content=[{'text': 'a', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') - AIMessageChunk(content=[{'text': 'ime', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') - AIMessageChunk(content=[{'text': ' la', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') - AIMessageChunk(content=[{'text': ' programm', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') - AIMessageChunk(content=[{'text': 'ation', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') - AIMessageChunk(content=[{'text': '.', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') - AIMessageChunk(content=[{'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') - AIMessageChunk(content=[], response_metadata={'stopReason': 'end_turn'}, id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') - AIMessageChunk(content=[], response_metadata={'metrics': {'latencyMs': 581}}, id='run-da3c2606-4792-440a-ac66-72e0d1f6d117', usage_metadata={'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36}) - - .. code-block:: python - - stream = llm.stream(messages) - full = next(stream) - for chunk in stream: - full += chunk - full - - .. code-block:: python - - AIMessageChunk(content=[{'type': 'text', 'text': "J'aime la programmation.", 'index': 0}], response_metadata={'stopReason': 'end_turn', 'metrics': {'latencyMs': 554}}, id='run-56a5a5e0-de86-412b-9835-624652dc3539', usage_metadata={'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36}) + ```python + for chunk in model.stream(messages): + print(chunk) + ``` + + ```python + AIMessageChunk(content=[], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') + AIMessageChunk(content=[{'type': 'text', 'text': 'J', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') + AIMessageChunk(content=[{'text': "'", 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') + AIMessageChunk(content=[{'text': 'a', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') + AIMessageChunk(content=[{'text': 'ime', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') + AIMessageChunk(content=[{'text': ' la', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') + AIMessageChunk(content=[{'text': ' programm', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') + AIMessageChunk(content=[{'text': 'ation', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') + AIMessageChunk(content=[{'text': '.', 'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') + AIMessageChunk(content=[{'index': 0}], id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') + AIMessageChunk(content=[], response_metadata={'stopReason': 'end_turn'}, id='run-da3c2606-4792-440a-ac66-72e0d1f6d117') + AIMessageChunk(content=[], response_metadata={'metrics': {'latencyMs': 581}}, id='run-da3c2606-4792-440a-ac66-72e0d1f6d117', usage_metadata={'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36}) + ``` + + ```python + stream = model.stream(messages) + full = next(stream) + for chunk in stream: + full += chunk + full + ``` + + ```python + AIMessageChunk(content=[{'type': 'text', 'text': "J'aime la programmation.", 'index': 0}], response_metadata={'stopReason': 'end_turn', 'metrics': {'latencyMs': 554}}, 
id='run-56a5a5e0-de86-412b-9835-624652dc3539', usage_metadata={'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36}) + ``` Tool calling: - .. code-block:: python + ```python + from pydantic import BaseModel, Field - from pydantic import BaseModel, Field + class GetWeather(BaseModel): + '''Get the current weather in a given location''' - class GetWeather(BaseModel): - '''Get the current weather in a given location''' + location: str = Field(..., description="The city and state, e.g. San Francisco, CA") - location: str = Field(..., description="The city and state, e.g. San Francisco, CA") + class GetPopulation(BaseModel): + '''Get the current population in a given location''' - class GetPopulation(BaseModel): - '''Get the current population in a given location''' + location: str = Field(..., description="The city and state, e.g. San Francisco, CA") - location: str = Field(..., description="The city and state, e.g. San Francisco, CA") + model_with_tools = model.bind_tools([GetWeather, GetPopulation]) + ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?") + ai_msg.tool_calls + ``` - llm_with_tools = llm.bind_tools([GetWeather, GetPopulation]) - ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?") - ai_msg.tool_calls + ```python + [{'name': 'GetWeather', + 'args': {'location': 'Los Angeles, CA'}, + 'id': 'tooluse_Mspi2igUTQygp-xbX6XGVw'}, + {'name': 'GetWeather', + 'args': {'location': 'New York, NY'}, + 'id': 'tooluse_tOPHiDhvR2m0xF5_5tyqWg'}, + {'name': 'GetPopulation', + 'args': {'location': 'Los Angeles, CA'}, + 'id': 'tooluse__gcY_klbSC-GqB-bF_pxNg'}, + {'name': 'GetPopulation', + 'args': {'location': 'New York, NY'}, + 'id': 'tooluse_-1HSoGX0TQCSaIg7cdFy8Q'}] + ``` - .. code-block:: python - - [{'name': 'GetWeather', - 'args': {'location': 'Los Angeles, CA'}, - 'id': 'tooluse_Mspi2igUTQygp-xbX6XGVw'}, - {'name': 'GetWeather', - 'args': {'location': 'New York, NY'}, - 'id': 'tooluse_tOPHiDhvR2m0xF5_5tyqWg'}, - {'name': 'GetPopulation', - 'args': {'location': 'Los Angeles, CA'}, - 'id': 'tooluse__gcY_klbSC-GqB-bF_pxNg'}, - {'name': 'GetPopulation', - 'args': {'location': 'New York, NY'}, - 'id': 'tooluse_-1HSoGX0TQCSaIg7cdFy8Q'}] - - See ``ChatBedrockConverse.bind_tools()`` method for more. + See `ChatBedrockConverse.bind_tools()` method for more. Structured output: - .. code-block:: python - - from typing import Optional + ```python + from typing import Optional - from pydantic import BaseModel, Field + from pydantic import BaseModel, Field - class Joke(BaseModel): - '''Joke to tell user.''' + class Joke(BaseModel): + '''Joke to tell user.''' - setup: str = Field(description="The setup of the joke") - punchline: str = Field(description="The punchline to the joke") - rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10") + setup: str = Field(description="The setup of the joke") + punchline: str = Field(description="The punchline to the joke") + rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10") - structured_llm = llm.with_structured_output(Joke) - structured_llm.invoke("Tell me a joke about cats") + structured_model = model.with_structured_output(Joke) + structured_model.invoke("Tell me a joke about cats") + ``` - .. 
code-block:: python + ```python + Joke(setup='What do you call a cat that gets all dressed up?', punchline='A purrfessional!', rating=7) + ``` - Joke(setup='What do you call a cat that gets all dressed up?', punchline='A purrfessional!', rating=7) - - See ``ChatBedrockConverse.with_structured_output()`` for more. + See `ChatBedrockConverse.with_structured_output()` for more. Extended thinking: Some models, such as Claude 3.7 Sonnet, support an extended thinking feature that outputs the step-by-step reasoning process that led to an answer. - To use it, specify the ``thinking`` parameter when initializing - ``ChatBedrockConverse`` as shown below. + To use it, specify the `thinking` parameter when initializing + `ChatBedrockConverse` as shown below. You will need to specify a token budget to use this feature. See usage example: - .. code-block:: python - - from langchain_aws import ChatBedrockConverse + ```python + from langchain_aws import ChatBedrockConverse - thinking_params= { - "thinking": { - "type": "enabled", - "budget_tokens": 2000 - } + thinking_params= { + "thinking": { + "type": "enabled", + "budget_tokens": 2000 } + } - llm = ChatBedrockConverse( - model="us.anthropic.claude-3-7-sonnet-20250219-v1:0", - max_tokens=5000, - region_name="us-west-2", - additional_model_request_fields=thinking_params, - ) - - response = llm.invoke("What is the cube root of 50.653?") - print(response.content) + model = ChatBedrockConverse( + model="us.anthropic.claude-3-7-sonnet-20250219-v1:0", + max_tokens=5000, + region_name="us-west-2", + additional_model_request_fields=thinking_params, + ) - .. code-block:: python + response = model.invoke("What is the cube root of 50.653?") + print(response.content) + ``` - [ - {'type': 'reasoning_content', 'reasoning_content': {'type': 'text', 'text': 'I need to calculate the cube root of... ', 'signature': '...'}}, - {'type': 'text', 'text': 'The cube root of 50.653 is...'} - ] + ```python + [ + {'type': 'reasoning_content', 'reasoning_content': {'type': 'text', 'text': 'I need to calculate the cube root of... ', 'signature': '...'}}, + {'type': 'text', 'text': 'The cube root of 50.653 is...'} + ] + ``` Image input: - .. code-block:: python - - import base64 - import httpx - from langchain_core.messages import HumanMessage - - image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" - image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8") - message = HumanMessage( - content=[ - {"type": "text", "text": "describe the weather in this image"}, - { - "type": "image", - "source": {"type": "base64", "media_type": "image/jpeg", "data": image_data}, - }, - ], - ) - ai_msg = llm.invoke([message]) - ai_msg.content - - .. 
code-block:: python + ```python + import base64 + import httpx + from langchain_core.messages import HumanMessage + + image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8") + message = HumanMessage( + content=[ + {"type": "text", "text": "describe the weather in this image"}, + { + "type": "image", + "source": {"type": "base64", "media_type": "image/jpeg", "data": image_data}, + }, + ], + ) + ai_msg = model.invoke([message]) + ai_msg.content + ``` - [{'type': 'text', - 'text': 'The image depicts a sunny day with a partly cloudy sky. The sky is a brilliant blue color with scattered white clouds drifting across. The lighting and cloud patterns suggest pleasant, mild weather conditions. The scene shows an open grassy field or meadow, indicating warm temperatures conducive for vegetation growth. Overall, the weather portrayed in this scenic outdoor image appears to be sunny with some clouds, likely representing a nice, comfortable day.'}] + ```python + [{'type': 'text', + 'text': 'The image depicts a sunny day with a partly cloudy sky. The sky is a brilliant blue color with scattered white clouds drifting across. The lighting and cloud patterns suggest pleasant, mild weather conditions. The scene shows an open grassy field or meadow, indicating warm temperatures conducive for vegetation growth. Overall, the weather portrayed in this scenic outdoor image appears to be sunny with some clouds, likely representing a nice, comfortable day.'}] + ``` Token usage: - .. code-block:: python + ```python + ai_msg = model.invoke(messages) + ai_msg.usage_metadata + ``` - ai_msg = llm.invoke(messages) - ai_msg.usage_metadata - - .. code-block:: python - - {'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36} + ```python + {'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36} + ``` Response metadata - .. code-block:: python - - ai_msg = llm.invoke(messages) - ai_msg.response_metadata - - .. code-block:: python - - {'ResponseMetadata': {'RequestId': '776a2a26-5946-45ae-859e-82dc5f12017c', - 'HTTPStatusCode': 200, - 'HTTPHeaders': {'date': 'Mon, 17 Jun 2024 01:37:05 GMT', - 'content-type': 'application/json', - 'content-length': '206', - 'connection': 'keep-alive', - 'x-amzn-requestid': '776a2a26-5946-45ae-859e-82dc5f12017c'}, - 'RetryAttempts': 0}, - 'stopReason': 'end_turn', - 'metrics': {'latencyMs': 1290}} + ```python + ai_msg = model.invoke(messages) + ai_msg.response_metadata + ``` + + ```python + {'ResponseMetadata': {'RequestId': '776a2a26-5946-45ae-859e-82dc5f12017c', + 'HTTPStatusCode': 200, + 'HTTPHeaders': {'date': 'Mon, 17 Jun 2024 01:37:05 GMT', + 'content-type': 'application/json', + 'content-length': '206', + 'connection': 'keep-alive', + 'x-amzn-requestid': '776a2a26-5946-45ae-859e-82dc5f12017c'}, + 'RetryAttempts': 0}, + 'stopReason': 'end_turn', + 'metrics': {'latencyMs': 1290}} + ``` """ # noqa: E501 @@ -357,7 +357,7 @@ class Joke(BaseModel): model_id: str = Field(alias="model") """ID of the model to call. - e.g., ``"anthropic.claude-3-sonnet-20240229-v1:0"``. This is equivalent to the + e.g., `"anthropic.claude-3-sonnet-20240229-v1:0"`. This is equivalent to the modelID property in the list-foundation-models api. For custom and provisioned models, an ARN value is expected. 
See https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
diff --git a/libs/aws/langchain_aws/chat_models/sagemaker_endpoint.py b/libs/aws/langchain_aws/chat_models/sagemaker_endpoint.py
index 3dd2dd40..932275a1 100644
--- a/libs/aws/langchain_aws/chat_models/sagemaker_endpoint.py
+++ b/libs/aws/langchain_aws/chat_models/sagemaker_endpoint.py
@@ -146,44 +146,43 @@ class ChatSagemakerEndpoint(BaseChatModel):
 
     Example:
-        .. code-block:: python
-
-            from langchain_aws.chat_models.sagemaker_endpoint import
-            ChatSagemakerEndpoint
-            endpoint_name = (
-                "my-endpoint-name"
-            )
-            region_name = (
-                "us-west-2"
-            )
-            credentials_profile_name = (
-                "default"
-            )
-            se = ChatSagemakerEndpoint(
-                endpoint_name=endpoint_name,
-                region_name=region_name,
-                credentials_profile_name=credentials_profile_name
-            )
-
-            # Usage with Inference Component
-            se = ChatSagemakerEndpoint(
-                endpoint_name=endpoint_name,
-                inference_component_name=inference_component_name,
-                region_name=region_name,
-                credentials_profile_name=credentials_profile_name
-            )
+        ```python
+        from langchain_aws.chat_models.sagemaker_endpoint import ChatSagemakerEndpoint
+        endpoint_name = (
+            "my-endpoint-name"
+        )
+        region_name = (
+            "us-west-2"
+        )
+        credentials_profile_name = (
+            "default"
+        )
+        se = ChatSagemakerEndpoint(
+            endpoint_name=endpoint_name,
+            region_name=region_name,
+            credentials_profile_name=credentials_profile_name
+        )
+
+        # Usage with Inference Component
+        se = ChatSagemakerEndpoint(
+            endpoint_name=endpoint_name,
+            inference_component_name=inference_component_name,
+            region_name=region_name,
+            credentials_profile_name=credentials_profile_name
+        )
 
         #Use with boto3 client
-            client = boto3.client(
-                "sagemaker-runtime",
-                region_name=region_name
-            )
-
-            se = ChatSagemakerEndpoint(
-                endpoint_name=endpoint_name,
-                client=client
-            )
-
+        client = boto3.client(
+            "sagemaker-runtime",
+            region_name=region_name
+        )
+
+        se = ChatSagemakerEndpoint(
+            endpoint_name=endpoint_name,
+            client=client
+        )
+        ```
     """
 
     client: Any = None
     """Boto3 client for sagemaker runtime"""
@@ -204,12 +203,12 @@ class ChatSagemakerEndpoint(BaseChatModel):
     region_name: Optional[str] = ""
     """The aws region, e.g., `us-west-2`.
 
-    Falls back to ``AWS_REGION`` or ``AWS_DEFAULT_REGION`` env variable or region
-    specified in ``~/.aws/config`` in case it is not provided here.
+    Falls back to `AWS_REGION` or `AWS_DEFAULT_REGION` env variable or region
+    specified in `~/.aws/config` in case it is not provided here.
     """
 
     credentials_profile_name: Optional[str] = Field(default=None, exclude=True)
-    """The name of the profile in the ``~/.aws/credentials`` or ``~/.aws/config`` files.
+    """The name of the profile in the `~/.aws/credentials` or `~/.aws/config` files.
 
     Profile should either have access keys or role information specified.
     If not specified, the default credential profile or, if on an EC2 instance,
@@ -227,11 +226,10 @@ class ChatSagemakerEndpoint(BaseChatModel):
     If provided, aws_secret_access_key must also be provided.
     If not specified, the default credential profile or, if on an EC2 instance,
     credentials from IMDS will be used.
-
+
     See:
     https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
 
-    If not provided, will be read from ``AWS_ACCESS_KEY_ID`` environment variable.
-
+    If not provided, will be read from `AWS_ACCESS_KEY_ID` environment variable.
 
     """
 
     aws_secret_access_key: Optional[SecretStr] = Field(
@@ -239,12 +237,12 @@ class ChatSagemakerEndpoint(BaseChatModel):
     )
     """AWS secret_access_key. 
-    If provided, ``aws_access_key_id`` must also be provided.
+    If provided, `aws_access_key_id` must also be provided.
     If not specified, the default credential profile or, if on an EC2 instance,
     credentials from IMDS will be used.
 
     See:
     https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
 
-    If not provided, will be read from ``AWS_SECRET_ACCESS_KEY`` environment variable.
+    If not provided, will be read from `AWS_SECRET_ACCESS_KEY` environment variable.
 
     """
@@ -258,7 +256,7 @@ class ChatSagemakerEndpoint(BaseChatModel):
     See:
     https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
 
-    If not provided, will be read from ``AWS_SESSION_TOKEN`` environment variable.
+    If not provided, will be read from `AWS_SESSION_TOKEN` environment variable.
     """
 
     endpoint_url: Optional[str] = Field(default=None, alias="base_url")
@@ -278,23 +276,23 @@ class ChatSagemakerEndpoint(BaseChatModel):
     """
 
     Example:
-        .. code-block:: python
-
-            from langchain_community.llms.sagemaker_endpoint import ChatContentHandler
-
-            class ContentHandler(ChatContentHandler):
-                content_type = "application/json"
-                accepts = "application/json"
-
-                def transform_input(
-                    self, prompt: List[Dict[str, Any]], model_kwargs: Dict
-                ) -> bytes:
-                    input_str = json.dumps({prompt: prompt, **model_kwargs})
-                    return input_str.encode('utf-8')
-
-                def transform_output(self, output: bytes) -> BaseMessage:
-                    response_json = json.loads(output.read().decode("utf-8"))
-                    return response_json[0]["generated_text"]
+        ```python
+        from langchain_aws.chat_models.sagemaker_endpoint import ChatModelContentHandler
+
+        class ContentHandler(ChatModelContentHandler):
+            content_type = "application/json"
+            accepts = "application/json"
+
+            def transform_input(
+                self, prompt: List[Dict[str, Any]], model_kwargs: Dict
+            ) -> bytes:
+                input_str = json.dumps({"prompt": prompt, **model_kwargs})
+                return input_str.encode('utf-8')
+
+            def transform_output(self, output: bytes) -> BaseMessage:
+                response_json = json.loads(output.read().decode("utf-8"))
+                return response_json[0]["generated_text"]
+        ```
     """
 
     model_kwargs: Optional[Dict] = None
diff --git a/libs/aws/langchain_aws/document_compressors/rerank.py b/libs/aws/langchain_aws/document_compressors/rerank.py
index bdc8852e..188ce02c 100644
--- a/libs/aws/langchain_aws/document_compressors/rerank.py
+++ b/libs/aws/langchain_aws/document_compressors/rerank.py
@@ -23,8 +23,8 @@ class BedrockRerank(BaseDocumentCompressor):
     region_name: Optional[str] = None
     """The aws region, e.g., `us-west-2`.
 
-    Falls back to ``AWS_REGION`` or ``AWS_DEFAULT_REGION`` env variable or region
-    specified in ``~/.aws/config`` in case it is not provided here.
+    Falls back to `AWS_REGION` or `AWS_DEFAULT_REGION` env variable or region
+    specified in `~/.aws/config` in case it is not provided here.
     """
 
     credentials_profile_name: Optional[str] = Field(
@@ -37,13 +37,13 @@ class BedrockRerank(BaseDocumentCompressor):
     )
     """AWS access key id.
 
-    If provided, ``aws_secret_access_key`` must also be provided.
+    If provided, `aws_secret_access_key` must also be provided.
     If not specified, the default credential profile or, if on an EC2 instance,
     credentials from IMDS will be used.
 
     See:
     https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
 
-    If not provided, will be read from ``AWS_ACCESS_KEY_ID`` environment variable.
+    If not provided, will be read from `AWS_ACCESS_KEY_ID` environment variable. 
""" @@ -58,7 +58,7 @@ class BedrockRerank(BaseDocumentCompressor): See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from ``AWS_SECRET_ACCESS_KEY`` environment variable. + If not provided, will be read from `AWS_SECRET_ACCESS_KEY` environment variable. """ @@ -72,7 +72,7 @@ class BedrockRerank(BaseDocumentCompressor): See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from ``AWS_SESSION_TOKEN`` environment variable. + If not provided, will be read from `AWS_SESSION_TOKEN` environment variable. """ @@ -120,7 +120,7 @@ def rerank( additional_model_request_fields: Additional fields to pass to the model. Returns: - List[Dict[str, Any]]: A list of ranked documents with relevance scores. + A list of ranked documents with relevance scores. """ if len(documents) == 0: diff --git a/libs/aws/langchain_aws/embeddings/bedrock.py b/libs/aws/langchain_aws/embeddings/bedrock.py index 481a7f20..5c90d9a3 100644 --- a/libs/aws/langchain_aws/embeddings/bedrock.py +++ b/libs/aws/langchain_aws/embeddings/bedrock.py @@ -32,19 +32,18 @@ class BedrockEmbeddings(BaseModel, Embeddings): """ Example: - .. code-block:: python - - from langchain_community.bedrock_embeddings import BedrockEmbeddings - region_name ="us-east-1" - credentials_profile_name = "default" - model_id = "amazon.titan-embed-text-v1" - - be = BedrockEmbeddings( - credentials_profile_name=credentials_profile_name, - region_name=region_name, - model_id=model_id - ) - + ```python + from langchain_community.bedrock_embeddings import BedrockEmbeddings + region_name ="us-east-1" + credentials_profile_name = "default" + model_id = "amazon.titan-embed-text-v1" + + be = BedrockEmbeddings( + credentials_profile_name=credentials_profile_name, + region_name=region_name, + model_id=model_id + ) + ``` """ client: Any = Field(default=None, exclude=True) #: :meta private: @@ -52,14 +51,14 @@ class BedrockEmbeddings(BaseModel, Embeddings): region_name: Optional[str] = None """The aws region e.g., `us-west-2`. - Falls back to ``AWS_REGION``/``AWS_DEFAULT_REGION`` env variable or region - specified in ``~/.aws/config`` in case it is not provided here. + Falls back to `AWS_REGION`/`AWS_DEFAULT_REGION` env variable or region + specified in `~/.aws/config` in case it is not provided here. """ credentials_profile_name: Optional[str] = None - """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which - has either access keys or role information specified. + """The name of the profile in the `~/.aws/credentials` or `~/.aws/config` files, + which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. @@ -78,7 +77,7 @@ class BedrockEmbeddings(BaseModel, Embeddings): See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from ``AWS_ACCESS_KEY_ID`` environment variable. + If not provided, will be read from `AWS_ACCESS_KEY_ID` environment variable. """ @@ -93,7 +92,7 @@ class BedrockEmbeddings(BaseModel, Embeddings): See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from ``AWS_SECRET_ACCESS_KEY`` environment variable. + If not provided, will be read from `AWS_SECRET_ACCESS_KEY` environment variable. 
""" @@ -102,20 +101,20 @@ class BedrockEmbeddings(BaseModel, Embeddings): ) """AWS session token. - If provided, ``aws_access_key_id`` and ``aws_secret_access_key`` must also be + If provided, `aws_access_key_id` and `aws_secret_access_key` must also be provided. Not required unless using temporary credentials. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from ``AWS_SESSION_TOKEN`` environment variable. + If not provided, will be read from `AWS_SESSION_TOKEN` environment variable. """ model_id: str = "amazon.titan-embed-text-v1" - """Id of the model to call, e.g., ``'amazon.titan-embed-text-v1'``, this is - equivalent to the ``modelId`` property in the list-foundation-models api + """Id of the model to call, e.g., `'amazon.titan-embed-text-v1'`, this is + equivalent to the `modelId` property in the list-foundation-models api """ @@ -129,13 +128,13 @@ class BedrockEmbeddings(BaseModel, Embeddings): """ endpoint_url: Optional[str] = None - """Needed if you don't want to default to ``'us-east-1'`` endpoint""" + """Needed if you don't want to default to `'us-east-1'` endpoint""" normalize: bool = False """Whether the embeddings should be normalized to unit vectors""" config: Any = None - """An optional ``botocore.config.Config`` instance to pass to the client.""" + """An optional `botocore.config.Config` instance to pass to the client.""" model_config = ConfigDict( extra="forbid", diff --git a/libs/aws/langchain_aws/graphs/neptune_graph.py b/libs/aws/langchain_aws/graphs/neptune_graph.py index b31a508f..795878cc 100644 --- a/libs/aws/langchain_aws/graphs/neptune_graph.py +++ b/libs/aws/langchain_aws/graphs/neptune_graph.py @@ -206,11 +206,11 @@ class NeptuneAnalyticsGraph(BaseNeptuneGraph): graph_identifier: the graph identifier for a Neptune Analytics graph Example: - .. code-block:: python - + ```python graph = NeptuneAnalyticsGraph( graph_identifier='' ) + ``` *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. @@ -350,12 +350,12 @@ class NeptuneGraph(BaseNeptuneGraph): config: optional botocore Config object Example: - .. code-block:: python - + ```python graph = NeptuneGraph( host='', port=8182 ) + ``` *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. diff --git a/libs/aws/langchain_aws/graphs/neptune_rdf_graph.py b/libs/aws/langchain_aws/graphs/neptune_rdf_graph.py index 9560b341..cf02919a 100644 --- a/libs/aws/langchain_aws/graphs/neptune_rdf_graph.py +++ b/libs/aws/langchain_aws/graphs/neptune_rdf_graph.py @@ -43,8 +43,7 @@ class NeptuneRdfGraph: sign: optional, whether to sign the request payload, default is True Example: - .. code-block:: python - + ```python graph = NeptuneRdfGraph( host=', port= @@ -59,6 +58,7 @@ class NeptuneRdfGraph: schema_elem = graph.get_schema_elements() #... change schema_elements ... graph.load_schema(schema_elem) + ``` *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. 
diff --git a/libs/aws/langchain_aws/llms/bedrock.py b/libs/aws/langchain_aws/llms/bedrock.py
index ca452265..c3f0d4d0 100644
--- a/libs/aws/langchain_aws/llms/bedrock.py
+++ b/libs/aws/langchain_aws/llms/bedrock.py
@@ -96,7 +96,7 @@ def _stream_response_to_generation_chunk(
     output_key: str,
     messages_api: bool,
     coerce_content_to_string: bool,
-) -> Union[GenerationChunk, AIMessageChunk, None]:  # type ignore[return]
+) -> Union[GenerationChunk, AIMessageChunk, None]:  # type: ignore[return]
     """Convert a stream response to a generation chunk."""
     if messages_api:
         msg_type = stream_response.get("type")
@@ -642,14 +642,14 @@ class BedrockBase(BaseLanguageModel, ABC):
     """The bedrock client for making control plane API calls"""
 
     region_name: Optional[str] = Field(default=None, alias="region")
-    """The aws region e.g., `us-west-2`. Falls back to ``AWS_REGION`` or
-    ``AWS_DEFAULT_REGION`` env variable or region specified in ``~/.aws/config`` in
+    """The aws region e.g., `us-west-2`. Falls back to `AWS_REGION` or
+    `AWS_DEFAULT_REGION` env variable or region specified in `~/.aws/config` in
     case it is not provided here.
     """
 
     credentials_profile_name: Optional[str] = Field(default=None, exclude=True)
-    """The name of the profile in the ``~/.aws/credentials`` or ``~/.aws/config files``,
+    """The name of the profile in the `~/.aws/credentials` or `~/.aws/config` files,
     which has either access keys or role information specified.
 
     If not specified, the default credential profile or, if on an EC2 instance,
@@ -664,30 +664,30 @@ class BedrockBase(BaseLanguageModel, ABC):
     )
     """AWS access key id.
 
-    If provided, ``aws_secret_access_key`` must also be provided.
+    If provided, `aws_secret_access_key` must also be provided.
     If not specified, the default credential profile or, if on an EC2 instance,
     credentials from IMDS will be used.
 
     See:
     https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
 
-    If not provided, will be read from ``AWS_ACCESS_KEY_ID`` environment variable.
+    If not provided, will be read from `AWS_ACCESS_KEY_ID` environment variable.
     """
 
     aws_secret_access_key: Optional[SecretStr] = Field(
         default_factory=secret_from_env("AWS_SECRET_ACCESS_KEY", default=None)
     )
-    """AWS ``secret_access_key``.
+    """AWS `secret_access_key`.
 
-    If provided, ``aws_access_key_id`` must also be provided.
+    If provided, `aws_access_key_id` must also be provided.
     If not specified, the default credential profile or, if on an EC2 instance,
     credentials from IMDS will be used.
 
     See:
     https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
 
-    If not provided, will be read from ``AWS_SECRET_ACCESS_KEY`` environment variable.
+    If not provided, will be read from `AWS_SECRET_ACCESS_KEY` environment variable.
     """
@@ -696,39 +696,39 @@ class BedrockBase(BaseLanguageModel, ABC):
     )
     """AWS session token.
 
-    If provided, ``aws_access_key_id`` and ``aws_secret_access_key`` must also be
+    If provided, `aws_access_key_id` and `aws_secret_access_key` must also be
     provided. Not required unless using temporary credentials.
 
     See:
     https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
 
-    If not provided, will be read from ``AWS_SESSION_TOKEN`` environment variable.
+    If not provided, will be read from `AWS_SESSION_TOKEN` environment variable. 
""" config: Any = None - """An optional ``botocore.config.Config`` instance to pass to the client.""" + """An optional `botocore.config.Config` instance to pass to the client.""" provider: Optional[str] = None - """The model provider, e.g., ``'amazon'``, ``'cohere'``, ``'ai21'``, etc. When not + """The model provider, e.g., `'amazon'`, `'cohere'`, `'ai21'`, etc. When not supplied, provider is extracted from the first part of the model_id e.g. - ``'amazon'`` in ``'amazon.titan-text-express-v1'``. This value should be provided + `'amazon'` in `'amazon.titan-text-express-v1'`. This value should be provided for model ids that do not have the provider in them, e.g., custom and provisioned models that have an ARN associated with them. """ model_id: str = Field(alias="model") - """Id of the model to call, e.g., ``'amazon.titan-text-express-v1'``, this is - equivalent to the ``modelId`` property in the list-foundation-models api. For custom + """Id of the model to call, e.g., `'amazon.titan-text-express-v1'`, this is + equivalent to the `modelId` property in the list-foundation-models api. For custom and provisioned models, an ARN value is expected. """ base_model_id: Optional[str] = Field(default=None, alias="base_model") """An optional field to pass the base model id. If provided, this will be used over - the value of ``model_id`` to identify the base model. + the value of `model_id` to identify the base model. """ @@ -736,7 +736,7 @@ class BedrockBase(BaseLanguageModel, ABC): """Keyword arguments to pass to the model.""" endpoint_url: Optional[str] = None - """Needed if you don't want to default to ``'us-east-1'`` endpoint""" + """Needed if you don't want to default to `'us-east-1'` endpoint""" streaming: bool = False """Whether to stream the results.""" @@ -765,8 +765,8 @@ class BedrockBase(BaseLanguageModel, ABC): """ An optional dictionary to configure guardrails for Bedrock. - This field ``guardrails`` consists of two keys: ``'guardrailId'`` and - ``'guardrailVersion'``, which should be strings, but are initialized to None. + This field `guardrails` consists of two keys: `'guardrailId'` and + `'guardrailVersion'`, which should be strings, but are initialized to None. It's used to determine if specific guardrails are enabled and properly set. @@ -942,10 +942,10 @@ def _model_is_anthropic(self) -> bool: def _guardrails_enabled(self) -> bool: """ Determines if guardrails are enabled and correctly configured. - Checks if ``guardrails`` is a dictionary with non-empty ``'id'`` and - ``'version'`` keys. + Checks if `guardrails` is a dictionary with non-empty `'id'` and + `'version'` keys. - Checks if ``'guardrails.trace'`` is true. + Checks if `'guardrails.trace'` is true. Returns: bool: True if guardrails are correctly configured, False otherwise. @@ -1301,7 +1301,7 @@ class BedrockLLM(LLM, BedrockBase): https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass - the name of the profile from the ``~/.aws/credentials`` file that is to be used. + the name of the profile from the `~/.aws/credentials` file that is to be used. Make sure the credentials / roles used have the required policies to access the Bedrock service. @@ -1309,16 +1309,15 @@ class BedrockLLM(LLM, BedrockBase): """ Example: - .. 
code-block:: python - - from bedrock_langchain.bedrock_llm import BedrockLLM - - llm = BedrockLLM( - credentials_profile_name="default", - model_id="amazon.titan-text-express-v1", - streaming=True - ) + ```python + from langchain_aws import BedrockLLM + llm = BedrockLLM( + credentials_profile_name="default", + model_id="amazon.titan-text-express-v1", + streaming=True + ) + ``` """ @model_validator(mode="after") @@ -1382,7 +1381,7 @@ def _stream( Args: prompt (str): The prompt to pass into the model stop (Optional[List[str]], optional): Stop sequences. These will - override any stop sequences in the ``model_kwargs`` attribute. + override any stop sequences in the `model_kwargs` attribute. Defaults to None. run_manager (Optional[CallbackManagerForLLMRun], optional): Callback run managers used to process the output. Defaults to None. @@ -1415,9 +1414,9 @@ def _call( The string generated by the model. Example: - .. code-block:: python - + ```python response = llm("Tell me a joke.") + ``` """ @@ -1476,7 +1475,7 @@ async def _astream( Args: prompt (str): The prompt to pass into the model stop (Optional[List[str]], optional): Stop sequences. These will - override any stop sequences in the ``model_kwargs`` attribute. + override any stop sequences in the `model_kwargs` attribute. Defaults to None. run_manager (Optional[CallbackManagerForLLMRun], optional): Callback run managers used to process the output. Defaults to None. @@ -1508,9 +1507,9 @@ async def _acall( The string generated by the model. Example: - .. code-block:: python - + ```python response = await llm._acall("Tell me a joke.") + ``` """ diff --git a/libs/aws/langchain_aws/llms/sagemaker_endpoint.py b/libs/aws/langchain_aws/llms/sagemaker_endpoint.py index 2381ae68..b2dd7da3 100644 --- a/libs/aws/langchain_aws/llms/sagemaker_endpoint.py +++ b/libs/aws/langchain_aws/llms/sagemaker_endpoint.py @@ -103,7 +103,7 @@ class SagemakerEndpoint(LLM): https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass - the name of the profile from the ``~/.aws/credentials`` file that is to be used. + the name of the profile from the `~/.aws/credentials` file that is to be used. Make sure the credentials / roles used have the required policies to access the Sagemaker endpoint. @@ -114,12 +114,12 @@ class SagemakerEndpoint(LLM): """ Args: - region_name: The aws region e.g., ``'us-west-2'``. - Falls back to ``AWS_REGION``/``AWS_DEFAULT_REGION`` env variable - or region specified in ``~/.aws/config``. + region_name: The aws region e.g., `'us-west-2'`. + Falls back to `AWS_REGION`/`AWS_DEFAULT_REGION` env variable + or region specified in `~/.aws/config`. - credentials_profile_name: The name of the profile in the ``~/.aws/credentials`` - or ~/.aws/config files, which has either access keys or role information + credentials_profile_name: The name of the profile in the `~/.aws/credentials` + or `~/.aws/config` files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. @@ -129,42 +129,42 @@ class SagemakerEndpoint(LLM): Example: - .. 
code-block:: python
-
-            from langchain_community.llms import SagemakerEndpoint
-            endpoint_name = (
-                "my-endpoint-name"
-            )
-            region_name = (
-                "us-west-2"
-            )
-            credentials_profile_name = (
-                "default"
-            )
-            se = SagemakerEndpoint(
-                endpoint_name=endpoint_name,
-                region_name=region_name,
-                credentials_profile_name=credentials_profile_name
-            )
-
-            # Usage with Inference Component
-            se = SagemakerEndpoint(
-                endpoint_name=endpoint_name,
-                inference_component_name=inference_component_name,
-                region_name=region_name,
-                credentials_profile_name=credentials_profile_name
-            )
-
-            #Use with boto3 client
-            client = boto3.client(
-                "sagemaker-runtime",
-                region_name=region_name
-            )
-
-            se = SagemakerEndpoint(
-                endpoint_name=endpoint_name,
-                client=client
-            )
+        ```python
+        from langchain_aws.llms import SagemakerEndpoint
+        endpoint_name = (
+            "my-endpoint-name"
+        )
+        region_name = (
+            "us-west-2"
+        )
+        credentials_profile_name = (
+            "default"
+        )
+        se = SagemakerEndpoint(
+            endpoint_name=endpoint_name,
+            region_name=region_name,
+            credentials_profile_name=credentials_profile_name
+        )
+
+        # Usage with Inference Component
+        se = SagemakerEndpoint(
+            endpoint_name=endpoint_name,
+            inference_component_name=inference_component_name,
+            region_name=region_name,
+            credentials_profile_name=credentials_profile_name
+        )
+
+        # Use with boto3 client
+        client = boto3.client(
+            "sagemaker-runtime",
+            region_name=region_name
+        )
+
+        se = SagemakerEndpoint(
+            endpoint_name=endpoint_name,
+            client=client
+        )
+        ```
     """
 
     client: Any = None
@@ -181,10 +181,10 @@ class SagemakerEndpoint(LLM):
     """Optional name of the inference component to invoke
     if specified with endpoint name."""  # noqa: E501
 
     region_name: str = ""
-    """The aws region where the Sagemaker model is deployed, eg. ``'us-west-2'``."""
+    """The aws region where the Sagemaker model is deployed, eg. `'us-west-2'`."""
 
     credentials_profile_name: Optional[str] = None
-    """The name of the profile in the ``~/.aws/credentials`` or ``~/.aws/config`` files,
+    """The name of the profile in the `~/.aws/credentials` or `~/.aws/config` files,
     which has either access keys or role information specified.
 
     If not specified, the default credential profile or, if on an EC2 instance,
@@ -210,16 +210,16 @@ class SagemakerEndpoint(LLM):
     aws_secret_access_key: Optional[SecretStr] = Field(
         default_factory=secret_from_env("AWS_SECRET_ACCESS_KEY", default=None)
     )
-    """AWS ``secret_access_key``.
+    """AWS `secret_access_key`.
 
-    If provided, ``aws_access_key_id`` must also be provided.
+    If provided, `aws_access_key_id` must also be provided.
     If not specified, the default credential profile or, if on an EC2 instance,
     credentials from IMDS will be used.
 
     See:
     https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
 
-    If not provided, will be read from ``AWS_SECRET_ACCESS_KEY`` environment variable.
+    If not provided, will be read from `AWS_SECRET_ACCESS_KEY` environment variable.
     """
@@ -228,22 +228,22 @@ class SagemakerEndpoint(LLM):
     )
     """AWS session token.
 
-    If provided, ``aws_access_key_id`` and ``aws_secret_access_key`` must also be
+    If provided, `aws_access_key_id` and `aws_secret_access_key` must also be
     provided. Not required unless using temporary credentials.
 
     See:
     https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
 
-    If not provided, will be read from ``AWS_SESSION_TOKEN`` environment variable.
+    If not provided, will be read from `AWS_SESSION_TOKEN` environment variable. 
""" config: Any = None - """An optional ``botocore.config.Config`` instance to pass to the client.""" + """An optional `botocore.config.Config` instance to pass to the client.""" endpoint_url: Optional[str] = None - """Needed if you don't want to default to ``'us-east-1'`` endpoint""" + """Needed if you don't want to default to `'us-east-1'` endpoint""" content_handler: LLMContentHandler """The content handler class that provides an input and output transform functions @@ -256,8 +256,7 @@ class SagemakerEndpoint(LLM): """ Example: - .. code-block:: python - + ```python from langchain_community.llms.sagemaker_endpoint import LLMContentHandler class ContentHandler(LLMContentHandler): @@ -271,7 +270,7 @@ def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes: def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json[0]["generated_text"] - + ``` """ model_kwargs: Optional[Dict] = None @@ -279,9 +278,9 @@ def transform_output(self, output: bytes) -> str: endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint - function. See ``boto3``_. docs for more info. + function. See `boto3` docs for more info. - .. _boto3: + [boto3](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) """ @@ -383,9 +382,9 @@ def _call( The string generated by the model. Example: - .. code-block:: python - - response = se("Tell me a joke.") + ```python + response = se("Tell me a joke.") + ``` """ _model_kwargs = self.model_kwargs or {} diff --git a/libs/aws/langchain_aws/retrievers/bedrock.py b/libs/aws/langchain_aws/retrievers/bedrock.py index 7dcaa99a..4195867d 100644 --- a/libs/aws/langchain_aws/retrievers/bedrock.py +++ b/libs/aws/langchain_aws/retrievers/bedrock.py @@ -59,34 +59,34 @@ class AmazonKnowledgeBasesRetriever(BaseRetriever): Attributes: knowledge_base_id: Knowledge Base ID. - region_name: The aws region e.g., ``'us-west-2'``. - Fallback to ``AWS_REGION``/``AWS_DEFAULT_REGION`` env variable or region - specified in ``~/.aws/config``. - credentials_profile_name: The name of the profile in the ``~/.aws/credentials`` - or ``~/.aws/config`` files, which has either access keys or role information + region_name: The aws region e.g., `'us-west-2'`. + Fallback to `AWS_REGION`/`AWS_DEFAULT_REGION` env variable or region + specified in `~/.aws/config`. + credentials_profile_name: The name of the profile in the `~/.aws/credentials` + or `~/.aws/config` files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. aws_access_key_id: AWS access key id. If provided, ``aws_secret_access_key`` must also be provided. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from ``AWS_ACCESS_KEY_ID`` environment + If not provided, will be read from `AWS_ACCESS_KEY_ID` environment variable. - aws_secret_access_key: AWS ``secret_access_key``. If provided, + aws_secret_access_key: AWS `secret_access_key`. If provided, ``aws_access_key_id`` must also be provided. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. 
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
-            If not provided, will be read from ``AWS_SECRET_ACCESS_KEY`` environment
-            variable.
+            If not provided, will be read from `AWS_SECRET_ACCESS_KEY` environment
+            variable.
-        aws_session_token: AWS session token. If provided, ``aws_access_key_id`` and
-            ``aws_secret_access_key`` must also be provided. Not required unless using
+        aws_session_token: AWS session token. If provided, `aws_access_key_id` and
+            `aws_secret_access_key` must also be provided. Not required unless using
             temporary credentials.
             See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
-            If not provided, will be read from ``AWS_SESSION_TOKEN`` environment
-            variable.
+            If not provided, will be read from `AWS_SESSION_TOKEN` environment
+            variable.
-        endpoint_url: Needed if you don't want to default to ``'us-east-1'`` endpoint.
-        config: An optional ``botocore.config.Config`` instance to pass to the client.
+        endpoint_url: Needed if you don't want to default to `'us-east-1'` endpoint.
+        config: An optional `botocore.config.Config` instance to pass to the client.
         client: boto3 client for bedrock agent runtime.
         guardrail_config: Configuration information for a guardrail that you want
             to use in the request.
@@ -96,18 +96,18 @@ class AmazonKnowledgeBasesRetriever(BaseRetriever):
             (0.0 to 1.0).
 
     Example:
-        .. code-block:: python
-
-            from langchain_community.retrievers import AmazonKnowledgeBasesRetriever
-
-            retriever = AmazonKnowledgeBasesRetriever(
-                knowledge_base_id="<knowledge-base-id>",
-                retrieval_config={
-                    "vectorSearchConfiguration": {
-                        "numberOfResults": 4
-                    }
-                },
-            )
+        ```python
+        from langchain_aws.retrievers import AmazonKnowledgeBasesRetriever
+
+        retriever = AmazonKnowledgeBasesRetriever(
+            knowledge_base_id="<knowledge-base-id>",
+            retrieval_config={
+                "vectorSearchConfiguration": {
+                    "numberOfResults": 4
+                }
+            },
+        )
+        ```
     """
@@ -176,9 +176,12 @@ def _get_relevant_documents(
     ) -> List[Document]:
         """Get relevant document from a KnowledgeBase
 
-        :param query: the user's query
-        :param run_manager: The callback handler to use
-        :return: List of relevant documents
+        Parameters:
+            query: The user's query.
+            run_manager: The callback handler to use.
+
+        Returns:
+            A list of relevant documents.
         """
         retrieve_request: Dict[str, Any] = self._get_retrieve_request(query)
@@ -191,12 +194,7 @@ def _get_relevant_documents(
         return self._filter_by_score_confidence(docs=documents)
 
     def _get_retrieve_request(self, query: str) -> Dict[str, Any]:
-        """Build a Retrieve request
-
-        :param query:
-        :return:
-
-        """
+        """Build a Retrieve request."""
         request: Dict[str, Any] = {
             "retrievalQuery": {"text": query.strip()},
             "knowledgeBaseId": self.knowledge_base_id,
@@ -226,9 +224,11 @@ def _retrieval_results_to_documents(
     ) -> List[Document]:
         """Convert the Retrieve API results to LangChain Documents
 
-        :param results: Retrieve API results list
-        :return: List of LangChain Documents
+        Parameters:
+            results: Retrieve API results list.
 
+        Returns:
+            A list of LangChain Documents.
         """
         documents = []
         for result in results:
@@ -251,8 +251,11 @@ def _get_content_from_result(result: Dict[str, Any]) -> Optional[str]:
         """Convert the content from one Retrieve API result to string
 
-        :param result: Retrieve API search result
-        :return: string representation of the content attribute
+        Parameters:
+            result: Retrieve API search result.
+
+        Returns:
+            A string representation of the content attribute. 
""" if not result: diff --git a/libs/aws/langchain_aws/retrievers/kendra.py b/libs/aws/langchain_aws/retrievers/kendra.py index 61838b91..92321332 100644 --- a/libs/aws/langchain_aws/retrievers/kendra.py +++ b/libs/aws/langchain_aws/retrievers/kendra.py @@ -339,12 +339,12 @@ class AmazonKendraRetriever(BaseRetriever): Attributes: index_id: Kendra index id - region_name: The aws region e.g., ``'us-west-2'``. - Falls back to ``AWS_REGION``/``AWS_DEFAULT_REGION`` env variable - or region specified in ``~/.aws/config``. + region_name: The aws region e.g., `'us-west-2'`. + Falls back to `AWS_REGION`/`AWS_DEFAULT_REGION` env variable + or region specified in `~/.aws/config`. - credentials_profile_name: The name of the profile in the ``~/.aws/credentials`` - or ``~/.aws/config`` files, which has either access keys or role information + credentials_profile_name: The name of the profile in the `~/.aws/credentials` + or `~/.aws/config` files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. @@ -354,30 +354,30 @@ class AmazonKendraRetriever(BaseRetriever): See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from ``AWS_ACCESS_KEY_ID`` environment + If not provided, will be read from `AWS_ACCESS_KEY_ID` environment variable. - aws_secret_access_key: AWS secret_access_key. If provided, ``aws_access_key_id`` + aws_secret_access_key: AWS secret_access_key. If provided, `aws_access_key_id` must also be provided. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from ``AWS_SECRET_ACCESS_KEY`` environment + If not provided, will be read from `AWS_SECRET_ACCESS_KEY` environment variable. - aws_session_token: AWS session token. If provided, ``aws_access_key_id`` and - ``aws_secret_access_key`` must also be provided. Not required unless using + aws_session_token: AWS session token. If provided, `aws_access_key_id` and + `aws_secret_access_key` must also be provided. Not required unless using temporary credentials. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from ``AWS_SESSION_TOKEN`` environment + If not provided, will be read from `AWS_SESSION_TOKEN` environment variable. - endpoint_url: Needed if you don't want to default to ``'us-east-1'`` endpoint. + endpoint_url: Needed if you don't want to default to `'us-east-1'` endpoint. - config: An optional ``botocore.config.Config`` instance to pass to the client. + config: An optional `botocore.config.Config` instance to pass to the client. top_k: No of results to return @@ -395,11 +395,11 @@ class AmazonKendraRetriever(BaseRetriever): See: https://docs.aws.amazon.com/kendra/latest/APIReference Example: - .. code-block:: python - - retriever = AmazonKendraRetriever( - index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03" - ) + ```python + retriever = AmazonKendraRetriever( + index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03" + ) + ``` """ @@ -505,10 +505,9 @@ def _get_relevant_documents( """Run search on Kendra index and get top k documents Example: - .. 
code-block:: python - + ```python docs = retriever.get_relevant_documents('This is my query') - + ``` """ result_items = self._kendra_query(query) top_k_docs = self._get_top_k_docs(result_items) diff --git a/libs/aws/langchain_aws/retrievers/s3_vectors.py b/libs/aws/langchain_aws/retrievers/s3_vectors.py index dc81e24a..f62dacf2 100644 --- a/libs/aws/langchain_aws/retrievers/s3_vectors.py +++ b/libs/aws/langchain_aws/retrievers/s3_vectors.py @@ -5,14 +5,12 @@ class AmazonS3VectorsRetriever(VectorStoreRetriever): """AmazonS3VectorsRetriever is a retriever for Amazon S3 Vectors. Examples: - - .. code-block:: python - + ```python from langchain_aws.vectorstores import AmazonS3Vectors vector_store = AmazonS3Vectors(...) retriever = vector_store.as_retriever() - + ``` """ allowed_search_types = [ diff --git a/libs/aws/langchain_aws/runnables/q_business.py b/libs/aws/langchain_aws/runnables/q_business.py index e2434d8b..54c59c8e 100644 --- a/libs/aws/langchain_aws/runnables/q_business.py +++ b/libs/aws/langchain_aws/runnables/q_business.py @@ -82,14 +82,13 @@ def invoke( The string generated by the model. Example: - .. code-block:: python - + ```python model = AmazonQ( credentials=your_credentials, application_id=your_app_id ) response = model.invoke("Tell me a joke") - + ``` """ try: # Prepare the request diff --git a/libs/aws/langchain_aws/tools/browser_session_manager.py b/libs/aws/langchain_aws/tools/browser_session_manager.py index 268d7b62..ae878e0e 100644 --- a/libs/aws/langchain_aws/tools/browser_session_manager.py +++ b/libs/aws/langchain_aws/tools/browser_session_manager.py @@ -21,7 +21,7 @@ class BrowserSessionManager: Concurrency protection is also implemented. Each browser session is tied to a specific thread_id and includes protection against concurrent usage. - When a browser is obtained via get_async_browser() or get_sync_browser(), + When a browser is obtained via `get_async_browser()` or `get_sync_browser()`, it is marked as "in use", and subsequent attempts to access the same browser session will raise a RuntimeError until it is released. In general, different callers should use different thread_ids to avoid concurrency issues. diff --git a/libs/aws/langchain_aws/tools/browser_toolkit.py b/libs/aws/langchain_aws/tools/browser_toolkit.py index acbe1dde..5d6d4481 100644 --- a/libs/aws/langchain_aws/tools/browser_toolkit.py +++ b/libs/aws/langchain_aws/tools/browser_toolkit.py @@ -20,7 +20,6 @@ class BrowserToolkit: Example: ```python - import asyncio from langchain.agents import create_agent from langchain_aws.tools import create_browser_toolkit diff --git a/libs/aws/langchain_aws/tools/code_interpreter_toolkit.py b/libs/aws/langchain_aws/tools/code_interpreter_toolkit.py index 26a81f6b..b16fcaca 100644 --- a/libs/aws/langchain_aws/tools/code_interpreter_toolkit.py +++ b/libs/aws/langchain_aws/tools/code_interpreter_toolkit.py @@ -29,7 +29,6 @@ class CodeInterpreterToolkit: Example: ```python - import asyncio from langchain.agents import create_agent from langchain_aws.tools import create_code_interpreter_toolkit diff --git a/libs/aws/langchain_aws/utilities/redis.py b/libs/aws/langchain_aws/utilities/redis.py index b9814294..6532297b 100644 --- a/libs/aws/langchain_aws/utilities/redis.py +++ b/libs/aws/langchain_aws/utilities/redis.py @@ -57,17 +57,17 @@ def get_client(redis_url: str, **kwargs: Any) -> RedisType: Before creating a connection the existence of the database driver is checked and ValueError raised otherwise. 
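Returning to the `AmazonKendraRetriever` hunks above: the docstring example stops at construction, so a slightly fuller, hedged sketch may help. The index ID is the placeholder already used in the docstring, and the attribute filter is an assumed illustration of the Kendra `AttributeFilter` shape, not something introduced by this changeset:

```python
from langchain_aws import AmazonKendraRetriever

retriever = AmazonKendraRetriever(
    index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03",  # placeholder index ID
    region_name="us-west-2",
    top_k=3,
    # Optional server-side filtering; see the Kendra API reference.
    attribute_filter={
        "EqualsTo": {
            "Key": "_language_code",
            "Value": {"StringValue": "en"},
        }
    },
)

docs = retriever.invoke("How do I create a MemoryDB cluster?")
for doc in docs:
    print(doc.metadata.get("source"), doc.page_content[:80])
```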
-    To use, you should have the ``redis`` python package installed.
+    To use, you should have the `redis` python package installed.

     Example:
-        .. code-block:: python
-
-            from langchain_community.utilities.redis import get_client
-            redis_client = get_client(
-                redis_url="redis://username:password@localhost:6379"
-                index_name="my-index",
-                embedding_function=embeddings.embed_query,
-            )
+        ```python
+        from langchain_aws.utilities.redis import get_client
+
+        # Extra keyword arguments are forwarded to the underlying Redis client.
+        redis_client = get_client(
+            redis_url="redis://username:password@localhost:6379",
+        )
+        ```
     """
diff --git a/libs/aws/langchain_aws/utils.py b/libs/aws/langchain_aws/utils.py
index c509351a..c44534b2 100644
--- a/libs/aws/langchain_aws/utils.py
+++ b/libs/aws/langchain_aws/utils.py
@@ -31,19 +31,20 @@ class ContentHandlerBase(Generic[INPUT_TYPE, OUTPUT_TYPE]):
     """

     Example:
-        .. code-block:: python
-
-            class ContentHandler(ContentHandlerBase):
-                content_type = "application/json"
-                accepts = "application/json"
-
-                def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
-                    input_str = json.dumps({prompt: prompt, **model_kwargs})
-                    return input_str.encode('utf-8')
-
-                def transform_output(self, output: bytes) -> str:
-                    response_json = json.loads(output.read().decode("utf-8"))
-                    return response_json[0]["generated_text"]
+        ```python
+        class ContentHandler(ContentHandlerBase):
+            content_type = "application/json"
+            accepts = "application/json"

+            def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
+                # The key must be the string "prompt", not the variable itself.
+                input_str = json.dumps({"prompt": prompt, **model_kwargs})
+                return input_str.encode('utf-8')
+
+            def transform_output(self, output: bytes) -> str:
+                response_json = json.loads(output.read().decode("utf-8"))
+                return response_json[0]["generated_text"]
+        ```
     """

     content_type: Optional[str] = "text/plain"
diff --git a/libs/aws/langchain_aws/vectorstores/inmemorydb/base.py b/libs/aws/langchain_aws/vectorstores/inmemorydb/base.py
index abb43170..df7eb155 100644
--- a/libs/aws/langchain_aws/vectorstores/inmemorydb/base.py
+++ b/libs/aws/langchain_aws/vectorstores/inmemorydb/base.py
@@ -70,10 +70,12 @@ def check_index_exists(client: InMemoryDBType, index_name: str) -> bool:
 class InMemoryVectorStore(VectorStore):
     """InMemoryVectorStore vector database.

-    To use, you should have the ``redis`` python package installed
-    for AWS MemoryDB
+    To use, you should have the `redis` python package installed
+    for AWS MemoryDB:

-    .. code-block:: bash
+    ```bash
+    pip install redis
+    ```

     Once running, you can connect to the MemoryDB server with the following url schemas:
     - redis://: # simple connection
@@ -88,70 +90,67 @@ class InMemoryVectorStore(VectorStore):
     LangChain.

     For all the following examples assume we have the following imports:
-
-    .. code-block:: python
-
+    ```python
     from langchain_aws.vectorstores import InMemoryVectorStore
+    ```

+    Initialize, create index, and load Documents:
+    ```python
+    from langchain_aws.vectorstores import InMemoryVectorStore

-    Initialize, create index, and load Documents
-    .. code-block:: python
-
-    from langchain_aws.vectorstores import InMemoryVectorStore
-
-    rds = InMemoryVectorStore.from_documents(
-        documents, # a list of Document objects from loaders or created
-        embeddings, # an Embeddings object
-        redis_url="redis://cluster_endpoint:6379",
-    )
-
-    Initialize, create index, and load Documents with metadata
-    ..
code-block:: python - - - rds = InMemoryVectorStore.from_texts( - texts, # a list of strings - metadata, # a list of metadata dicts - embeddings, # an Embeddings object - redis_url="redis://cluster_endpoint:6379", - ) + rds = InMemoryVectorStore.from_documents( + documents, # a list of Document objects from loaders or created + embeddings, # an Embeddings object + redis_url="redis://cluster_endpoint:6379", + ) + ``` + + Initialize, create index, and load Documents with metadata: + ```python + rds = InMemoryVectorStore.from_texts( + texts, # a list of strings + metadata, # a list of metadata dicts + embeddings, # an Embeddings object + redis_url="redis://cluster_endpoint:6379", + ) + ``` Initialize, create index, and load Documents with metadata and return keys - .. code-block:: python - - rds, keys = InMemoryVectorStore.from_texts_return_keys( - texts, # a list of strings - metadata, # a list of metadata dicts - embeddings, # an Embeddings object - redis_url="redis://cluster_endpoint:6379", - ) + ```python + rds, keys = InMemoryVectorStore.from_texts_return_keys( + texts, # a list of strings + metadata, # a list of metadata dicts + embeddings, # an Embeddings object + redis_url="redis://cluster_endpoint:6379", + ) + ``` For use cases where the index needs to stay alive, you can initialize with an index name such that it's easier to reference later - .. code-block:: python - - rds = InMemoryVectorStore.from_texts( - texts, # a list of strings - metadata, # a list of metadata dicts - embeddings, # an Embeddings object - index_name="my-index", - redis_url="redis://cluster_endpoint:6379", - ) + ```python + rds = InMemoryVectorStore.from_texts( + texts, # a list of strings + metadata, # a list of metadata dicts + embeddings, # an Embeddings object + index_name="my-index", + redis_url="redis://cluster_endpoint:6379", + ) + ``` Initialize and connect to an existing index (from above) - .. code-block:: python - - # must pass in schema and key_prefix from another index - existing_rds = InMemoryVectorStore.from_existing_index( - embeddings, # an Embeddings object - index_name="my-index", - schema=rds.schema, # schema dumped from another index - key_prefix=rds.key_prefix, # key prefix from another index - redis_url="redis://cluster_endpoint:6379", - ) + ```python + # must pass in schema and key_prefix from another index + existing_rds = InMemoryVectorStore.from_existing_index( + embeddings, # an Embeddings object + index_name="my-index", + schema=rds.schema, # schema dumped from another index + key_prefix=rds.key_prefix, # key prefix from another index + redis_url="redis://username:password@cluster_endpoint:6379", + ) + ``` Advanced examples: @@ -162,19 +161,19 @@ class InMemoryVectorStore(VectorStore): vector schema for your use case. ex. using HNSW instead of FLAT (knn) which is the default - .. code-block:: python - - vector_schema = { - "algorithm": "HNSW" - } + ```python + vector_schema = { + "algorithm": "HNSW" + } - rds = InMemoryVectorStore.from_texts( - texts, # a list of strings - metadata, # a list of metadata dicts - embeddings, # an Embeddings object - vector_schema=vector_schema, - redis_url="redis://cluster_endpoint:6379", - ) + rds = InMemoryVectorStore.from_texts( + texts, # a list of strings + metadata, # a list of metadata dicts + embeddings, # an Embeddings object + vector_schema=vector_schema, + redis_url="redis://cluster_endpoint:6379", + ) + ``` Custom index schema can be supplied to change the way that the metadata is indexed. 
This is useful for you would like to use the @@ -193,30 +192,30 @@ class InMemoryVectorStore(VectorStore): To override these rules, you can pass in a custom index schema like the following - .. code-block:: yaml - - tag: - - name: credit_score - text: - - name: user - - name: job + ```yaml + tag: + - name: credit_score + text: + - name: user + - name: job + ``` - Typically, the ``credit_score`` field would be a text field since it's a string, + Typically, the `credit_score` field would be a text field since it's a string, however, we can override this behavior by specifying the field type as shown with the yaml config (can also be a dictionary) above and the code below. - .. code-block:: python - - rds = InMemoryVectorStore.from_texts( - texts, # a list of strings - metadata, # a list of metadata dicts - embeddings, # an Embeddings object - index_schema="path/to/index_schema.yaml", # can also be a dictionary - redis_url="redis://cluster_endpoint:6379", - ) + ```python + rds = InMemoryVectorStore.from_texts( + texts, # a list of strings + metadata, # a list of metadata dicts + embeddings, # an Embeddings object + index_schema="path/to/index_schema.yaml", # can also be a dictionary + redis_url="redis://cluster_endpoint:6379", + ) + ``` When connecting to an existing index where a custom schema has been applied, it's - important to pass in the same schema to the ``from_existing_index`` method. + important to pass in the same schema to the `from_existing_index` method. Otherwise, the schema for newly added samples will be incorrect and metadata will not be returned. @@ -291,15 +290,15 @@ def from_texts_return_keys( always present in the langchain schema. Example: - .. code-block:: python - - from langchain_aws.vectorstores import InMemoryVectorStore - embeddings = OpenAIEmbeddings() - redis, keys = InMemoryVectorStore.from_texts_return_keys( - texts, - embeddings, - redis_url="redis://cluster_endpoint:6379" - ) + ```python + from langchain_aws.vectorstores import InMemoryVectorStore + embeddings = OpenAIEmbeddings() + redis, keys = InMemoryVectorStore.from_texts_return_keys( + texts, + embeddings, + redis_url="redis://cluster_endpoint:6379" + ) + ``` Args: texts: List of texts to add to the vectorstore. @@ -427,10 +426,11 @@ def from_texts( Example: - .. code-block:: python + ```python + from langchain_aws.vectorstores import InMemoryVectorStore - from langchain_aws.vectorstores import InMemoryVectorStore - embeddings = OpenAIEmbeddings() + embeddings = OpenAIEmbeddings() + ``` Args: texts: List of texts to add to the vectorstore. @@ -475,20 +475,20 @@ def from_existing_index( """Connect to an existing InMemoryVectorStore index. Example: - .. 
code-block:: python
-
-        from langchain_aws.vectorstores import InMemoryVectorStore
+        ```python
+        from langchain_aws.vectorstores import InMemoryVectorStore

-        embeddings = OpenAIEmbeddings()
+        embeddings = OpenAIEmbeddings()

-        # must pass in schema and key_prefix from another index
-        existing_rds = InMemoryVectorStore.from_existing_index(
-            embeddings,
-            index_name="my-index",
-            schema=rds.schema, # schema dumped from another index
-            key_prefix=rds.key_prefix, # key prefix from another index
-            redis_url="redis://username:password@cluster_endpoint:6379",
-        )
+        # must pass in schema and key_prefix from another index
+        existing_rds = InMemoryVectorStore.from_existing_index(
+            embeddings,
+            index_name="my-index",
+            schema=rds.schema, # schema dumped from another index
+            key_prefix=rds.key_prefix, # key prefix from another index
+            redis_url="redis://username:password@cluster_endpoint:6379",
+        )
+        ```

     Args:
         embedding (Embeddings): Embedding model class (i.e. OpenAIEmbeddings)
@@ -564,7 +564,7 @@ def delete(

     Args:
         ids: List of ids (keys in redis) to delete.
-        **kwargs: Additional keyword arguments. Supports ``redis_url`` for Redis
+        **kwargs: Additional keyword arguments. Supports `redis_url` for Redis
             connection url (can also be set as an environment variable: REDIS_URL).

     Returns:
@@ -676,7 +676,7 @@ def add_texts(
         embeddings (Optional[List[List[float]]], optional): Optional pre-generated
             embeddings.
         batch_size (int, optional): Batch size to use for writes. Defaults to 1000.
-        **kwargs: Additional keyword arguments. Supports ``keys`` or ``ids`` for
+        **kwargs: Additional keyword arguments. Supports `keys` or `ids` for
             identifiers of entries.

     Returns:
@@ -745,7 +745,7 @@ def similarity_search_with_score(

     The "scores" returned from this function are the raw vector distances
     from the query vector. For similarity scores, use
-    ``similarity_search_with_relevance_scores``.
+    `similarity_search_with_relevance_scores`.

     Args:
         query (str): The query text for which to find similar documents.
diff --git a/libs/aws/langchain_aws/vectorstores/inmemorydb/cache.py b/libs/aws/langchain_aws/vectorstores/inmemorydb/cache.py
index cab28f08..cb506920 100644
--- a/libs/aws/langchain_aws/vectorstores/inmemorydb/cache.py
+++ b/libs/aws/langchain_aws/vectorstores/inmemorydb/cache.py
@@ -149,11 +149,8 @@ def __init__(
         redis_url (str): URL to connect to MemoryDB.
         embedding (Embedding): Embedding provider for semantic encoding and search.
         score_threshold (float, 0.2):
-
-        Example:
-
-        .. code-block:: python
-
+        Example:
+        ```python
         from langchain_core.globals import set_llm_cache
         from langchain_aws.cache import InMemorySemanticCache

         set_llm_cache(InMemorySemanticCache(
             redis_url="redis://localhost:6379",
             embedding=OpenAIEmbeddings()
         ))
+        ```
     """
     self._cache_dict: Dict[str, InMemoryVectorStore] = {}
diff --git a/libs/aws/langchain_aws/vectorstores/inmemorydb/filters.py b/libs/aws/langchain_aws/vectorstores/inmemorydb/filters.py
index 75f25201..87f77407 100644
--- a/libs/aws/langchain_aws/vectorstores/inmemorydb/filters.py
+++ b/libs/aws/langchain_aws/vectorstores/inmemorydb/filters.py
@@ -154,8 +154,10 @@ def __eq__(
             The tag(s) to filter on.

        Example:
-            >>> from langchain_community.vectorstores.InMemoryDB import InMemoryDBTag
-            >>> filter = InMemoryDBTag("brand") == "nike"
+            ```python
+            from langchain_aws.vectorstores.inmemorydb import InMemoryDBTag
+            filter = InMemoryDBTag("brand") == "nike"
+            ```
        """
        self._set_tag_value(other, InMemoryDBFilterOperator.EQ)
        return InMemoryDBFilterExpression(str(self))
@@ -171,8 +173,10 @@ def __ne__(
            The tag(s) to filter on.

        Example:
-            >>> from langchain_community.vectorstores.InMemoryDB import InMemoryDBTag
-            >>> filter = InMemoryDBTag("brand") != "nike"
+            ```python
+            from langchain_aws.vectorstores.inmemorydb import InMemoryDBTag
+            filter = InMemoryDBTag("brand") != "nike"
+            ```
        """
        self._set_tag_value(other, InMemoryDBFilterOperator.NE)
        return InMemoryDBFilterExpression(str(self))
@@ -238,8 +242,10 @@ def __eq__(self, other: Union[int, float]) -> "InMemoryDBFilterExpression":
            other (Union[int, float]): The value to filter on.

        Example:
-            >>> from langchain_community.vectorstores.InMemoryDB import InMemoryDBNum
-            >>> filter = InMemoryDBNum("zipcode") == 90210
+            ```python
+            from langchain_aws.vectorstores.inmemorydb import InMemoryDBNum
+            filter = InMemoryDBNum("zipcode") == 90210
+            ```
        """
        self._set_value(other, self.SUPPORTED_VAL_TYPES, InMemoryDBFilterOperator.EQ)  # type: ignore
@@ -253,8 +259,10 @@ def __ne__(self, other: Union[int, float]) -> "InMemoryDBFilterExpression":
            other (Union[int, float]): The value to filter on.

        Example:
-            >>> from langchain_community.vectorstores.InMemoryDB import InMemoryDBNum
-            >>> filter = InMemoryDBNum("zipcode") != 90210
+            ```python
+            from langchain_aws.vectorstores.inmemorydb import InMemoryDBNum
+            filter = InMemoryDBNum("zipcode") != 90210
+            ```
        """
        self._set_value(other, self.SUPPORTED_VAL_TYPES, InMemoryDBFilterOperator.NE)  # type: ignore
@@ -267,8 +275,10 @@ def __gt__(self, other: Union[int, float]) -> "InMemoryDBFilterExpression":
            other (Union[int, float]): The value to filter on.

        Example:
-            >>> from langchain_community.vectorstores.InMemoryDB import InMemoryDBNum
-            >>> filter = InMemoryDBNum("age") > 18
+            ```python
+            from langchain_aws.vectorstores.inmemorydb import InMemoryDBNum
+            filter = InMemoryDBNum("age") > 18
+            ```
        """
        self._set_value(other, self.SUPPORTED_VAL_TYPES, InMemoryDBFilterOperator.GT)  # type: ignore
@@ -281,8 +291,10 @@ def __lt__(self, other: Union[int, float]) -> "InMemoryDBFilterExpression":
            other (Union[int, float]): The value to filter on.

        Example:
-            >>> from langchain_community.vectorstores.InMemoryDB import InMemoryDBNum
-            >>> filter = InMemoryDBNum("age") < 18
+            ```python
+            from langchain_aws.vectorstores.inmemorydb import InMemoryDBNum
+            filter = InMemoryDBNum("age") < 18
+            ```
        """
        self._set_value(other, self.SUPPORTED_VAL_TYPES, InMemoryDBFilterOperator.LT)  # type: ignore
@@ -295,8 +307,10 @@ def __ge__(self, other: Union[int, float]) -> "InMemoryDBFilterExpression":
            other (Union[int, float]): The value to filter on.

        Example:
-            >>> from langchain_community.vectorstores.InMemoryDB import InMemoryDBNum
-            >>> filter = InMemoryDBNum("age") >= 18
+            ```python
+            from langchain_aws.vectorstores.inmemorydb import InMemoryDBNum
+            filter = InMemoryDBNum("age") >= 18
+            ```
        """
        self._set_value(other, self.SUPPORTED_VAL_TYPES, InMemoryDBFilterOperator.GE)  # type: ignore
@@ -309,8 +323,10 @@ def __le__(self, other: Union[int, float]) -> "InMemoryDBFilterExpression":
            other (Union[int, float]): The value to filter on.

        Example:
-            >>> from langchain_community.vectorstores.InMemoryDB import InMemoryDBNum
-            >>> filter = InMemoryDBNum("age") <= 18
+            ```python
+            from langchain_aws.vectorstores.inmemorydb import InMemoryDBNum
+            filter = InMemoryDBNum("age") <= 18
+            ```
        """
        self._set_value(other, self.SUPPORTED_VAL_TYPES, InMemoryDBFilterOperator.LE)  # type: ignore
@@ -340,8 +356,10 @@ def __eq__(self, other: str) -> "InMemoryDBFilterExpression":
            other (str): The text value to filter on.

        Example:
-            >>> from langchain_community.vectorstores.InMemoryDB import InMemoryDBText
-            >>> filter = InMemoryDBText("job") == "engineer"
+            ```python
+            from langchain_aws.vectorstores.inmemorydb import InMemoryDBText
+            filter = InMemoryDBText("job") == "engineer"
+            ```
        """
        self._set_value(other, self.SUPPORTED_VAL_TYPES, InMemoryDBFilterOperator.EQ)  # type: ignore
@@ -355,8 +373,10 @@ def __ne__(self, other: str) -> "InMemoryDBFilterExpression":
            other (str): The text value to filter on.

        Example:
-            >>> from langchain_community.vectorstores.InMemoryDB import InMemoryDBText
-            >>> filter = InMemoryDBText("job") != "engineer"
+            ```python
+            from langchain_aws.vectorstores.inmemorydb import InMemoryDBText
+            filter = InMemoryDBText("job") != "engineer"
+            ```
        """
        self._set_value(other, self.SUPPORTED_VAL_TYPES, InMemoryDBFilterOperator.NE)  # type: ignore
        return InMemoryDBFilterExpression(str(self))
@@ -368,11 +388,13 @@ def __mod__(self, other: str) -> "InMemoryDBFilterExpression":
            other (str): The text value to filter on.

        Example:
-            >>> from langchain_aws.vectorstores.inmemorydb import InMemoryDBText
-            >>> filter = InMemoryDBText("job") % "engine*"  # suffix wild card match
-            >>> filter = InMemoryDBText("job") % "%%engine%%"  # fuzzy match w/ LD
-            >>> filter = InMemoryDBText("job") % "engineer|doctor"  # contains either
-            >>> filter = InMemoryDBText("job") % "engineer doctor"  # contains both
+            ```python
+            from langchain_aws.vectorstores.inmemorydb import InMemoryDBText
+            filter = InMemoryDBText("job") % "engine*"  # suffix wild card match
+            filter = InMemoryDBText("job") % "%%engine%%"  # fuzzy match w/ LD
+            filter = InMemoryDBText("job") % "engineer|doctor"  # contains either
+            filter = InMemoryDBText("job") % "engineer doctor"  # contains both
+            ```
        """
        self._set_value(other, self.SUPPORTED_VAL_TYPES, InMemoryDBFilterOperator.LIKE)  # type: ignore
@@ -402,14 +424,18 @@ class InMemoryDBFilterExpression:
    by combining InMemoryDBFilterFields using the & and | operators.

    Examples:
-        >>> from langchain_aws.vectorstores.inmemorydb import (
-        ...     InMemoryDBTag, InMemoryDBNum
-        ... )
-        >>> brand_is_nike = InMemoryDBTag("brand") == "nike"
-        >>> price_is_under_100 = InMemoryDBNum("price") < 100
-        >>> filter = brand_is_nike & price_is_under_100
-        >>> print(str(filter))
-        (@brand:{nike} @price:[-inf (100)])
+        ```python
+        from langchain_aws.vectorstores.inmemorydb import (
+            InMemoryDBTag, InMemoryDBNum
+        )
+        brand_is_nike = InMemoryDBTag("brand") == "nike"
+        price_is_under_100 = InMemoryDBNum("price") < 100
+        filter = brand_is_nike & price_is_under_100
+        print(str(filter))

+        # Result
+        # (@brand:{nike} @price:[-inf (100)])
+        ```
    """

diff --git a/libs/aws/langchain_aws/vectorstores/s3_vectors/base.py b/libs/aws/langchain_aws/vectorstores/s3_vectors/base.py
index 5ae0d4f2..06e5cfd1 100644
--- a/libs/aws/langchain_aws/vectorstores/s3_vectors/base.py
+++ b/libs/aws/langchain_aws/vectorstores/s3_vectors/base.py
@@ -38,47 +38,48 @@ class AmazonS3Vectors(VectorStore):

    For all the following examples assume we have the following:

-    ..
code-block:: python - + ```python from langchain_aws.embeddings import BedrockEmbeddings from langchain_aws.vectorstores.s3_vectors import AmazonS3Vectors embedding = BedrockEmbeddings() + ``` - Initialize, create vector index if not exist, and add texts - .. code-block:: python - - vector_store = AmazonS3Vectors.from_texts( - ["hello", "developer", "wife"], - vector_bucket_name="", - index_name="", - embedding=embedding, - ) - - Initialize, create vector index if not exist, and add texts and add Documents - .. code-block:: python + Initialize, create vector index if it does not exist, and add texts: - from langchain_core.documents import Document + ```python + vector_store = AmazonS3Vectors.from_texts( + ["hello", "developer", "wife"], + vector_bucket_name="", + index_name="", + embedding=embedding, + ) + ``` - vector_store = AmazonS3Vectors( - vector_bucket_name="", - index_name="", - embedding=embedding, - ) - vector_store.add_documents( - [ - Document("Star Wars", id="key1", metadata={"genre": "scifi"}), - Document("Jurassic Park", id="key2", metadata={"genre": "scifi"}), - Document("Finding Nemo", id="key3", metadata={"genre": "family"}), - ] - ) + Initialize, create vector index if it does not exist, and add Documents: + ```python + from langchain_core.documents import Document - Search with score(distance) and metadata filter - .. code-block:: python + vector_store = AmazonS3Vectors( + vector_bucket_name="", + index_name="", + embedding=embedding, + ) + vector_store.add_documents( + [ + Document("Star Wars", id="key1", metadata={"genre": "scifi"}), + Document("Jurassic Park", id="key2", metadata={"genre": "scifi"}), + Document("Finding Nemo", id="key3", metadata={"genre": "family"}), + ] + ) + ``` - vector_store.similarity_search_with_score( - "adventures in space", filter={"genre": {"$eq": "family"}} - ) + Search with score(distance) and metadata filter: + ```python + vector_store.similarity_search_with_score( + "adventures in space", filter={"genre": {"$eq": "family"}} + ) + ``` """ @@ -119,7 +120,7 @@ def __init__( keys page_content_metadata_key (Optional[str]): Key of metadata to store page_content in Document. If None, embedding page_content - but stored as an empty string. Default is "_page_content". + but stored as an empty string. Default is `_page_content`. create_index_if_not_exist (bool): Automatically create vector index if it does not exist. Default is True. relevance_score_fn (Optional[Callable[[float], float]]): The 'correct' @@ -138,25 +139,25 @@ def __init__( If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from 'AWS_ACCESS_KEY_ID' + If not provided, will be read from `AWS_ACCESS_KEY_ID` environment variable. aws_secret_access_key (Optional[str]): AWS secret_access_key. If provided, aws_access_key_id must also be provided. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from 'AWS_SECRET_ACCESS_KEY' + If not provided, will be read from `AWS_SECRET_ACCESS_KEY` environment variable. aws_session_token (Optional[str]): AWS session token. If provided, aws_access_key_id and aws_secret_access_key must also be provided. Not required unless using temporary credentials. 
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from 'AWS_SESSION_TOKEN' + If not provided, will be read from `AWS_SESSION_TOKEN` environment variable. endpoint_url (Optional[str]): Needed if you don't want to default to us-east-1 endpoint - config (Any): An optional botocore.config.Config instance to pass to + config (Any): An optional `botocore.config.Config` instance to pass to the client. client (Any): Boto3 client for s3vectors kwargs (Any): Additional keyword arguments. @@ -371,7 +372,7 @@ def similarity_search( query: Input text. k: Number of Documents to return. Defaults to 4. filter: Metadata filter to apply during the query. - See:https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-vectors-metadata-filtering.html + See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-vectors-metadata-filtering.html **kwargs: Arguments to pass to the search method. Returns: @@ -397,7 +398,7 @@ def similarity_search_with_score( query: Input text. k: Number of Documents to return. Defaults to 4. filter: Metadata filter to apply during the query. - See:https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-vectors-metadata-filtering.html + See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-vectors-metadata-filtering.html **kwargs: Additional keyword arguments. Returns: @@ -434,7 +435,7 @@ def similarity_search_by_vector( embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Metadata filter to apply during the query. - See:https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-vectors-metadata-filtering.html + See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-vectors-metadata-filtering.html **kwargs: Additional keyword arguments. Returns: @@ -523,25 +524,25 @@ def from_texts( # type: ignore[override] If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from 'AWS_ACCESS_KEY_ID' + If not provided, will be read from `AWS_ACCESS_KEY_ID` environment variable. aws_secret_access_key (Optional[str]): AWS secret_access_key. If provided, aws_access_key_id must also be provided. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from 'AWS_SECRET_ACCESS_KEY' + If not provided, will be read from `AWS_SECRET_ACCESS_KEY` environment variable. aws_session_token (Optional[str]): AWS session token. If provided, aws_access_key_id and aws_secret_access_key must also be provided. Not required unless using temporary credentials. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - If not provided, will be read from 'AWS_SESSION_TOKEN' + If not provided, will be read from `AWS_SESSION_TOKEN` environment variable. endpoint_url (Optional[str]): Needed if you don't want to default to us-east-1 endpoint - config (Any): An optional botocore.config.Config instance to pass to + config (Any): An optional `botocore.config.Config` instance to pass to the client. client (Any): Boto3 client for s3vectors kwargs: Arguments to pass to AmazonS3Vectors. 
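Putting the `AmazonS3Vectors` pieces above together, an end-to-end sketch looks roughly like this. The bucket and index names are placeholders, and it relies on `create_index_if_not_exist` defaulting to True as documented above:

```python
from langchain_aws.embeddings import BedrockEmbeddings
from langchain_aws.vectorstores.s3_vectors import AmazonS3Vectors

# Placeholder names; substitute your own vector bucket and index.
vector_store = AmazonS3Vectors.from_texts(
    ["Star Wars", "Jurassic Park", "Finding Nemo"],
    metadatas=[{"genre": "scifi"}, {"genre": "scifi"}, {"genre": "family"}],
    vector_bucket_name="my-vector-bucket",
    index_name="movies",
    embedding=BedrockEmbeddings(),
)

# The metadata filter is applied during the query; per the docstring above,
# the returned scores are raw distances, not normalized similarities.
for doc, distance in vector_store.similarity_search_with_score(
    "adventures in space", k=2, filter={"genre": {"$eq": "scifi"}}
):
    print(distance, doc.page_content)
```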
diff --git a/libs/langgraph-checkpoint-aws/README.md b/libs/langgraph-checkpoint-aws/README.md
index c36a0f1f..1b23b643 100644
--- a/libs/langgraph-checkpoint-aws/README.md
+++ b/libs/langgraph-checkpoint-aws/README.md
@@ -48,12 +48,12 @@ MODEL_ID = "us.anthropic.claude-3-7-sonnet-20250219-v1:0"
 # Sessions will be saved and persisted for actor_id/session_id combinations
 checkpointer = AgentCoreMemorySaver(MEMORY_ID, region_name=REGION)
 
-# Initialize LLM
-llm = init_chat_model(MODEL_ID, model_provider="bedrock_converse", region_name=REGION)
+# Initialize chat model
+model = init_chat_model(MODEL_ID, model_provider="bedrock_converse", region_name=REGION)
 
 # Create a pre-built langgraph agent (configurations work for custom agents too)
 graph = create_react_agent(
-    model=llm,
+    model=model,
     tools=tools,
     checkpointer=checkpointer,  # AgentCoreMemorySaver we created above
 )
@@ -95,7 +95,7 @@ store = AgentCoreMemoryStore(MEMORY_ID, region_name=REGION)
 
 # Pre-model hook runs and saves messages of your choosing to AgentCore Memory
 # for async processing and extraction
 def pre_model_hook(state, config: RunnableConfig, *, store: BaseStore):
-    """Hook that runs pre-LLM invocation to save the latest human message"""
+    """Hook that runs pre-model invocation to save the latest human message"""
     actor_id = config["configurable"]["actor_id"]
     thread_id = config["configurable"]["thread_id"]
@@ -103,7 +103,7 @@ def pre_model_hook(state, config: RunnableConfig, *, store: BaseStore):
     namespace = (actor_id, thread_id)
     messages = state.get("messages", [])
 
-    # Save the last human message we see before LLM invocation
+    # Save the last human message we see before model invocation
     for msg in reversed(messages):
         if isinstance(msg, HumanMessage):
             store.put(namespace, str(uuid.uuid4()), {"message": msg})
@@ -114,14 +114,14 @@ def pre_model_hook(state, config: RunnableConfig, *, store: BaseStore):
 #     # preferences = store.search(user_preferences_namespace, query=msg.content, limit=5)
 #     # Add to input messages as needed
 
-    return {"llm_input_messages": messages}
+    return {"llm_input_messages": messages}  # langgraph expects this state key
 
-# Initialize LLM
-llm = init_chat_model(MODEL_ID, model_provider="bedrock_converse", region_name=REGION)
+# Initialize chat model
+model = init_chat_model(MODEL_ID, model_provider="bedrock_converse", region_name=REGION)
 
 # Create a pre-built langgraph agent (configurations work for custom agents too)
 graph = create_react_agent(
-    model=llm,
+    model=model,
     tools=[],
     pre_model_hook=pre_model_hook,
 )
diff --git a/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/README.md b/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/README.md
index b76ee1da..f10bc369 100644
--- a/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/README.md
+++ b/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/README.md
@@ -44,12 +44,12 @@ MODEL_ID = "us.anthropic.claude-3-7-sonnet-20250219-v1:0"
 # Sessions will be saved and persisted for actor_id/session_id combinations
 checkpointer = AgentCoreMemorySaver(MEMORY_ID, region_name=REGION)
 
-# Initialize LLM
-llm = init_chat_model(MODEL_ID, model_provider="bedrock_converse", region_name=REGION)
+# Initialize chat model
+model = init_chat_model(MODEL_ID, model_provider="bedrock_converse", region_name=REGION)
 
 # Create a pre-built langgraph agent (configurations work for custom agents too)
 graph = create_react_agent(
-    model=llm,
+    model=model,
     tools=tools,
     checkpointer=checkpointer,  # AgentCoreMemorySaver we created above
 )
@@ -91,7 +91,7 @@ store = AgentCoreMemoryStore(MEMORY_ID, region_name=REGION)
 
 # Pre-model hook runs and saves messages of your choosing to AgentCore Memory
 # for async processing and extraction
 def pre_model_hook(state, config: RunnableConfig, *, store: BaseStore):
-    """Hook that runs pre-LLM invocation to save the latest human message"""
+    """Hook that runs pre-model invocation to save the latest human message"""
     actor_id = config["configurable"]["actor_id"]
     thread_id = config["configurable"]["thread_id"]
@@ -99,7 +99,7 @@ def pre_model_hook(state, config: RunnableConfig, *, store: BaseStore):
     namespace = (actor_id, thread_id)
     messages = state.get("messages", [])
 
-    # Save the last human message we see before LLM invocation
+    # Save the last human message we see before model invocation
     for msg in reversed(messages):
         if isinstance(msg, HumanMessage):
             store.put(namespace, str(uuid.uuid4()), {"message": msg})
@@ -110,14 +110,14 @@ def pre_model_hook(state, config: RunnableConfig, *, store: BaseStore):
 #     # preferences = store.search(user_preferences_namespace, query=msg.content, limit=5)
 #     # Add to input messages as needed
 
-    return {"llm_input_messages": messages}
+    return {"llm_input_messages": messages}  # langgraph expects this state key
 
-# Initialize LLM
-llm = init_chat_model(MODEL_ID, model_provider="bedrock_converse", region_name=REGION)
+# Initialize chat model
+model = init_chat_model(MODEL_ID, model_provider="bedrock_converse", region_name=REGION)
 
 # Create a pre-built langgraph agent (configurations work for custom agents too)
 graph = create_react_agent(
-    model=llm,
+    model=model,
     tools=[],
     pre_model_hook=pre_model_hook,
 )
diff --git a/samples/agents/agents_with_nova.ipynb b/samples/agents/agents_with_nova.ipynb
index b1c49e6b..08eecd58 100644
--- a/samples/agents/agents_with_nova.ipynb
+++ b/samples/agents/agents_with_nova.ipynb
@@ -86,7 +86,7 @@
    "source": [
     "from langchain_aws import ChatBedrock\n",
     "\n",
-    "llm = ChatBedrock(\n",
+    "model = ChatBedrock(\n",
     "    model=\"us.amazon.nova-lite-v1:0\",\n",
     "    model_kwargs=dict(\n",
     "        temperature=1,\n",
@@ -95,7 +95,7 @@
     "    ),\n",
     ")\n",
     "\n",
-    "llm_with_tools = llm.bind_tools(tools)"
+    "model_with_tools = model.bind_tools(tools)"
    ]
   },
   {
@@ -119,7 +119,7 @@
     }
    ],
    "source": [
-    "response = llm_with_tools.invoke([(\"user\", \"How much will my 8 day trip cost?\")])\n",
+    "response = model_with_tools.invoke([(\"user\", \"How much will my 8 day trip cost?\")])\n",
     "\n",
     "print(response.tool_calls)"
    ]
@@ -174,7 +174,7 @@
    "memory = MemorySaver()\n",
    "\n",
    "agent_executor = create_agent(\n",
-    "    llm, tools, prompt=system_prompt, checkpointer=memory\n",
+    "    model, tools, prompt=system_prompt, checkpointer=memory\n",
    ")\n",
    "\n",
    "\n",
diff --git a/samples/inmemory/retriever.ipynb b/samples/inmemory/retriever.ipynb
index b2bcb66f..cced364f 100644
--- a/samples/inmemory/retriever.ipynb
+++ b/samples/inmemory/retriever.ipynb
@@ -71,7 +71,7 @@
    "outputs": [],
    "source": [
     "# use the Anthropic Claude model\n",
-    "llm = ChatBedrock(\n",
+    "model = ChatBedrock(\n",
     "    model_id=\"anthropic.claude-3-sonnet-20240229-v1:0\", model_kwargs=model_kwargs\n",
     ")"
    ]
@@ -392,7 +392,7 @@
     "        (\"human\", \"{input}\"),\n",
     "    ]\n",
     ")\n",
-    "question_answer_chain = create_stuff_documents_chain(llm, prompt)\n",
+    "question_answer_chain = create_stuff_documents_chain(model, prompt)\n",
     "chain = create_retrieval_chain(retriever, question_answer_chain)\n",
     "\n",
     "query = \"How do i create a MemoryDB cluster?\"\n",
diff --git a/samples/inmemory/semantic_cache.ipynb b/samples/inmemory/semantic_cache.ipynb
index ad5eab8c..5296445e 100644
---
a/samples/inmemory/semantic_cache.ipynb +++ b/samples/inmemory/semantic_cache.ipynb @@ -54,7 +54,7 @@ "outputs": [], "source": [ "# Use the Anthropic Claude model\n", - "llm = ChatBedrock(\n", + "model = ChatBedrock(\n", " model_id=\"anthropic.claude-3-sonnet-20240229-v1:0\", model_kwargs=model_kwargs\n", ")\n" ] @@ -122,7 +122,7 @@ "outputs": [], "source": [ "%%time\n", - "response = llm.invoke(\"Tell me about mission to moon\")\n", + "response = model.invoke(\"Tell me about mission to moon\")\n", "print(response.content)" ] }, @@ -169,7 +169,7 @@ "outputs": [], "source": [ "%%time\n", - "response = llm.invoke(\"Tell me about mission to moon\")\n", + "response = model.invoke(\"Tell me about mission to moon\")\n", "print(response.content)" ] }, @@ -181,7 +181,7 @@ "outputs": [], "source": [ "%%time\n", - "response = llm.invoke(\"Who first invented a telescope\")\n", + "response = model.invoke(\"Who first invented a telescope\")\n", "print(response.content)" ] }, @@ -193,7 +193,7 @@ "outputs": [], "source": [ "%%time\n", - "response = llm.invoke(\"Who first invented a car\")\n", + "response = model.invoke(\"Who first invented a car\")\n", "print(response.content)" ] }, @@ -205,7 +205,7 @@ "outputs": [], "source": [ "%%time\n", - "respone3 = llm.invoke(\"Who first a Telescope\")\n", + "respone3 = model.invoke(\"Who first a Telescope\")\n", "print(respone3.content)" ] } diff --git a/samples/memory/agentcore_memory_checkpointer.ipynb b/samples/memory/agentcore_memory_checkpointer.ipynb index 7d9b75c1..68d4bea6 100644 --- a/samples/memory/agentcore_memory_checkpointer.ipynb +++ b/samples/memory/agentcore_memory_checkpointer.ipynb @@ -74,7 +74,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "id": "226d094c-a05d-4f88-851d-cc42ff63ef11", "metadata": {}, "outputs": [], @@ -86,8 +86,8 @@ "# Initialize checkpointer for state persistence\n", "checkpointer = AgentCoreMemorySaver(MEMORY_ID, region_name=REGION)\n", "\n", - "# Initialize LLM\n", - "llm = init_chat_model(MODEL_ID, model_provider=\"bedrock_converse\", region_name=REGION)" + "# Initialize Bedrock LLM\n", + "model = init_chat_model(MODEL_ID, model_provider=\"bedrock_converse\", region_name=REGION)" ] }, { @@ -134,7 +134,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "a116a57f", "metadata": {}, "outputs": [ @@ -152,7 +152,7 @@ ], "source": [ "graph = create_agent(\n", - " llm,\n", + " model,\n", " tools=tools,\n", " prompt=\"You are a helpful assistant\",\n", " checkpointer=checkpointer,\n", diff --git a/samples/memory/agentcore_memory_checkpointer_human_loop.ipynb b/samples/memory/agentcore_memory_checkpointer_human_loop.ipynb index 56cc9111..71236209 100644 --- a/samples/memory/agentcore_memory_checkpointer_human_loop.ipynb +++ b/samples/memory/agentcore_memory_checkpointer_human_loop.ipynb @@ -69,7 +69,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "id": "fb67b4ee-ff3f-4576-8072-8885c2a47e11", "metadata": {}, "outputs": [], @@ -81,8 +81,8 @@ "# Initialize checkpointer for state persistence\n", "checkpointer = AgentCoreMemorySaver(MEMORY_ID, region_name=REGION)\n", "\n", - "# Initialize LLM\n", - "llm = init_chat_model(MODEL_ID, model_provider=\"bedrock_converse\", region_name=REGION)" + "# Initialize chat model\n", + "model = init_chat_model(MODEL_ID, model_provider=\"bedrock_converse\", region_name=REGION)" ] }, { @@ -134,7 +134,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": 
"2cbc7289-6595-4b31-8472-47d2de00c4bd", "metadata": {}, "outputs": [ @@ -152,7 +152,7 @@ ], "source": [ "graph = create_agent(\n", - " llm,\n", + " model,\n", " tools=tools,\n", " prompt=\"You are a helpful assistant\",\n", " checkpointer=checkpointer,\n", @@ -191,10 +191,10 @@ "name": "stdout", "output_type": "stream", "text": [ - "================================\u001B[1m Human Message \u001B[0m=================================\n", + "================================\u001b[1m Human Message \u001b[0m=================================\n", "\n", "I would like to work with a customer service human agent.\n", - "==================================\u001B[1m Ai Message \u001B[0m==================================\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", "[{'type': 'text', 'text': \"I understand you'd like to connect with a customer service human agent. I can help you with that by requesting human assistance.\"}, {'type': 'tool_use', 'name': 'human_assistance', 'input': {'query': 'Customer requesting to speak with a human customer service agent.'}, 'id': 'tooluse_tFutleh5RK-mGOvg6YQwYw'}]\n", "Tool Calls:\n", @@ -271,7 +271,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "==================================\u001B[1m Ai Message \u001B[0m==================================\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", "[{'type': 'text', 'text': \"I understand you'd like to connect with a customer service human agent. I can help you with that by requesting human assistance.\"}, {'type': 'tool_use', 'name': 'human_assistance', 'input': {'query': 'Customer requesting to speak with a human customer service agent.'}, 'id': 'tooluse_tFutleh5RK-mGOvg6YQwYw'}]\n", "Tool Calls:\n", @@ -279,11 +279,11 @@ " Call ID: tooluse_tFutleh5RK-mGOvg6YQwYw\n", " Args:\n", " query: Customer requesting to speak with a human customer service agent.\n", - "=================================\u001B[1m Tool Message \u001B[0m=================================\n", + "=================================\u001b[1m Tool Message \u001b[0m=================================\n", "Name: human_assistance\n", "\n", "I'm sorry to hear that you are frustrated. Looking at the past conversation history, I can see that you've requested a refund. 
I've gone ahead and credited it to your account.\n", - "==================================\u001B[1m Ai Message \u001B[0m==================================\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", "A human customer service agent has responded to your request and provided the following information:\n", "\n", diff --git a/samples/memory/agentcore_memory_store_long_term_search.ipynb b/samples/memory/agentcore_memory_store_long_term_search.ipynb index 1fc8f3ed..2c67310a 100644 --- a/samples/memory/agentcore_memory_store_long_term_search.ipynb +++ b/samples/memory/agentcore_memory_store_long_term_search.ipynb @@ -79,7 +79,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -91,7 +91,7 @@ "store = AgentCoreMemoryStore(memory_id=MEMORY_ID, region_name=REGION)\n", "\n", "# Initialize Bedrock LLM\n", - "llm = init_chat_model(MODEL_ID, model_provider=\"bedrock_converse\", region_name=REGION)" + "model = init_chat_model(MODEL_ID, model_provider=\"bedrock_converse\", region_name=REGION)" ] }, { @@ -174,12 +174,12 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "graph = create_react_agent(\n", - " llm,\n", + " model,\n", " store=store,\n", " tools=[],\n", " checkpointer=InMemorySaver(),\n", diff --git a/samples/models/getting_started_with_nova.ipynb b/samples/models/getting_started_with_nova.ipynb index 44e6ad61..7052bb8f 100644 --- a/samples/models/getting_started_with_nova.ipynb +++ b/samples/models/getting_started_with_nova.ipynb @@ -60,7 +60,7 @@ "from langchain_aws import ChatBedrock\n", "from langchain_core.messages import HumanMessage\n", "\n", - "llm = ChatBedrock(\n", + "model = ChatBedrock(\n", " model_id=\"us.amazon.nova-lite-v1:0\", model_kwargs=dict(temperature=0.7)\n", ")\n", "\n", @@ -69,7 +69,7 @@ " (\"user\", \"Teardrops on My Guitar\"),\n", "]\n", "\n", - "response = llm.invoke(messages)\n", + "response = model.invoke(messages)\n", "print(f\"Request ID: {response.id}\")\n", "response.pretty_print()\n", "\n", @@ -81,7 +81,7 @@ " HumanMessage(content=\"Select your favorite and tell me why\"),\n", "]\n", "\n", - "response = llm.invoke(multi_turn_messages)\n", + "response = model.invoke(multi_turn_messages)\n", "print(f\"\\n\\nRequest ID: {response.id}\")\n", "response.pretty_print()" ] @@ -105,11 +105,11 @@ "from langchain_core.messages import HumanMessage, SystemMessage\n", "from langchain_core.output_parsers import StrOutputParser\n", "\n", - "llm = ChatBedrock(\n", + "model = ChatBedrock(\n", " model_id=\"us.amazon.nova-lite-v1:0\", model_kwargs=dict(temperature=0.7)\n", ")\n", "\n", - "chain = llm | StrOutputParser()\n", + "chain = model | StrOutputParser()\n", "\n", "messages = [\n", " SystemMessage(content=\"You are an author with experience writing creative novels\"),\n", @@ -173,7 +173,7 @@ "\n", "tools = [multiply]\n", "\n", - "llm_with_tools = ChatBedrock(\n", + "model_with_tools = ChatBedrock(\n", " model_id=\"us.amazon.nova-lite-v1:0\",\n", " model_kwargs=dict(\n", " temperature=1,\n", @@ -182,7 +182,7 @@ " ),\n", ").bind_tools(tools)\n", "\n", - "response = llm_with_tools.invoke([(\"user\", \"What is 8*8\")])\n", + "response = model_with_tools.invoke([(\"user\", \"What is 8*8\")])\n", "\n", "print(\"[Model Response]\\n\")\n", "print(response.content)\n", @@ -247,7 +247,7 @@ "\n", "tools = [multiply]\n", "\n", - "llm_with_tools = ChatBedrock(\n", + 
"model_with_tools = ChatBedrock(\n", " model_id=\"us.amazon.nova-lite-v1:0\",\n", " model_kwargs=dict(\n", " temperature=1,\n", @@ -268,7 +268,7 @@ " ]\n", ")\n", "\n", - "agent = create_tool_calling_agent(llm, tools, prompt)\n", + "agent = create_tool_calling_agent(model, tools, prompt)\n", "\n", "agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\n", "\n", @@ -314,7 +314,7 @@ " punchline: str = Field(description=\"The punchline to the joke\")\n", "\n", "\n", - "llm = ChatBedrock(\n", + "model = ChatBedrock(\n", " model=\"us.amazon.nova-lite-v1:0\",\n", " model_kwargs=dict(\n", " temperature=1,\n", @@ -323,8 +323,8 @@ " ),\n", ")\n", "\n", - "structured_llm = llm.with_structured_output(Joke)\n", - "structured_llm.invoke(\"Tell me a joke about cats\")" + "structured_model = model.with_structured_output(Joke)\n", + "structured_model.invoke(\"Tell me a joke about cats\")" ] } ],