Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 5 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -48,16 +48,15 @@ pip install langgraph-checkpoint-aws
Here's a simple example of how to use the `langchain-aws` package.

```python
from langchain_aws import ChatBedrock
from langchain_aws import ChatBedrockConverse

# Initialize the Bedrock chat model
llm = ChatBedrock(
model="anthropic.claude-3-sonnet-20240229-v1:0",
beta_use_converse_api=True
model = ChatBedrockConverse(
model="us.anthropic.claude-sonnet-4-5-20250929-v1:0"
)

# Invoke the llm
response = llm.invoke("Hello! How are you today?")
# Invoke the model
response = model.invoke("Hello! How are you today?")
print(response)
```

Expand Down
8 changes: 4 additions & 4 deletions libs/aws/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,13 @@ Alternatively, set the `AWS_BEARER_TOKEN_BEDROCK` environment variable locally f

## Chat Models

`ChatBedrock` class exposes chat models from Bedrock.
`ChatBedrockConverse` class exposes chat models from Bedrock.

```python
from langchain_aws import ChatBedrock
from langchain_aws import ChatBedrockConverse

llm = ChatBedrock()
llm.invoke("Sing a ballad of LangChain.")
model = ChatBedrockConverse()
model.invoke("Sing a ballad of LangChain.")
```

## Embeddings
Expand Down
4 changes: 2 additions & 2 deletions libs/aws/langchain_aws/chains/graph_qa/neptune_cypher.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,13 +106,13 @@ def create_neptune_opencypher_qa_chain(
See https://python.langchain.com/docs/security for more information.

Example:
.. code-block:: python

```python
chain = create_neptune_opencypher_qa_chain(
llm=llm,
graph=graph
)
response = chain.invoke({"query": "your_query_here"})
```

"""

Expand Down
4 changes: 2 additions & 2 deletions libs/aws/langchain_aws/chains/graph_qa/neptune_sparql.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,13 +80,13 @@ def create_neptune_sparql_qa_chain(
See https://python.langchain.com/docs/security for more information.

Example:
.. code-block:: python

```python
chain = create_neptune_sparql_qa_chain(
llm=llm,
graph=graph
)
response = chain.invoke({"query": "your_query_here"})
```

"""
if allow_dangerous_requests is not True:
Expand Down
138 changes: 69 additions & 69 deletions libs/aws/langchain_aws/chat_models/bedrock.py
Original file line number Diff line number Diff line change
Expand Up @@ -761,11 +761,11 @@ class ChatBedrock(BaseChatModel, BedrockBase):

system_prompt_with_tools: str = ""
beta_use_converse_api: bool = False
"""Use the new Bedrock ``converse`` API which provides a standardized interface to
"""Use the new Bedrock `converse` API which provides a standardized interface to
all Bedrock models. Support still in beta. See ChatBedrockConverse docs for more."""

stop_sequences: Optional[List[str]] = Field(default=None, alias="stop")
"""Stop sequence inference parameter from new Bedrock ``converse`` API providing
"""Stop sequence inference parameter from new Bedrock `converse` API providing
a sequence of characters that causes a model to stop generating a response. See
https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_InferenceConfiguration.html
for more.
Expand Down Expand Up @@ -1216,7 +1216,7 @@ def bind_tools(
(if any), or a dict of the form:
{"type": "function", "function": {"name": <<tool_name>>}}.
**kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.
[Runnable][langchain_core.runnables.Runnable] constructor.

"""
if self.beta_use_converse_api:
Expand Down Expand Up @@ -1315,81 +1315,81 @@ def with_structured_output(
outputs a BaseModel.

Example: Pydantic schema (include_raw=False):
.. code-block:: python
```python
from langchain_aws.chat_models.bedrock import ChatBedrock
from pydantic import BaseModel

from langchain_aws.chat_models.bedrock import ChatBedrock
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str

class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
model = ChatBedrock(
model_id="anthropic.claude-3-sonnet-20240229-v1:0",
model_kwargs={"temperature": 0.001},
) # type: ignore[call-arg]
structured_model = model.with_structured_output(AnswerWithJustification)

llm =ChatBedrock(
model_id="anthropic.claude-3-sonnet-20240229-v1:0",
model_kwargs={"temperature": 0.001},
) # type: ignore[call-arg]
structured_llm = llm.with_structured_output(AnswerWithJustification)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")

structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")

# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```

Example: Pydantic schema (include_raw=True):
.. code-block:: python

from langchain_aws.chat_models.bedrock import ChatBedrock
from pydantic import BaseModel

class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str

llm =ChatBedrock(
model_id="anthropic.claude-3-sonnet-20240229-v1:0",
model_kwargs={"temperature": 0.001},
) # type: ignore[call-arg]
structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)

structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
```python
from langchain_aws.chat_models.bedrock import ChatBedrock
from pydantic import BaseModel

class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str

model = ChatBedrock(
model_id="anthropic.claude-3-sonnet-20240229-v1:0",
model_kwargs={"temperature": 0.001},
) # type: ignore[call-arg]
structured_model = model.with_structured_output(AnswerWithJustification, include_raw=True)

structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
```

Example: Dict schema (include_raw=False):
.. code-block:: python

from langchain_aws.chat_models.bedrock import ChatBedrock

schema = {
"name": "AnswerWithJustification",
"description": "An answer to the user question along with justification for the answer.",
"input_schema": {
"type": "object",
"properties": {
"answer": {"type": "string"},
"justification": {"type": "string"},
},
"required": ["answer", "justification"]
}
```python
from langchain_aws.chat_models.bedrock import ChatBedrock

schema = {
"name": "AnswerWithJustification",
"description": "An answer to the user question along with justification for the answer.",
"input_schema": {
"type": "object",
"properties": {
"answer": {"type": "string"},
"justification": {"type": "string"},
},
"required": ["answer", "justification"]
}
llm =ChatBedrock(
model_id="anthropic.claude-3-sonnet-20240229-v1:0",
model_kwargs={"temperature": 0.001},
) # type: ignore[call-arg]
structured_llm = llm.with_structured_output(schema)

structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
}
model = ChatBedrock(
model_id="anthropic.claude-3-sonnet-20240229-v1:0",
model_kwargs={"temperature": 0.001},
) # type: ignore[call-arg]
structured_model = model.with_structured_output(schema)

structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```

""" # noqa: E501
if self.beta_use_converse_api:
Expand Down
Loading