From 403075ea8906036a16dbc87307bd425dc03d9c00 Mon Sep 17 00:00:00 2001
From: Nick Clegg
Date: Fri, 27 Jun 2025 18:28:07 +0000
Subject: [PATCH] fix: Fix docs warnings

---
 src/strands/agent/agent.py          |  4 ++--
 .../null_conversation_manager.py    |  4 ++--
 src/strands/models/anthropic.py     |  4 ++--
 src/strands/models/bedrock.py       |  4 ++--
 src/strands/models/litellm.py       |  4 ++--
 src/strands/models/llamaapi.py      |  4 ++--
 src/strands/models/ollama.py        |  4 ++--
 src/strands/models/openai.py        |  4 ++--
 src/strands/tools/decorator.py      | 17 ++++++++---------
 src/strands/types/models/model.py   |  4 ++--
 src/strands/types/models/openai.py  |  4 ++--
 11 files changed, 28 insertions(+), 29 deletions(-)

diff --git a/src/strands/agent/agent.py b/src/strands/agent/agent.py
index 878cbefcd..460cf5567 100644
--- a/src/strands/agent/agent.py
+++ b/src/strands/agent/agent.py
@@ -410,9 +410,9 @@ def structured_output(self, output_model: Type[T], prompt: Optional[str] = None)
         instruct the model to output the structured data.
 
         Args:
-            output_model(Type[BaseModel]): The output model (a JSON schema written as a Pydantic BaseModel)
+            output_model: The output model (a JSON schema written as a Pydantic BaseModel)
                 that the agent will use when responding.
-            prompt(Optional[str]): The prompt to use for the agent.
+            prompt: The prompt to use for the agent.
         """
         messages = self.messages
         if not messages and not prompt:
diff --git a/src/strands/agent/conversation_manager/null_conversation_manager.py b/src/strands/agent/conversation_manager/null_conversation_manager.py
index 4af4eb788..cfd5562d5 100644
--- a/src/strands/agent/conversation_manager/null_conversation_manager.py
+++ b/src/strands/agent/conversation_manager/null_conversation_manager.py
@@ -23,7 +23,7 @@ def apply_management(self, _agent: "Agent") -> None:
         """Does nothing to the conversation history.
 
         Args:
-            agent: The agent whose conversation history will remain unmodified.
+            _agent: The agent whose conversation history will remain unmodified.
         """
         pass
 
@@ -31,7 +31,7 @@ def reduce_context(self, _agent: "Agent", e: Optional[Exception] = None) -> None
         """Does not reduce context and raises an exception.
 
         Args:
-            agent: The agent whose conversation history will remain unmodified.
+            _agent: The agent whose conversation history will remain unmodified.
             e: The exception that triggered the context reduction, if any.
 
         Raises:
diff --git a/src/strands/models/anthropic.py b/src/strands/models/anthropic.py
index d70ed0c8c..e91cd4422 100644
--- a/src/strands/models/anthropic.py
+++ b/src/strands/models/anthropic.py
@@ -382,8 +382,8 @@ def structured_output(
         """Get structured output from the model.
 
         Args:
-            output_model(Type[BaseModel]): The output model to use for the agent.
-            prompt(Messages): The prompt messages to use for the agent.
+            output_model: The output model to use for the agent.
+            prompt: The prompt messages to use for the agent.
 
         Yields:
             Model events with the last being the structured output.
diff --git a/src/strands/models/bedrock.py b/src/strands/models/bedrock.py
index 31d57fd61..c9b753ec7 100644
--- a/src/strands/models/bedrock.py
+++ b/src/strands/models/bedrock.py
@@ -525,8 +525,8 @@ def structured_output(
         """Get structured output from the model.
 
         Args:
-            output_model(Type[BaseModel]): The output model to use for the agent.
-            prompt(Messages): The prompt messages to use for the agent.
+            output_model: The output model to use for the agent.
+            prompt: The prompt messages to use for the agent.
 
         Yields:
             Model events with the last being the structured output.
diff --git a/src/strands/models/litellm.py b/src/strands/models/litellm.py
index 8c5020637..691887b54 100644
--- a/src/strands/models/litellm.py
+++ b/src/strands/models/litellm.py
@@ -110,8 +110,8 @@ def structured_output(
         """Get structured output from the model.
 
         Args:
-            output_model(Type[BaseModel]): The output model to use for the agent.
-            prompt(Messages): The prompt messages to use for the agent.
+            output_model: The output model to use for the agent.
+            prompt: The prompt messages to use for the agent.
 
         Yields:
             Model events with the last being the structured output.
diff --git a/src/strands/models/llamaapi.py b/src/strands/models/llamaapi.py
index 751b1b1b7..74c098e36 100644
--- a/src/strands/models/llamaapi.py
+++ b/src/strands/models/llamaapi.py
@@ -395,8 +395,8 @@ def structured_output(
         """Get structured output from the model.
 
         Args:
-            output_model(Type[BaseModel]): The output model to use for the agent.
-            prompt(Messages): The prompt messages to use for the agent.
+            output_model: The output model to use for the agent.
+            prompt: The prompt messages to use for the agent.
 
         Yields:
             Model events with the last being the structured output.
diff --git a/src/strands/models/ollama.py b/src/strands/models/ollama.py
index 431b1f45b..1c834bf6e 100644
--- a/src/strands/models/ollama.py
+++ b/src/strands/models/ollama.py
@@ -321,8 +321,8 @@ def structured_output(
         """Get structured output from the model.
 
         Args:
-            output_model(Type[BaseModel]): The output model to use for the agent.
-            prompt(Messages): The prompt messages to use for the agent.
+            output_model: The output model to use for the agent.
+            prompt: The prompt messages to use for the agent.
 
         Yields:
             Model events with the last being the structured output.
diff --git a/src/strands/models/openai.py b/src/strands/models/openai.py
index 6f761c7c2..eb58ae41c 100644
--- a/src/strands/models/openai.py
+++ b/src/strands/models/openai.py
@@ -145,8 +145,8 @@ def structured_output(
         """Get structured output from the model.
 
         Args:
-            output_model(Type[BaseModel]): The output model to use for the agent.
-            prompt(Messages): The prompt messages to use for the agent.
+            output_model: The output model to use for the agent.
+            prompt: The prompt messages to use for the agent.
 
         Yields:
             Model events with the last being the structured output.
diff --git a/src/strands/tools/decorator.py b/src/strands/tools/decorator.py
index 33aa3dd6d..46a6320ad 100644
--- a/src/strands/tools/decorator.py
+++ b/src/strands/tools/decorator.py
@@ -530,15 +530,14 @@ def tool( # type: ignore
         ```python
         @tool
         def my_tool(name: str, count: int = 1) -> str:
-            '''Does something useful with the provided parameters.
-
-            Args:
-                name: The name to process
-                count: Number of times to process (default: 1)
-
-            Returns:
-                A message with the result
-            '''
+            # Does something useful with the provided parameters.
+            #
+            # Parameters:
+            #     name: The name to process
+            #     count: Number of times to process (default: 1)
+            #
+            # Returns:
+            #     A message with the result
             return f"Processed {name} {count} times"
 
         agent = Agent(tools=[my_tool])
diff --git a/src/strands/types/models/model.py b/src/strands/types/models/model.py
index 0a289cf53..930217a12 100644
--- a/src/strands/types/models/model.py
+++ b/src/strands/types/models/model.py
@@ -50,8 +50,8 @@ def structured_output(
         """Get structured output from the model.
 
         Args:
-            output_model(Type[BaseModel]): The output model to use for the agent.
-            prompt(Messages): The prompt messages to use for the agent.
+            output_model: The output model to use for the agent.
+            prompt: The prompt messages to use for the agent.
 
         Yields:
             Model events with the last being the structured output.
diff --git a/src/strands/types/models/openai.py b/src/strands/types/models/openai.py
index bfd5e5d1e..25830bc39 100644
--- a/src/strands/types/models/openai.py
+++ b/src/strands/types/models/openai.py
@@ -303,8 +303,8 @@ def structured_output(
         """Get structured output from the model.
 
         Args:
-            output_model(Type[BaseModel]): The output model to use for the agent.
-            prompt(Messages): The prompt to use for the agent.
+            output_model: The output model to use for the agent.
+            prompt: The prompt to use for the agent.
 
         Yields:
             Model events with the last being the structured output.
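
Every hunk above edits the docstring of the same `structured_output` entry point, so a rough sketch of the call shape those docstrings describe is included here for reference. It is illustrative only and not part of the patch: the `PersonInfo` model and the prompt string are invented for the example, and it assumes the package's top-level `Agent` import and a default model configuration.

```python
from pydantic import BaseModel

from strands import Agent


class PersonInfo(BaseModel):
    """Output model: a JSON schema written as a Pydantic BaseModel."""

    name: str
    age: int


agent = Agent()

# output_model is the Pydantic class; the optional prompt string adds explicit
# instructions for the model, as described in the agent.py docstring above.
result = agent.structured_output(PersonInfo, "John Smith is a 30-year-old software engineer.")
print(result.name, result.age)
```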