Skip to content

Fix #968 by upgrading openai package to the latest #1034

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Jul 12, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ requires-python = ">=3.9"
license = "MIT"
authors = [{ name = "OpenAI", email = "[email protected]" }]
dependencies = [
"openai>=1.87.0",
"openai>=1.93.1, <2",
"pydantic>=2.10, <3",
"griffe>=1.5.6, <2",
"typing-extensions>=4.12.2, <5",
Expand Down
7 changes: 5 additions & 2 deletions src/agents/model_settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,11 +42,14 @@ def validate_from_none(value: None) -> _Omit:
serialization=core_schema.plain_serializer_function_ser_schema(lambda instance: None),
)

@dataclass
class MCPToolChoice:
    """Forces the model to call one specific tool on a hosted MCP server.

    Usable as a `tool_choice` value for Responses API models, where it is
    converted to a `{"type": "mcp", ...}` tool-choice param; Chat Completions
    models reject it with a UserError.
    """

    # Label identifying the MCP server that hosts the tool.
    server_label: str
    # Name of the tool to invoke on that server.
    name: str

Omit = Annotated[_Omit, _OmitTypeAnnotation]
Headers: TypeAlias = Mapping[str, Union[str, Omit]]
ToolChoice: TypeAlias = Union[Literal["auto", "required", "none"], str, None]

ToolChoice: TypeAlias = Union[Literal["auto", "required", "none"], str, MCPToolChoice, None]

@dataclass
class ModelSettings:
Expand Down
5 changes: 4 additions & 1 deletion src/agents/models/chatcmpl_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,17 +44,20 @@
from ..exceptions import AgentsException, UserError
from ..handoffs import Handoff
from ..items import TResponseInputItem, TResponseOutputItem
from ..model_settings import MCPToolChoice
from ..tool import FunctionTool, Tool
from .fake_id import FAKE_RESPONSES_ID


class Converter:
@classmethod
def convert_tool_choice(
cls, tool_choice: Literal["auto", "required", "none"] | str | None
cls, tool_choice: Literal["auto", "required", "none"] | str | MCPToolChoice | None
) -> ChatCompletionToolChoiceOptionParam | NotGiven:
if tool_choice is None:
return NOT_GIVEN
elif isinstance(tool_choice, MCPToolChoice):
raise UserError("MCPToolChoice is not supported for Chat Completions models")
elif tool_choice == "auto":
return "auto"
elif tool_choice == "required":
Expand Down
15 changes: 11 additions & 4 deletions src/agents/models/openai_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
from ..handoffs import Handoff
from ..items import ItemHelpers, ModelResponse, TResponseInputItem
from ..logger import logger
from ..model_settings import MCPToolChoice
from ..tool import (
CodeInterpreterTool,
ComputerTool,
Expand Down Expand Up @@ -303,10 +304,16 @@ class ConvertedTools:
class Converter:
@classmethod
def convert_tool_choice(
cls, tool_choice: Literal["auto", "required", "none"] | str | None
cls, tool_choice: Literal["auto", "required", "none"] | str | MCPToolChoice | None
) -> response_create_params.ToolChoice | NotGiven:
if tool_choice is None:
return NOT_GIVEN
elif isinstance(tool_choice, MCPToolChoice):
return {
"server_label": tool_choice.server_label,
"type": "mcp",
"name": tool_choice.name,
}
elif tool_choice == "required":
return "required"
elif tool_choice == "auto":
Expand Down Expand Up @@ -334,9 +341,9 @@ def convert_tool_choice(
"type": "code_interpreter",
}
elif tool_choice == "mcp":
return {
"type": "mcp",
}
# Note that this is still here for backwards compatibility,
# but migrating to MCPToolChoice is recommended.
return { "type": "mcp" } # type: ignore [typeddict-item]
else:
return {
"type": "function",
Expand Down
13 changes: 12 additions & 1 deletion tests/model_settings/test_serialization.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from pydantic import TypeAdapter
from pydantic_core import to_json

from agents.model_settings import ModelSettings
from agents.model_settings import MCPToolChoice, ModelSettings


def verify_serialization(model_settings: ModelSettings) -> None:
Expand All @@ -29,6 +29,17 @@ def test_basic_serialization() -> None:
verify_serialization(model_settings)


def test_mcp_tool_choice_serialization() -> None:
    """Tests whether ModelSettings with MCPToolChoice can be serialized to a JSON string."""
    # First, let's create a ModelSettings instance using an MCPToolChoice as
    # the tool_choice (the new union member added alongside the plain strings).
    model_settings = ModelSettings(
        temperature=0.5,
        tool_choice=MCPToolChoice(server_label="mcp", name="mcp_tool"),
    )
    # Now, let's serialize the ModelSettings instance to a JSON string;
    # verify_serialization (defined above in this module) asserts it succeeds.
    verify_serialization(model_settings)


def test_all_fields_serialization() -> None:
"""Tests whether ModelSettings can be serialized to a JSON string."""

Expand Down
13 changes: 11 additions & 2 deletions tests/test_items_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,10 @@
)
from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
from openai.types.responses.response_function_tool_call_param import ResponseFunctionToolCallParam
from openai.types.responses.response_function_web_search import ResponseFunctionWebSearch
from openai.types.responses.response_function_web_search import (
ActionSearch,
ResponseFunctionWebSearch,
)
from openai.types.responses.response_function_web_search_param import ResponseFunctionWebSearchParam
from openai.types.responses.response_output_message import ResponseOutputMessage
from openai.types.responses.response_output_message_param import ResponseOutputMessageParam
Expand Down Expand Up @@ -225,14 +228,20 @@ def test_to_input_items_for_file_search_call() -> None:

def test_to_input_items_for_web_search_call() -> None:
    """A web search tool call output should produce the same dict as a web search input."""
    # NOTE: the stale pre-upgrade one-line construction (without `action`) that
    # preceded this call was removed — it was a dead assignment immediately
    # shadowed below, and it would fail validation under openai>=1.93, which
    # requires the `action` field on ResponseFunctionWebSearch.
    ws_call = ResponseFunctionWebSearch(
        id="w1",
        action=ActionSearch(type="search", query="query"),
        status="completed",
        type="web_search_call",
    )
    resp = ModelResponse(output=[ws_call], usage=Usage(), response_id=None)
    input_items = resp.to_input_items()
    assert isinstance(input_items, list) and len(input_items) == 1
    # The round-tripped input param must carry every field, including `action`.
    expected: ResponseFunctionWebSearchParam = {
        "id": "w1",
        "status": "completed",
        "type": "web_search_call",
        "action": {"type": "search", "query": "query"},
    }
    assert input_items[0] == expected

Expand Down
8 changes: 7 additions & 1 deletion tests/test_run_step_processing.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
ResponseFunctionWebSearch,
)
from openai.types.responses.response_computer_tool_call import ActionClick
from openai.types.responses.response_function_web_search import ActionSearch
from openai.types.responses.response_reasoning_item import ResponseReasoningItem, Summary
from pydantic import BaseModel

Expand Down Expand Up @@ -306,7 +307,12 @@ async def test_file_search_tool_call_parsed_correctly():
@pytest.mark.asyncio
async def test_function_web_search_tool_call_parsed_correctly():
agent = Agent(name="test")
web_search_call = ResponseFunctionWebSearch(id="w1", status="completed", type="web_search_call")
web_search_call = ResponseFunctionWebSearch(
id="w1",
action=ActionSearch(type="search", query="query"),
status="completed",
type="web_search_call",
)
response = ModelResponse(
output=[get_text_message("hello"), web_search_call],
usage=Usage(),
Expand Down
8 changes: 4 additions & 4 deletions uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.