     ResponseReasoningItemParam,
 )
 from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
-from openai.types.responses.response_reasoning_item import Summary
+from openai.types.responses.response_reasoning_item import Content, Summary
 
 from ..agent_output import AgentOutputSchemaBase
 from ..exceptions import AgentsException, UserError
@@ -93,24 +93,38 @@ def convert_response_format(
     def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
         items: list[TResponseOutputItem] = []
 
-        # Handle reasoning content if available
+        # Check if message is agents.extensions.models.litellm_model.InternalChatCompletionMessage
+        # We can't actually import it here because litellm is an optional dependency
+        # So we use hasattr to check for reasoning_content and thinking_blocks
         if hasattr(message, "reasoning_content") and message.reasoning_content:
             reasoning_item = ResponseReasoningItem(
                 id=FAKE_RESPONSES_ID,
                 summary=[Summary(text=message.reasoning_content, type="summary_text")],
                 type="reasoning",
             )
 
-            # Store full thinking blocks for Anthropic compatibility
+            # Store thinking blocks for Anthropic compatibility
             if hasattr(message, "thinking_blocks") and message.thinking_blocks:
-                # Store thinking blocks in the reasoning item's content
-                # Convert thinking blocks to Content objects
-                from openai.types.responses.response_reasoning_item import Content
-
-                reasoning_item.content = [
-                    Content(text=str(block.get("thinking", "")), type="reasoning_text")
-                    for block in message.thinking_blocks
-                ]
+                # Store thinking text in content and signature in encrypted_content
+                reasoning_item.content = []
+                signature = None
+                for block in message.thinking_blocks:
+                    if isinstance(block, dict):
+                        thinking_text = block.get("thinking", "")
+                        if thinking_text:
+                            reasoning_item.content.append(
+                                Content(text=thinking_text, type="reasoning_text")
+                            )
+                        # Store the signature if present
+                        if block.get("signature"):
+                            signature = block.get("signature")
+
+                # Store only the last signature in encrypted_content
+                # If there are multiple thinking blocks, this could be a problem.
+                # In practice, there should only be one signature for the entire reasoning step.
+                # Tested with: claude-sonnet-4-20250514
+                if signature:
+                    reasoning_item.encrypted_content = signature
 
             items.append(reasoning_item)
 
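For illustration (outside the diff), a minimal sketch of what this hunk produces, assuming a litellm-style message carrying Anthropic reasoning fields. The SimpleNamespace stand-in and all field values are hypothetical, since InternalChatCompletionMessage cannot be imported here without litellm installed:

from types import SimpleNamespace

# Hypothetical stand-in for InternalChatCompletionMessage: only the two
# attributes the converter probes via hasattr() are modeled here.
message = SimpleNamespace(
    reasoning_content="The user asked for the weather, so I should call the tool.",
    thinking_blocks=[
        {
            "type": "thinking",
            "thinking": "The user asked for the weather, so I should call the tool.",
            "signature": "EuYBCkQ...",  # opaque Anthropic signature (truncated, hypothetical)
        }
    ],
)

# Running the converter on such a message would emit, roughly:
# ResponseReasoningItem(
#     id=FAKE_RESPONSES_ID,
#     type="reasoning",
#     summary=[Summary(text=message.reasoning_content, type="summary_text")],
#     content=[Content(text="The user asked for ...", type="reasoning_text")],
#     encrypted_content="EuYBCkQ...",  # the block's signature
# )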
@@ -325,6 +339,7 @@ def items_to_messages(
 
         result: list[ChatCompletionMessageParam] = []
         current_assistant_msg: ChatCompletionAssistantMessageParam | None = None
+        pending_thinking_blocks: list[dict[str, str]] | None = None
 
         def flush_assistant_message() -> None:
             nonlocal current_assistant_msg
@@ -336,10 +351,17 @@ def flush_assistant_message() -> None:
                 current_assistant_msg = None
 
         def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
-            nonlocal current_assistant_msg
+            nonlocal current_assistant_msg, pending_thinking_blocks
             if current_assistant_msg is None:
                 current_assistant_msg = ChatCompletionAssistantMessageParam(role="assistant")
                 current_assistant_msg["tool_calls"] = []
+
+                # If we have pending thinking blocks, use them as the content
+                # This is required for Anthropic API tool calls with interleaved thinking
+                if pending_thinking_blocks:
+                    current_assistant_msg["content"] = pending_thinking_blocks  # type: ignore
+                    pending_thinking_blocks = None  # Clear after using
+
             return current_assistant_msg
 
         for item in items:
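To see why ensure_assistant_message() injects the pending blocks, here is a sketch of the assistant message shape this aims to produce when a reasoning item precedes a tool call; the code comment above notes that Anthropic's interleaved-thinking tool calls require the thinking block (with its signature) to be replayed ahead of the tool call. All values below are hypothetical:

# Hypothetical shape of the flushed assistant message (values invented).
assistant_msg = {
    "role": "assistant",
    "content": [
        {
            "type": "thinking",
            "thinking": "I should look up the weather first.",
            "signature": "EuYBCkQ...",  # must be echoed back unchanged
        }
    ],
    "tool_calls": [
        {
            "id": "call_123",
            "type": "function",
            "function": {"name": "get_weather", "arguments": '{"city": "Tokyo"}'},
        }
    ],
}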
@@ -483,9 +505,28 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
483505 f"Encountered an item_reference, which is not supported: { item_ref } "
484506 )
485507
486- # 7) reasoning message => not handled
487- elif cls .maybe_reasoning_message (item ):
488- pass
508+ # 7) reasoning message => extract thinking blocks if present
509+ elif reasoning_item := cls .maybe_reasoning_message (item ):
510+ # Reconstruct thinking blocks from content (text) and encrypted_content (signature)
511+ content_items = reasoning_item .get ("content" , [])
512+ signature = reasoning_item .get ("encrypted_content" )
513+
514+ if content_items :
515+ # Reconstruct thinking blocks from content and signature
516+ pending_thinking_blocks = []
517+ for content_item in content_items :
518+ if (
519+ isinstance (content_item , dict )
520+ and content_item .get ("type" ) == "reasoning_text"
521+ ):
522+ thinking_block = {
523+ "type" : "thinking" ,
524+ "thinking" : content_item .get ("text" , "" ),
525+ }
526+ # Add signature if available
527+ if signature :
528+ thinking_block ["signature" ] = signature
529+ pending_thinking_blocks .append (thinking_block )
489530
490531 # 8) If we haven't recognized it => fail or ignore
491532 else :
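A round-trip sketch of the reconstruction this hunk performs, with hypothetical values: a reasoning item arriving in the next turn's input is turned back into Anthropic-style thinking blocks, which ensure_assistant_message() then attaches to the following assistant message. The comprehension below condenses the loop above and is not the code from the diff:

# Hypothetical reasoning item param as it would arrive in `items`.
reasoning_item_param = {
    "id": "rs_abc123",
    "type": "reasoning",
    "summary": [{"text": "I should call the tool.", "type": "summary_text"}],
    "content": [{"text": "I should call the tool.", "type": "reasoning_text"}],
    "encrypted_content": "EuYBCkQ...",
}

# Condensed equivalent of the reconstruction loop; the real code only adds
# the signature when one is present.
pending_thinking_blocks = [
    {
        "type": "thinking",
        "thinking": block.get("text", ""),
        "signature": reasoning_item_param["encrypted_content"],
    }
    for block in reasoning_item_param["content"]
    if block.get("type") == "reasoning_text"
]
# -> [{"type": "thinking", "thinking": "I should call the tool.",
#      "signature": "EuYBCkQ..."}]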