Skip to content

Commit fc5be63

Browse files
fix: resolve type errors in OpenRouter error response handling
1 parent 67dfea1 commit fc5be63

File tree

2 files changed

+82
-9
lines changed

pydantic_ai_slim/pydantic_ai/models/_openai_compat.py

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
from collections.abc import AsyncIterable, AsyncIterator, Callable, Mapping
1010
from dataclasses import dataclass, field, replace
1111
from datetime import datetime
12-
from typing import Any, Literal, overload
12+
from typing import Any, Literal, cast, overload
1313

1414
from pydantic import ValidationError
1515
from typing_extensions import assert_never
@@ -27,7 +27,7 @@
2727
ToolCallPart,
2828
)
2929

30-
from .. import UnexpectedModelBehavior, _utils, usage
30+
from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
3131
from .._output import OutputObjectDefinition
3232
from .._thinking_part import split_content_into_text_and_thinking
3333
from .._utils import guard_tool_call_id as _guard_tool_call_id, now_utc as _now_utc, number_to_datetime
@@ -277,6 +277,18 @@ def process_response(
277277
if not isinstance(response, chat.ChatCompletion):
278278
raise UnexpectedModelBehavior('Invalid response from OpenAI chat completions endpoint, expected JSON data')
279279

280+
if hasattr(response, 'error'):
281+
error_attr = getattr(response, 'error', None)
282+
if error_attr and isinstance(error_attr, dict):
283+
error_dict = cast(dict[str, Any], error_attr)
284+
error_code = error_dict.get('code')
285+
status_code = error_code if isinstance(error_code, int) else 500
286+
raise ModelHTTPError(
287+
status_code=status_code,
288+
model_name=getattr(model, 'model_name', 'unknown'),
289+
body={'error': error_dict},
290+
)
291+
280292
if response.created:
281293
timestamp = number_to_datetime(response.created)
282294
else:

tests/models/test_openrouter.py

Lines changed: 68 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,13 @@
11
import json
22
import os
33
from typing import Any, Literal, cast
4-
from unittest.mock import patch
4+
from unittest.mock import AsyncMock, MagicMock, patch
55

66
import pydantic_core
77
import pytest
88
from inline_snapshot import snapshot
99

10-
from pydantic_ai import Agent
10+
from pydantic_ai import Agent, ModelHTTPError
1111
from pydantic_ai.messages import (
1212
ImageUrl,
1313
ModelMessage,
@@ -31,6 +31,7 @@
3131

3232
with try_import() as imports_successful:
3333
from openai.types.chat import (
34+
ChatCompletion,
3435
ChatCompletionChunk,
3536
ChatCompletionMessage,
3637
)
@@ -93,8 +94,6 @@ def chunk(choices: list[ChunkChoice]) -> ChatCompletionChunk:
9394
def test_openrouter_model_init():
9495
c = completion_message(ChatCompletionMessage(content='test', role='assistant'))
9596
mock_client = MockOpenAI.create_mock(c)
96-
from pydantic_ai.providers.openrouter import OpenRouterProvider
97-
9897
provider = OpenRouterProvider(openai_client=mock_client)
9998
model = OpenRouterModel('google/gemini-2.5-flash-lite', provider=provider)
10099
assert model.model_name == 'google/gemini-2.5-flash-lite'
@@ -418,9 +417,6 @@ async def test_openrouter_with_reasoning_settings(allow_model_requests: None):
418417

419418
async def test_openrouter_model_custom_base_url(allow_model_requests: None):
420419
"""Test OpenRouterModel with provider."""
421-
# Test with provider using default base URL
422-
from pydantic_ai.providers.openrouter import OpenRouterProvider
423-
424420
provider = OpenRouterProvider(api_key='test-key')
425421
model = OpenRouterModel('openai/gpt-4o', provider=provider)
426422
assert model.model_name == 'openai/gpt-4o'
@@ -573,3 +569,68 @@ async def test_openrouter_user_prompt_mixed_content(allow_model_requests: None):
573569

574570
assert result['role'] == 'user'
575571
assert result['content'] == 'Hello, here is an image: and some more text.'
572+
573+
574+
async def test_openrouter_error_response_with_error_key(allow_model_requests: None):
575+
"""Test that OpenRouter error responses with 'error' key are properly handled.
576+
577+
Regression test for issue #2323 where OpenRouter returns HTTP 200 with an error
578+
object in the body (e.g., from upstream provider failures like Chutes).
579+
"""
580+
with patch('pydantic_ai.models.openrouter.OpenRouterProvider') as mock_provider_class:
581+
mock_provider = MagicMock()
582+
mock_provider.client = AsyncMock()
583+
mock_provider.model_profile = MagicMock(return_value=MagicMock())
584+
mock_provider_class.return_value = mock_provider
585+
586+
model = OpenRouterModel('deepseek/deepseek-chat-v3-0324:free')
587+
588+
error_response = ChatCompletion.model_construct(
589+
id='error-response',
590+
choices=[],
591+
created=1234567890,
592+
model='deepseek/deepseek-chat-v3-0324:free',
593+
object='chat.completion',
594+
)
595+
setattr(error_response, 'error', {'message': 'Internal Server Error', 'code': 500})
596+
597+
with patch('pydantic_ai.models.openrouter.completions_create', new_callable=AsyncMock) as mock_create:
598+
mock_create.return_value = error_response
599+
600+
messages: list[ModelMessage] = [ModelRequest(parts=[UserPromptPart(content='test')])]
601+
model_params = cast(ModelRequestParameters, {})
602+
603+
with pytest.raises(ModelHTTPError, match='status_code: 500.*Internal Server Error'):
604+
await model.request(messages, None, model_params)
605+
606+
607+
async def test_openrouter_error_response_with_none_error(allow_model_requests: None):
608+
"""Test that responses with error=None are handled gracefully."""
609+
c = completion_message(ChatCompletionMessage(content='Success', role='assistant'))
610+
setattr(c, 'error', None)
611+
612+
mock_client = MockOpenAI.create_mock(c)
613+
model = create_openrouter_model('openai/gpt-4o', mock_client)
614+
615+
messages: list[ModelMessage] = [ModelRequest([UserPromptPart(content='test')])]
616+
model_request_parameters = ModelRequestParameters(function_tools=[], output_tools=[], allow_text_output=True)
617+
618+
response = await model.request(messages, None, model_request_parameters)
619+
assert isinstance(response.parts[0], TextPart)
620+
assert response.parts[0].content == 'Success'
621+
622+
623+
async def test_openrouter_error_response_with_non_dict_error(allow_model_requests: None):
624+
"""Test that responses with non-dict error values are handled gracefully."""
625+
c = completion_message(ChatCompletionMessage(content='Success', role='assistant'))
626+
setattr(c, 'error', 'some error string')
627+
628+
mock_client = MockOpenAI.create_mock(c)
629+
model = create_openrouter_model('openai/gpt-4o', mock_client)
630+
631+
messages: list[ModelMessage] = [ModelRequest([UserPromptPart(content='test')])]
632+
model_request_parameters = ModelRequestParameters(function_tools=[], output_tools=[], allow_text_output=True)
633+
634+
response = await model.request(messages, None, model_request_parameters)
635+
assert isinstance(response.parts[0], TextPart)
636+
assert response.parts[0].content == 'Success'

0 commit comments

Comments (0)