|
1 | 1 | import json |
2 | 2 | import os |
3 | 3 | from typing import Any, Literal, cast |
4 | | -from unittest.mock import patch |
| 4 | +from unittest.mock import AsyncMock, MagicMock, patch |
5 | 5 |
|
6 | 6 | import pydantic_core |
7 | 7 | import pytest |
8 | 8 | from inline_snapshot import snapshot |
9 | 9 |
|
10 | | -from pydantic_ai import Agent |
| 10 | +from pydantic_ai import Agent, ModelHTTPError |
11 | 11 | from pydantic_ai.messages import ( |
12 | 12 | ImageUrl, |
13 | 13 | ModelMessage, |
|
31 | 31 |
|
32 | 32 | with try_import() as imports_successful: |
33 | 33 | from openai.types.chat import ( |
| 34 | + ChatCompletion, |
34 | 35 | ChatCompletionChunk, |
35 | 36 | ChatCompletionMessage, |
36 | 37 | ) |
@@ -93,8 +94,6 @@ def chunk(choices: list[ChunkChoice]) -> ChatCompletionChunk: |
93 | 94 | def test_openrouter_model_init(): |
94 | 95 | c = completion_message(ChatCompletionMessage(content='test', role='assistant')) |
95 | 96 | mock_client = MockOpenAI.create_mock(c) |
96 | | - from pydantic_ai.providers.openrouter import OpenRouterProvider |
97 | | - |
98 | 97 | provider = OpenRouterProvider(openai_client=mock_client) |
99 | 98 | model = OpenRouterModel('google/gemini-2.5-flash-lite', provider=provider) |
100 | 99 | assert model.model_name == 'google/gemini-2.5-flash-lite' |
@@ -418,9 +417,6 @@ async def test_openrouter_with_reasoning_settings(allow_model_requests: None): |
418 | 417 |
|
419 | 418 | async def test_openrouter_model_custom_base_url(allow_model_requests: None): |
420 | 419 | """Test OpenRouterModel with provider.""" |
421 | | - # Test with provider using default base URL |
422 | | - from pydantic_ai.providers.openrouter import OpenRouterProvider |
423 | | - |
424 | 420 | provider = OpenRouterProvider(api_key='test-key') |
425 | 421 | model = OpenRouterModel('openai/gpt-4o', provider=provider) |
426 | 422 | assert model.model_name == 'openai/gpt-4o' |
@@ -573,3 +569,68 @@ async def test_openrouter_user_prompt_mixed_content(allow_model_requests: None): |
573 | 569 |
|
574 | 570 | assert result['role'] == 'user' |
575 | 571 | assert result['content'] == 'Hello, here is an image: and some more text.' |
| 572 | + |
| 573 | + |
async def test_openrouter_error_response_with_error_key(allow_model_requests: None):
    """Test that OpenRouter error responses with an 'error' key raise ModelHTTPError.

    Regression test for issue #2323 where OpenRouter returns HTTP 200 with an error
    object in the body (e.g., from upstream provider failures like Chutes).
    """
    with patch('pydantic_ai.models.openrouter.OpenRouterProvider') as mock_provider_class:
        # Stub the provider so OpenRouterModel can be constructed without credentials.
        mock_provider = MagicMock()
        mock_provider.client = AsyncMock()
        mock_provider.model_profile = MagicMock(return_value=MagicMock())
        mock_provider_class.return_value = mock_provider

        model = OpenRouterModel('deepseek/deepseek-chat-v3-0324:free')

        # An HTTP-200 completion whose body carries an `error` object and no choices.
        error_response = ChatCompletion.model_construct(
            id='error-response',
            choices=[],
            created=1234567890,
            model='deepseek/deepseek-chat-v3-0324:free',
            object='chat.completion',
        )
        # setattr sidesteps the declared pydantic fields / static type checker.
        setattr(error_response, 'error', {'message': 'Internal Server Error', 'code': 500})

        with patch('pydantic_ai.models.openrouter.completions_create', new_callable=AsyncMock) as mock_create:
            mock_create.return_value = error_response

            messages: list[ModelMessage] = [ModelRequest(parts=[UserPromptPart(content='test')])]
            # Use a real ModelRequestParameters — consistent with the sibling tests —
            # rather than cast()-ing a bare dict, which would blow up on the first
            # attribute access inside model.request() instead of exercising the
            # error-body handling under test.
            model_params = ModelRequestParameters(function_tools=[], output_tools=[], allow_text_output=True)

            with pytest.raises(ModelHTTPError, match='status_code: 500.*Internal Server Error'):
                await model.request(messages, None, model_params)
| 605 | + |
| 606 | + |
async def test_openrouter_error_response_with_none_error(allow_model_requests: None):
    """Test that a completion carrying `error=None` is treated as a normal success."""
    completion = completion_message(ChatCompletionMessage(content='Success', role='assistant'))
    setattr(completion, 'error', None)

    client = MockOpenAI.create_mock(completion)
    model = create_openrouter_model('openai/gpt-4o', client)

    history: list[ModelMessage] = [ModelRequest([UserPromptPart(content='test')])]
    params = ModelRequestParameters(function_tools=[], output_tools=[], allow_text_output=True)

    result = await model.request(history, None, params)
    first_part = result.parts[0]
    assert isinstance(first_part, TextPart)
    assert first_part.content == 'Success'
| 621 | + |
| 622 | + |
async def test_openrouter_error_response_with_non_dict_error(allow_model_requests: None):
    """Test that a completion whose `error` value is not a dict is handled gracefully."""
    completion = completion_message(ChatCompletionMessage(content='Success', role='assistant'))
    setattr(completion, 'error', 'some error string')

    client = MockOpenAI.create_mock(completion)
    model = create_openrouter_model('openai/gpt-4o', client)

    history: list[ModelMessage] = [ModelRequest([UserPromptPart(content='test')])]
    params = ModelRequestParameters(function_tools=[], output_tools=[], allow_text_output=True)

    result = await model.request(history, None, params)
    first_part = result.parts[0]
    assert isinstance(first_part, TextPart)
    assert first_part.content == 'Success'
0 commit comments