diff --git a/docs/models/overview.md b/docs/models/overview.md index 38536dd630..f1b7eb0781 100644 --- a/docs/models/overview.md +++ b/docs/models/overview.md @@ -180,7 +180,7 @@ contains all the exceptions encountered during the `run` execution. === "Python >=3.11" ```python {title="fallback_model_failure.py" py="3.11"} - from pydantic_ai import Agent, ModelHTTPError + from pydantic_ai import Agent, ModelAPIError from pydantic_ai.models.anthropic import AnthropicModel from pydantic_ai.models.fallback import FallbackModel from pydantic_ai.models.openai import OpenAIChatModel @@ -192,7 +192,7 @@ contains all the exceptions encountered during the `run` execution. agent = Agent(fallback_model) try: response = agent.run_sync('What is the capital of France?') - except* ModelHTTPError as exc_group: + except* ModelAPIError as exc_group: for exc in exc_group.exceptions: print(exc) ``` @@ -206,7 +206,7 @@ contains all the exceptions encountered during the `run` execution. ```python {title="fallback_model_failure.py" noqa="F821" test="skip"} from exceptiongroup import catch - from pydantic_ai import Agent, ModelHTTPError + from pydantic_ai import Agent, ModelAPIError from pydantic_ai.models.anthropic import AnthropicModel from pydantic_ai.models.fallback import FallbackModel from pydantic_ai.models.openai import OpenAIChatModel @@ -222,10 +222,11 @@ contains all the exceptions encountered during the `run` execution. fallback_model = FallbackModel(openai_model, anthropic_model) agent = Agent(fallback_model) - with catch({ModelHTTPError: model_status_error_handler}): + with catch({ModelAPIError: model_status_error_handler}): response = agent.run_sync('What is the capital of France?') ``` By default, the `FallbackModel` only moves on to the next model if the current model raises a +[`ModelAPIError`][pydantic_ai.exceptions.ModelAPIError], which includes [`ModelHTTPError`][pydantic_ai.exceptions.ModelHTTPError]. You can customize this behavior by passing a custom `fallback_on` argument to the `FallbackModel` constructor. 
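To make the new default concrete, here is a minimal sketch of the `fallback_on` customization the paragraph above mentions, based on the constructor signature in the `fallback.py` hunk further down; the model names and the `is_server_error` predicate are illustrative, not part of this change:

```python
from pydantic_ai import ModelHTTPError
from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.models.fallback import FallbackModel
from pydantic_ai.models.openai import OpenAIChatModel

openai_model = OpenAIChatModel('gpt-4o')
anthropic_model = AnthropicModel('claude-sonnet-4-5')

# Restore the pre-change behavior: only fall back on HTTP 4xx/5xx responses,
# letting connection-level ModelAPIErrors propagate to the caller.
fallback_model = FallbackModel(openai_model, anthropic_model, fallback_on=(ModelHTTPError,))


# Alternatively, pass a predicate; here, fall back only on server-side errors.
def is_server_error(exc: Exception) -> bool:
    return isinstance(exc, ModelHTTPError) and exc.status_code >= 500


fallback_model = FallbackModel(openai_model, anthropic_model, fallback_on=is_server_error)
```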
diff --git a/pydantic_ai_slim/pydantic_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/__init__.py index ec0137f856..f33a0ad3ec 100644 --- a/pydantic_ai_slim/pydantic_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/__init__.py @@ -24,6 +24,7 @@ CallDeferred, FallbackExceptionGroup, IncompleteToolCall, + ModelAPIError, ModelHTTPError, ModelRetry, UnexpectedModelBehavior, @@ -126,6 +127,7 @@ 'CallDeferred', 'ApprovalRequired', 'ModelRetry', + 'ModelAPIError', 'ModelHTTPError', 'FallbackExceptionGroup', 'IncompleteToolCall', diff --git a/pydantic_ai_slim/pydantic_ai/exceptions.py b/pydantic_ai_slim/pydantic_ai/exceptions.py index afeb8c524f..0b4500502c 100644 --- a/pydantic_ai_slim/pydantic_ai/exceptions.py +++ b/pydantic_ai_slim/pydantic_ai/exceptions.py @@ -22,6 +22,7 @@ 'AgentRunError', 'UnexpectedModelBehavior', 'UsageLimitExceeded', + 'ModelAPIError', 'ModelHTTPError', 'IncompleteToolCall', 'FallbackExceptionGroup', @@ -151,27 +152,31 @@ def __str__(self) -> str: return self.message -class ModelHTTPError(AgentRunError): +class ModelAPIError(AgentRunError): + """Raised when a model provider API request fails.""" + + model_name: str + """The name of the model associated with the error.""" + + def __init__(self, model_name: str, message: str): + self.model_name = model_name + super().__init__(message) + + +class ModelHTTPError(ModelAPIError): """Raised when a model provider response has a status code of 4xx or 5xx.""" status_code: int """The HTTP status code returned by the API.""" - model_name: str - """The name of the model associated with the error.""" - body: object | None """The body of the response, if available.""" - message: str - """The error message with the status code and response body, if available.""" - def __init__(self, status_code: int, model_name: str, body: object | None = None): self.status_code = status_code - self.model_name = model_name self.body = body message = f'status_code: {status_code}, model_name: {model_name}, body: {body}' - super().__init__(message) + super().__init__(model_name=model_name, message=message) class FallbackExceptionGroup(ExceptionGroup[Any]): diff --git a/pydantic_ai_slim/pydantic_ai/models/anthropic.py b/pydantic_ai_slim/pydantic_ai/models/anthropic.py index e32888a439..342c141b9d 100644 --- a/pydantic_ai_slim/pydantic_ai/models/anthropic.py +++ b/pydantic_ai_slim/pydantic_ai/models/anthropic.py @@ -14,7 +14,7 @@ from .._run_context import RunContext from .._utils import guard_tool_call_id as _guard_tool_call_id from ..builtin_tools import CodeExecutionTool, MCPServerTool, MemoryTool, WebSearchTool -from ..exceptions import UserError +from ..exceptions import ModelAPIError, UserError from ..messages import ( BinaryContent, BuiltinToolCallPart, @@ -55,7 +55,14 @@ try: - from anthropic import NOT_GIVEN, APIStatusError, AsyncAnthropicBedrock, AsyncStream, omit as OMIT + from anthropic import ( + NOT_GIVEN, + APIConnectionError, + APIStatusError, + AsyncAnthropicBedrock, + AsyncStream, + omit as OMIT, + ) from anthropic.types.beta import ( BetaBase64PDFBlockParam, BetaBase64PDFSourceParam, @@ -358,7 +365,9 @@ async def _messages_create( except APIStatusError as e: if (status_code := e.status_code) >= 400: raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e - raise # pragma: lax no cover + raise ModelAPIError(model_name=self.model_name, message=e.message) from e # pragma: lax no cover + except APIConnectionError as e: + raise ModelAPIError(model_name=self.model_name, message=e.message) from e async def 
_messages_count_tokens( self, @@ -395,7 +404,9 @@ async def _messages_count_tokens( except APIStatusError as e: if (status_code := e.status_code) >= 400: raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e - raise # pragma: lax no cover + raise ModelAPIError(model_name=self.model_name, message=e.message) from e # pragma: lax no cover + except APIConnectionError as e: + raise ModelAPIError(model_name=self.model_name, message=e.message) from e def _process_response(self, response: BetaMessage) -> ModelResponse: """Process a non-streamed response, and prepare a message to return.""" diff --git a/pydantic_ai_slim/pydantic_ai/models/bedrock.py b/pydantic_ai_slim/pydantic_ai/models/bedrock.py index acb98e5ec0..99a174d513 100644 --- a/pydantic_ai_slim/pydantic_ai/models/bedrock.py +++ b/pydantic_ai_slim/pydantic_ai/models/bedrock.py @@ -41,7 +41,7 @@ usage, ) from pydantic_ai._run_context import RunContext -from pydantic_ai.exceptions import ModelHTTPError, UserError +from pydantic_ai.exceptions import ModelAPIError, ModelHTTPError, UserError from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse, download_item from pydantic_ai.providers import Provider, infer_provider from pydantic_ai.providers.bedrock import BedrockModelProfile @@ -312,8 +312,10 @@ async def count_tokens( try: response = await anyio.to_thread.run_sync(functools.partial(self.client.count_tokens, **params)) except ClientError as e: - status_code = e.response.get('ResponseMetadata', {}).get('HTTPStatusCode', 500) - raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.response) from e + status_code = e.response.get('ResponseMetadata', {}).get('HTTPStatusCode') + if isinstance(status_code, int): + raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.response) from e + raise ModelAPIError(model_name=self.model_name, message=str(e)) from e return usage.RequestUsage(input_tokens=response['inputTokens']) @asynccontextmanager @@ -459,8 +461,10 @@ async def _messages_create( else: model_response = await anyio.to_thread.run_sync(functools.partial(self.client.converse, **params)) except ClientError as e: - status_code = e.response.get('ResponseMetadata', {}).get('HTTPStatusCode', 500) - raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.response) from e + status_code = e.response.get('ResponseMetadata', {}).get('HTTPStatusCode') + if isinstance(status_code, int): + raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.response) from e + raise ModelAPIError(model_name=self.model_name, message=str(e)) from e return model_response @staticmethod diff --git a/pydantic_ai_slim/pydantic_ai/models/cohere.py b/pydantic_ai_slim/pydantic_ai/models/cohere.py index 24bb9353c7..60ae329065 100644 --- a/pydantic_ai_slim/pydantic_ai/models/cohere.py +++ b/pydantic_ai_slim/pydantic_ai/models/cohere.py @@ -6,7 +6,7 @@ from typing_extensions import assert_never -from pydantic_ai.exceptions import UserError +from pydantic_ai.exceptions import ModelAPIError, UserError from .. 
import ModelHTTPError, usage from .._utils import generate_tool_call_id as _generate_tool_call_id, guard_tool_call_id as _guard_tool_call_id @@ -195,7 +195,7 @@ async def _chat( except ApiError as e: if (status_code := e.status_code) and status_code >= 400: raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e - raise # pragma: lax no cover + raise ModelAPIError(model_name=self.model_name, message=str(e)) from e def _process_response(self, response: V2ChatResponse) -> ModelResponse: """Process a non-streamed response, and prepare a message to return.""" diff --git a/pydantic_ai_slim/pydantic_ai/models/fallback.py b/pydantic_ai_slim/pydantic_ai/models/fallback.py index 682ab90ea6..67151a07b9 100644 --- a/pydantic_ai_slim/pydantic_ai/models/fallback.py +++ b/pydantic_ai_slim/pydantic_ai/models/fallback.py @@ -11,7 +11,7 @@ from pydantic_ai._run_context import RunContext from pydantic_ai.models.instrumented import InstrumentedModel -from ..exceptions import FallbackExceptionGroup, ModelHTTPError +from ..exceptions import FallbackExceptionGroup, ModelAPIError from ..profiles import ModelProfile from . import KnownModelName, Model, ModelRequestParameters, StreamedResponse, infer_model @@ -36,7 +36,7 @@ def __init__( self, default_model: Model | KnownModelName | str, *fallback_models: Model | KnownModelName | str, - fallback_on: Callable[[Exception], bool] | tuple[type[Exception], ...] = (ModelHTTPError,), + fallback_on: Callable[[Exception], bool] | tuple[type[Exception], ...] = (ModelAPIError,), ): """Initialize a fallback model instance. diff --git a/pydantic_ai_slim/pydantic_ai/models/google.py b/pydantic_ai_slim/pydantic_ai/models/google.py index 4c02c11407..750b6e15b0 100644 --- a/pydantic_ai_slim/pydantic_ai/models/google.py +++ b/pydantic_ai_slim/pydantic_ai/models/google.py @@ -14,7 +14,7 @@ from .._output import OutputObjectDefinition from .._run_context import RunContext from ..builtin_tools import CodeExecutionTool, ImageGenerationTool, UrlContextTool, WebSearchTool -from ..exceptions import ModelHTTPError, UserError +from ..exceptions import ModelAPIError, ModelHTTPError, UserError from ..messages import ( BinaryContent, BuiltinToolCallPart, @@ -410,7 +410,7 @@ async def _generate_content( model_name=self._model_name, body=cast(Any, e.details), # pyright: ignore[reportUnknownMemberType] ) from e - raise # pragma: lax no cover + raise ModelAPIError(model_name=self._model_name, message=str(e)) from e async def _build_content_and_config( self, diff --git a/pydantic_ai_slim/pydantic_ai/models/groq.py b/pydantic_ai_slim/pydantic_ai/models/groq.py index 67c27a19c2..64f7ddcf85 100644 --- a/pydantic_ai_slim/pydantic_ai/models/groq.py +++ b/pydantic_ai_slim/pydantic_ai/models/groq.py @@ -16,7 +16,7 @@ from .._thinking_part import split_content_into_text_and_thinking from .._utils import generate_tool_call_id, guard_tool_call_id as _guard_tool_call_id, number_to_datetime from ..builtin_tools import WebSearchTool -from ..exceptions import UserError +from ..exceptions import ModelAPIError, UserError from ..messages import ( BinaryContent, BuiltinToolCallPart, @@ -52,7 +52,7 @@ ) try: - from groq import NOT_GIVEN, APIError, APIStatusError, AsyncGroq, AsyncStream + from groq import NOT_GIVEN, APIConnectionError, APIError, APIStatusError, AsyncGroq, AsyncStream from groq.types import chat from groq.types.chat.chat_completion_content_part_image_param import ImageURL from groq.types.chat.chat_completion_message import ExecutedTool @@ -314,7 +314,9 @@ async def 
_completions_create( except APIStatusError as e: if (status_code := e.status_code) >= 400: raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e - raise # pragma: lax no cover + raise ModelAPIError(model_name=self.model_name, message=e.message) from e # pragma: no cover + except APIConnectionError as e: + raise ModelAPIError(model_name=self.model_name, message=e.message) from e def _process_response(self, response: chat.ChatCompletion) -> ModelResponse: """Process a non-streamed response, and prepare a message to return.""" diff --git a/pydantic_ai_slim/pydantic_ai/models/mistral.py b/pydantic_ai_slim/pydantic_ai/models/mistral.py index 770c8ff6ca..cefa28e9dc 100644 --- a/pydantic_ai_slim/pydantic_ai/models/mistral.py +++ b/pydantic_ai_slim/pydantic_ai/models/mistral.py @@ -13,7 +13,7 @@ from .. import ModelHTTPError, UnexpectedModelBehavior, _utils from .._run_context import RunContext from .._utils import generate_tool_call_id as _generate_tool_call_id, now_utc as _now_utc, number_to_datetime -from ..exceptions import UserError +from ..exceptions import ModelAPIError, UserError from ..messages import ( BinaryContent, BuiltinToolCallPart, @@ -246,7 +246,7 @@ async def _completions_create( except SDKError as e: if (status_code := e.status_code) >= 400: raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e - raise # pragma: lax no cover + raise ModelAPIError(model_name=self.model_name, message=e.message) from e assert response, 'An unexpected empty response from Mistral.' return response diff --git a/pydantic_ai_slim/pydantic_ai/models/openai.py b/pydantic_ai_slim/pydantic_ai/models/openai.py index 33a281974d..6db3742320 100644 --- a/pydantic_ai_slim/pydantic_ai/models/openai.py +++ b/pydantic_ai_slim/pydantic_ai/models/openai.py @@ -14,7 +14,7 @@ from pydantic_core import to_json from typing_extensions import assert_never, deprecated -from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage +from .. import ModelAPIError, ModelHTTPError, UnexpectedModelBehavior, _utils, usage from .._output import DEFAULT_OUTPUT_TOOL_NAME, OutputObjectDefinition from .._run_context import RunContext from .._thinking_part import split_content_into_text_and_thinking @@ -55,7 +55,7 @@ from . import Model, ModelRequestParameters, StreamedResponse, check_allow_model_requests, download_item, get_user_agent try: - from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream + from openai import NOT_GIVEN, APIConnectionError, APIStatusError, AsyncOpenAI, AsyncStream from openai.types import AllModels, chat, responses from openai.types.chat import ( ChatCompletionChunk, @@ -549,6 +549,8 @@ async def _completions_create( if (status_code := e.status_code) >= 400: raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e raise # pragma: lax no cover + except APIConnectionError as e: + raise ModelAPIError(model_name=self.model_name, message=e.message) from e def _validate_completion(self, response: chat.ChatCompletion) -> chat.ChatCompletion: """Hook that validates chat completions before processing. 
@@ -1351,6 +1353,8 @@ async def _responses_create( if (status_code := e.status_code) >= 400: raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e raise # pragma: lax no cover + except APIConnectionError as e: + raise ModelAPIError(model_name=self.model_name, message=e.message) from e def _get_reasoning(self, model_settings: OpenAIResponsesModelSettings) -> Reasoning | Omit: reasoning_effort = model_settings.get('openai_reasoning_effort', None) diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index 1d0d64918f..1a19af03e0 100644 --- a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -24,6 +24,7 @@ DocumentUrl, FinalResultEvent, ImageUrl, + ModelAPIError, ModelHTTPError, ModelMessage, ModelRequest, @@ -61,7 +62,7 @@ from .mock_async_stream import MockAsyncStream with try_import() as imports_successful: - from anthropic import NOT_GIVEN, APIStatusError, AsyncAnthropic + from anthropic import NOT_GIVEN, APIConnectionError, APIStatusError, AsyncAnthropic from anthropic.lib.tools import BetaAbstractMemoryTool from anthropic.resources.beta import AsyncBeta from anthropic.types.beta import ( @@ -144,7 +145,7 @@ def beta(self) -> AsyncBeta: @cached_property def messages(self) -> Any: - return type('Messages', (), {'create': self.messages_create}) + return type('Messages', (), {'create': self.messages_create, 'count_tokens': self.messages_count_tokens}) @classmethod def create_mock(cls, messages_: MockAnthropicMessage | Sequence[MockAnthropicMessage]) -> AsyncAnthropic: @@ -180,6 +181,11 @@ async def messages_create( self.index += 1 return response + async def messages_count_tokens(self, *_args: Any, **_kwargs: Any) -> Any: + if self.messages_ is not None: + raise_if_exception(self.messages_ if not isinstance(self.messages_, Sequence) else self.messages_[0]) + return None # pragma: no cover + def completion_message(content: list[BetaContentBlock], usage: BetaUsage) -> BetaMessage: return BetaMessage( @@ -1205,6 +1211,36 @@ def test_model_status_error(allow_model_requests: None) -> None: ) +def test_model_connection_error(allow_model_requests: None) -> None: + mock_client = MockAnthropic.create_mock( + APIConnectionError( + message='Connection to https://api.anthropic.com timed out', + request=httpx.Request('POST', 'https://api.anthropic.com/v1/messages'), + ) + ) + m = AnthropicModel('claude-sonnet-4-5', provider=AnthropicProvider(anthropic_client=mock_client)) + agent = Agent(m) + with pytest.raises(ModelAPIError) as exc_info: + agent.run_sync('hello') + assert exc_info.value.model_name == 'claude-sonnet-4-5' + assert 'Connection to https://api.anthropic.com timed out' in str(exc_info.value.message) + + +async def test_count_tokens_connection_error(allow_model_requests: None) -> None: + mock_client = MockAnthropic.create_mock( + APIConnectionError( + message='Connection to https://api.anthropic.com timed out', + request=httpx.Request('POST', 'https://api.anthropic.com/v1/messages'), + ) + ) + m = AnthropicModel('claude-sonnet-4-5', provider=AnthropicProvider(anthropic_client=mock_client)) + agent = Agent(m) + with pytest.raises(ModelAPIError) as exc_info: + await agent.run('hello', usage_limits=UsageLimits(input_tokens_limit=20, count_tokens_before_request=True)) + assert exc_info.value.model_name == 'claude-sonnet-4-5' + assert 'Connection to https://api.anthropic.com timed out' in str(exc_info.value.message) + + async def test_document_binary_content_input( allow_model_requests: None, anthropic_api_key: str, 
document_content: BinaryContent ): diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py index cce18a9227..f13aaff4fb 100644 --- a/tests/models/test_bedrock.py +++ b/tests/models/test_bedrock.py @@ -1,9 +1,11 @@ from __future__ import annotations as _annotations import datetime +from types import SimpleNamespace from typing import Any import pytest +from botocore.exceptions import ClientError from inline_snapshot import snapshot from typing_extensions import TypedDict @@ -33,11 +35,13 @@ VideoUrl, ) from pydantic_ai.agent import Agent -from pydantic_ai.exceptions import ModelHTTPError, ModelRetry, UsageLimitExceeded +from pydantic_ai.exceptions import ModelAPIError, ModelHTTPError, ModelRetry, UsageLimitExceeded from pydantic_ai.messages import AgentStreamEvent from pydantic_ai.models import ModelRequestParameters from pydantic_ai.models.bedrock import BedrockConverseModel, BedrockModelSettings from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings +from pydantic_ai.profiles import DEFAULT_PROFILE +from pydantic_ai.providers import Provider from pydantic_ai.providers.bedrock import BedrockProvider from pydantic_ai.providers.openai import OpenAIProvider from pydantic_ai.run import AgentRunResult, AgentRunResultEvent @@ -56,6 +60,53 @@ ] +class _StubBedrockClient: + """Minimal Bedrock client that always raises the provided error.""" + + def __init__(self, error: ClientError): + self._error = error + self.meta = SimpleNamespace(endpoint_url='https://bedrock.stub') + + def converse(self, **_: Any) -> None: + raise self._error + + def converse_stream(self, **_: Any) -> None: + raise self._error + + def count_tokens(self, **_: Any) -> None: + raise self._error + + +class _StubBedrockProvider(Provider[Any]): + """Provider implementation backed by the stub client.""" + + def __init__(self, client: _StubBedrockClient): + self._client = client + + @property + def name(self) -> str: + return 'bedrock-stub' + + @property + def base_url(self) -> str: + return 'https://bedrock.stub' + + @property + def client(self) -> _StubBedrockClient: + return self._client + + def model_profile(self, model_name: str): + return DEFAULT_PROFILE + + +def _bedrock_model_with_client_error(error: ClientError) -> BedrockConverseModel: + """Instantiate a BedrockConverseModel wired to always raise the given error.""" + return BedrockConverseModel( + 'us.amazon.nova-micro-v1:0', + provider=_StubBedrockProvider(_StubBedrockClient(error)), + ) + + async def test_bedrock_model(allow_model_requests: None, bedrock_provider: BedrockProvider): model = BedrockConverseModel('us.amazon.nova-micro-v1:0', provider=bedrock_provider) assert model.base_url == 'https://bedrock-runtime.us-east-1.amazonaws.com' @@ -153,6 +204,55 @@ async def test_bedrock_count_tokens_error(allow_model_requests: None, bedrock_pr assert exc_info.value.body.get('Error', {}).get('Message') == 'The provided model identifier is invalid.' 
# type: ignore[union-attr] +async def test_bedrock_request_non_http_error(): + error = ClientError({'Error': {'Code': 'TestException', 'Message': 'broken connection'}}, 'converse') + model = _bedrock_model_with_client_error(error) + params = ModelRequestParameters() + + with pytest.raises(ModelAPIError) as exc_info: + await model.request([ModelRequest.user_text_prompt('hi')], None, params) + + assert exc_info.value.message == snapshot( + 'An error occurred (TestException) when calling the converse operation: broken connection' + ) + + +async def test_bedrock_count_tokens_non_http_error(): + error = ClientError({'Error': {'Code': 'TestException', 'Message': 'broken connection'}}, 'count_tokens') + model = _bedrock_model_with_client_error(error) + params = ModelRequestParameters() + + with pytest.raises(ModelAPIError) as exc_info: + await model.count_tokens([ModelRequest.user_text_prompt('hi')], None, params) + + assert exc_info.value.message == snapshot( + 'An error occurred (TestException) when calling the count_tokens operation: broken connection' + ) + + +async def test_bedrock_stream_non_http_error(): + error = ClientError({'Error': {'Code': 'TestException', 'Message': 'broken connection'}}, 'converse_stream') + model = _bedrock_model_with_client_error(error) + params = ModelRequestParameters() + + with pytest.raises(ModelAPIError) as exc_info: + async with model.request_stream([ModelRequest.user_text_prompt('hi')], None, params) as stream: + async for _ in stream: + pass + + assert 'broken connection' in exc_info.value.message + + +async def test_stub_provider_properties(): + # tests the test utility itself... + error = ClientError({'Error': {'Code': 'TestException', 'Message': 'test'}}, 'converse') + model = _bedrock_model_with_client_error(error) + provider = model._provider # pyright: ignore[reportPrivateUsage] + + assert provider.name == 'bedrock-stub' + assert provider.base_url == 'https://bedrock.stub' + + @pytest.mark.parametrize( ('model_name', 'expected'), [ diff --git a/tests/models/test_cohere.py b/tests/models/test_cohere.py index b3560a8da1..07cb6ae9b9 100644 --- a/tests/models/test_cohere.py +++ b/tests/models/test_cohere.py @@ -12,6 +12,7 @@ from pydantic_ai import ( Agent, ImageUrl, + ModelAPIError, ModelHTTPError, ModelRequest, ModelResponse, @@ -399,6 +400,20 @@ def test_model_status_error(allow_model_requests: None) -> None: assert str(exc_info.value) == snapshot("status_code: 500, model_name: command-r, body: {'error': 'test error'}") +def test_model_non_http_error(allow_model_requests: None) -> None: + mock_client = MockAsyncClientV2.create_mock( + ApiError( + status_code=None, + body={'error': 'connection error'}, + ) + ) + m = CohereModel('command-r', provider=CohereProvider(cohere_client=mock_client)) + agent = Agent(m) + with pytest.raises(ModelAPIError) as exc_info: + agent.run_sync('hello') + assert exc_info.value.model_name == 'command-r' + + @pytest.mark.vcr() async def test_request_simple_success_with_vcr(allow_model_requests: None, co_api_key: str): m = CohereModel('command-r7b-12-2024', provider=CohereProvider(api_key=co_api_key)) diff --git a/tests/models/test_fallback.py b/tests/models/test_fallback.py index d03726330a..2d58fae71a 100644 --- a/tests/models/test_fallback.py +++ b/tests/models/test_fallback.py @@ -15,6 +15,7 @@ from pydantic_ai import ( Agent, + ModelAPIError, ModelHTTPError, ModelMessage, ModelProfile, @@ -570,6 +571,18 @@ async def test_fallback_condition_tuple() -> None: assert response.output == 'success' +async def 
test_fallback_connection_error() -> None: + def connection_error_response(_model_messages: list[ModelMessage], _agent_info: AgentInfo) -> ModelResponse: + raise ModelAPIError(model_name='test-connection-model', message='Connection timed out') + + connection_error_model = FunctionModel(connection_error_response) + fallback_model = FallbackModel(connection_error_model, success_model) + agent = Agent(model=fallback_model) + + response = await agent.run('hello') + assert response.output == 'success' + + async def test_fallback_model_settings_merge(): """Test that FallbackModel properly merges model settings from wrapped model and runtime settings.""" diff --git a/tests/models/test_google.py b/tests/models/test_google.py index 3cdada196e..be613a5649 100644 --- a/tests/models/test_google.py +++ b/tests/models/test_google.py @@ -45,7 +45,7 @@ ) from pydantic_ai.agent import Agent from pydantic_ai.builtin_tools import CodeExecutionTool, ImageGenerationTool, UrlContextTool, WebSearchTool -from pydantic_ai.exceptions import ModelHTTPError, ModelRetry, UnexpectedModelBehavior, UserError +from pydantic_ai.exceptions import ModelAPIError, ModelHTTPError, ModelRetry, UnexpectedModelBehavior, UserError from pydantic_ai.messages import ( BuiltinToolCallEvent, # pyright: ignore[reportDeprecated] BuiltinToolResultEvent, # pyright: ignore[reportDeprecated] @@ -3825,6 +3825,23 @@ async def test_google_api_errors_are_handled( assert error_response['error']['message'] in str(exc_info.value.body) +async def test_google_api_non_http_error( + allow_model_requests: None, + google_provider: GoogleProvider, + mocker: MockerFixture, +): + model = GoogleModel('gemini-1.5-flash', provider=google_provider) + mocked_error = errors.APIError(302, {'error': {'code': 302, 'message': 'Redirect', 'status': 'REDIRECT'}}) + mocker.patch.object(model.client.aio.models, 'generate_content', side_effect=mocked_error) + + agent = Agent(model=model) + + with pytest.raises(ModelAPIError) as exc_info: + await agent.run('This prompt will trigger the mocked error.') + + assert exc_info.value.model_name == 'gemini-1.5-flash' + + async def test_google_model_retrying_after_empty_response(allow_model_requests: None, google_provider: GoogleProvider): message_history = [ ModelRequest(parts=[UserPromptPart(content='Hi')]), diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py index 515892d58c..dd3395750e 100644 --- a/tests/models/test_groq.py +++ b/tests/models/test_groq.py @@ -22,6 +22,7 @@ BuiltinToolReturnPart, FinalResultEvent, ImageUrl, + ModelAPIError, ModelHTTPError, ModelRequest, ModelResponse, @@ -51,7 +52,7 @@ from .mock_async_stream import MockAsyncStream with try_import() as imports_successful: - from groq import APIStatusError, AsyncGroq + from groq import APIConnectionError, APIStatusError, AsyncGroq from groq.types import chat from groq.types.chat.chat_completion import Choice from groq.types.chat.chat_completion_chunk import ( @@ -694,6 +695,21 @@ def test_model_status_error(allow_model_requests: None) -> None: ) +def test_model_connection_error(allow_model_requests: None) -> None: + mock_client = MockGroq.create_mock( + APIConnectionError( + message='Connection to https://api.groq.com timed out', + request=httpx.Request('POST', 'https://api.groq.com/v1/chat/completions'), + ) + ) + m = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(groq_client=mock_client)) + agent = Agent(m) + with pytest.raises(ModelAPIError) as exc_info: + agent.run_sync('hello') + assert exc_info.value.model_name == 
'llama-3.3-70b-versatile' + assert 'Connection to https://api.groq.com timed out' in str(exc_info.value.message) + + async def test_init_with_provider(): provider = GroqProvider(api_key='api-key') model = GroqModel('llama3-8b-8192', provider=provider) diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py index 4a39791309..4ae21ad221 100644 --- a/tests/models/test_mistral.py +++ b/tests/models/test_mistral.py @@ -28,7 +28,7 @@ VideoUrl, ) from pydantic_ai.agent import Agent -from pydantic_ai.exceptions import ModelHTTPError, ModelRetry +from pydantic_ai.exceptions import ModelAPIError, ModelHTTPError, ModelRetry from pydantic_ai.usage import RequestUsage from ..conftest import IsDatetime, IsNow, IsStr, raise_if_exception, try_import @@ -2167,6 +2167,21 @@ def test_model_status_error(allow_model_requests: None) -> None: assert str(exc_info.value) == snapshot('status_code: 500, model_name: mistral-large-latest, body: test error') +def test_model_non_http_error(allow_model_requests: None) -> None: + mock_client = MockMistralAI.create_mock( + SDKError( + 'Connection error', + status_code=300, + body='redirect', + ) + ) + m = MistralModel('mistral-large-latest', provider=MistralProvider(mistral_client=mock_client)) + agent = Agent(m) + with pytest.raises(ModelAPIError) as exc_info: + agent.run_sync('hello') + assert exc_info.value.model_name == 'mistral-large-latest' + + async def test_mistral_model_instructions(allow_model_requests: None, mistral_api_key: str): c = completion_message(MistralAssistantMessage(content='world', role='assistant')) mock_client = MockMistralAI.create_mock(c) diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py index 190c541ff6..004d91ec5a 100644 --- a/tests/models/test_openai.py +++ b/tests/models/test_openai.py @@ -20,6 +20,7 @@ CachePoint, DocumentUrl, ImageUrl, + ModelAPIError, ModelHTTPError, ModelProfile, ModelRequest, @@ -56,7 +57,7 @@ ) with try_import() as imports_successful: - from openai import APIStatusError, AsyncOpenAI + from openai import APIConnectionError, APIStatusError, AsyncOpenAI from openai.types import chat from openai.types.chat.chat_completion import ChoiceLogprobs from openai.types.chat.chat_completion_chunk import ( @@ -1146,6 +1147,36 @@ def test_model_status_error(allow_model_requests: None) -> None: assert str(exc_info.value) == snapshot("status_code: 500, model_name: gpt-4o, body: {'error': 'test error'}") +def test_model_connection_error(allow_model_requests: None) -> None: + mock_client = MockOpenAI.create_mock( + APIConnectionError( + message='Connection to http://localhost:11434/v1 timed out', + request=httpx.Request('POST', 'http://localhost:11434/v1'), + ) + ) + m = OpenAIChatModel('gpt-4o', provider=OpenAIProvider(openai_client=mock_client)) + agent = Agent(m) + with pytest.raises(ModelAPIError) as exc_info: + agent.run_sync('hello') + assert exc_info.value.model_name == 'gpt-4o' + assert 'Connection to http://localhost:11434/v1 timed out' in str(exc_info.value.message) + + +def test_responses_model_connection_error(allow_model_requests: None) -> None: + mock_client = MockOpenAIResponses.create_mock( + APIConnectionError( + message='Connection to http://localhost:11434/v1 timed out', + request=httpx.Request('POST', 'http://localhost:11434/v1'), + ) + ) + m = OpenAIResponsesModel('o3-mini', provider=OpenAIProvider(openai_client=mock_client)) + agent = Agent(m) + with pytest.raises(ModelAPIError) as exc_info: + agent.run_sync('hello') + assert exc_info.value.model_name == 'o3-mini' + assert 
'Connection to http://localhost:11434/v1 timed out' in str(exc_info.value.message) + + @pytest.mark.parametrize('model_name', ['o3-mini', 'gpt-4o-mini', 'gpt-4.5-preview']) async def test_max_completion_tokens(allow_model_requests: None, model_name: str, openai_api_key: str): m = OpenAIChatModel(model_name, provider=OpenAIProvider(api_key=openai_api_key)) diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py index 68c08d8b71..c5e27b5706 100644 --- a/tests/test_exceptions.py +++ b/tests/test_exceptions.py @@ -11,6 +11,7 @@ ApprovalRequired, CallDeferred, IncompleteToolCall, + ModelAPIError, ModelHTTPError, UnexpectedModelBehavior, UsageLimitExceeded, @@ -28,6 +29,7 @@ lambda: AgentRunError('test'), lambda: UnexpectedModelBehavior('test'), lambda: UsageLimitExceeded('test'), + lambda: ModelAPIError('model', 'test message'), lambda: ModelHTTPError(500, 'model'), lambda: IncompleteToolCall('test'), ], @@ -39,6 +41,7 @@ 'AgentRunError', 'UnexpectedModelBehavior', 'UsageLimitExceeded', + 'ModelAPIError', 'ModelHTTPError', 'IncompleteToolCall', ],
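Since `ModelHTTPError` now subclasses `ModelAPIError`, a single handler covers both HTTP failures and connection-level failures. A quick sketch of the resulting hierarchy, using the constructor signatures and message format from the `exceptions.py` hunk above:

```python
from pydantic_ai import ModelAPIError, ModelHTTPError

http_err = ModelHTTPError(500, 'gpt-4o', body={'error': 'test error'})
api_err = ModelAPIError(model_name='gpt-4o', message='Connection timed out')

for exc in (http_err, api_err):
    try:
        raise exc
    except ModelAPIError as e:
        # Both exceptions expose model_name; str(e) returns the message.
        print(f'{type(e).__name__}: model_name={e.model_name}, message={e}')
#> ModelHTTPError: model_name=gpt-4o, message=status_code: 500, model_name: gpt-4o, body: {'error': 'test error'}
#> ModelAPIError: model_name=gpt-4o, message=Connection timed out
```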