Merged
2 changes: 1 addition & 1 deletion docs/models/index.md
@@ -118,7 +118,7 @@ print(response.all_messages())
model_name='claude-3-5-sonnet-latest',
timestamp=datetime.datetime(...),
kind='response',
-provider_request_id=None,
+provider_response_id=None,
),
]
"""
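For context, a minimal sketch (not part of this PR) of where the renamed field surfaces when inspecting message history — using the built-in `'test'` model so no API key is needed; `TestModel` supplies no provider ID, hence `None`, matching the docs snippet above:

```python
from pydantic_ai import Agent

agent = Agent('test')  # built-in TestModel; no API key required
result = agent.run_sync('hello')

# The final message is the ModelResponse; TestModel does not set a
# provider ID, so the renamed field is None here (as in the docs).
response = result.all_messages()[-1]
print(response.provider_response_id)  # -> None
```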
11 changes: 8 additions & 3 deletions pydantic_ai_slim/pydantic_ai/messages.py
@@ -938,7 +938,7 @@ class ModelResponse:
For OpenAI models, this may include 'logprobs', 'finish_reason', etc.
"""

-provider_request_id: str | None = None
+provider_response_id: str | None = None
"""request ID as specified by the model provider. This can be used to track the specific request to the model."""

def price(self) -> genai_types.PriceCalculation:
@@ -1026,9 +1026,14 @@ def vendor_details(self) -> dict[str, Any] | None:
return self.provider_details

@property
-@deprecated('`vendor_id` is deprecated, use `provider_request_id` instead')
+@deprecated('`vendor_id` is deprecated, use `provider_response_id` instead')
def vendor_id(self) -> str | None:
-return self.provider_request_id
+return self.provider_response_id
+
+@property
+@deprecated('`provider_request_id` is deprecated, use `provider_response_id` instead')
+def provider_request_id(self) -> str | None:
+    return self.provider_response_id

__repr__ = _utils.dataclasses_no_defaults_repr

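Taken together, the deprecated properties above keep old call sites working. A minimal sketch of the shim's behavior (construction details are illustrative, not taken from this PR):

```python
import warnings

from pydantic_ai.messages import ModelResponse, TextPart

# 'resp_123' is a made-up ID for illustration.
response = ModelResponse(
    parts=[TextPart(content='hello')],
    provider_response_id='resp_123',
)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    # The deprecated property forwards to the renamed field...
    assert response.provider_request_id == 'resp_123'

# ...while emitting a DeprecationWarning that names the replacement.
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```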
2 changes: 1 addition & 1 deletion pydantic_ai_slim/pydantic_ai/models/anthropic.py
@@ -330,7 +330,7 @@ def _process_response(self, response: BetaMessage) -> ModelResponse:
items,
usage=_map_usage(response),
model_name=response.model,
-provider_request_id=response.id,
+provider_response_id=response.id,
provider_name=self._provider.name,
)

8 changes: 6 additions & 2 deletions pydantic_ai_slim/pydantic_ai/models/bedrock.py
@@ -301,9 +301,13 @@ async def _process_response(self, response: ConverseResponseTypeDef) -> ModelRes
input_tokens=response['usage']['inputTokens'],
output_tokens=response['usage']['outputTokens'],
)
-vendor_id = response.get('ResponseMetadata', {}).get('RequestId', None)
+response_id = response.get('ResponseMetadata', {}).get('RequestId', None)
 return ModelResponse(
-    items, usage=u, model_name=self.model_name, provider_request_id=vendor_id, provider_name=self._provider.name
+    items,
+    usage=u,
+    model_name=self.model_name,
+    provider_response_id=response_id,
+    provider_name=self._provider.name,
 )

@overload
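Bedrock is the one provider in this PR whose ID lives under response metadata rather than on the response body, hence the chained `.get()` lookups. A tiny sketch of why that guard matters, with plain dicts standing in for a botocore Converse response:

```python
# A normal Converse response carries ResponseMetadata -> RequestId ...
full = {'ResponseMetadata': {'RequestId': 'abc-123'}}
# ... but stubbed or partial responses may omit either key entirely.
stub: dict = {}

assert full.get('ResponseMetadata', {}).get('RequestId', None) == 'abc-123'
assert stub.get('ResponseMetadata', {}).get('RequestId', None) is None
```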
2 changes: 1 addition & 1 deletion pydantic_ai_slim/pydantic_ai/models/gemini.py
@@ -690,7 +690,7 @@ def _process_response_from_parts(
f'Unsupported response from Gemini, expected all parts to be function calls or text, got: {part!r}'
)
return ModelResponse(
-parts=items, usage=usage, model_name=model_name, provider_request_id=vendor_id, provider_details=vendor_details
+parts=items, usage=usage, model_name=model_name, provider_response_id=vendor_id, provider_details=vendor_details
)


2 changes: 1 addition & 1 deletion pydantic_ai_slim/pydantic_ai/models/google.py
@@ -648,7 +648,7 @@ def _process_response_from_parts(
parts=items,
model_name=model_name,
usage=usage,
-provider_request_id=vendor_id,
+provider_response_id=vendor_id,
provider_details=vendor_details,
provider_name=provider_name,
)
2 changes: 1 addition & 1 deletion pydantic_ai_slim/pydantic_ai/models/groq.py
@@ -289,7 +289,7 @@ def _process_response(self, response: chat.ChatCompletion) -> ModelResponse:
usage=_map_usage(response),
model_name=response.model,
timestamp=timestamp,
-provider_request_id=response.id,
+provider_response_id=response.id,
provider_name=self._provider.name,
)

2 changes: 1 addition & 1 deletion pydantic_ai_slim/pydantic_ai/models/huggingface.py
@@ -271,7 +271,7 @@ def _process_response(self, response: ChatCompletionOutput) -> ModelResponse:
usage=_map_usage(response),
model_name=response.model,
timestamp=timestamp,
-provider_request_id=response.id,
+provider_response_id=response.id,
provider_name=self._provider.name,
)

2 changes: 1 addition & 1 deletion pydantic_ai_slim/pydantic_ai/models/mistral.py
@@ -352,7 +352,7 @@ def _process_response(self, response: MistralChatCompletionResponse) -> ModelRes
usage=_map_usage(response),
model_name=response.model,
timestamp=timestamp,
-provider_request_id=response.id,
+provider_response_id=response.id,
provider_name=self._provider.name,
)

4 changes: 2 additions & 2 deletions pydantic_ai_slim/pydantic_ai/models/openai.py
@@ -517,7 +517,7 @@ def _process_response(self, response: chat.ChatCompletion | str) -> ModelRespons
model_name=response.model,
timestamp=timestamp,
provider_details=vendor_details,
-provider_request_id=response.id,
+provider_response_id=response.id,
provider_name=self._provider.name,
)

@@ -831,7 +831,7 @@ def _process_response(self, response: responses.Response) -> ModelResponse:
items,
usage=_map_usage(response),
model_name=response.model,
-provider_request_id=response.id,
+provider_response_id=response.id,
timestamp=timestamp,
provider_name=self._provider.name,
)
44 changes: 22 additions & 22 deletions tests/models/test_anthropic.py
@@ -205,7 +205,7 @@ async def test_sync_request_text_response(allow_model_requests: None):
model_name='claude-3-5-haiku-123',
timestamp=IsNow(tz=timezone.utc),
provider_name='anthropic',
-provider_request_id='123',
+provider_response_id='123',
),
ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
ModelResponse(
@@ -214,7 +214,7 @@ async def test_sync_request_text_response(allow_model_requests: None):
model_name='claude-3-5-haiku-123',
timestamp=IsNow(tz=timezone.utc),
provider_name='anthropic',
-provider_request_id='123',
+provider_response_id='123',
),
]
)
@@ -303,7 +303,7 @@ async def test_request_structured_response(allow_model_requests: None):
model_name='claude-3-5-haiku-123',
timestamp=IsNow(tz=timezone.utc),
provider_name='anthropic',
-provider_request_id='123',
+provider_response_id='123',
),
ModelRequest(
parts=[
@@ -368,7 +368,7 @@ async def get_location(loc_name: str) -> str:
model_name='claude-3-5-haiku-123',
timestamp=IsNow(tz=timezone.utc),
provider_name='anthropic',
-provider_request_id='123',
+provider_response_id='123',
),
ModelRequest(
parts=[
@@ -392,7 +392,7 @@ async def get_location(loc_name: str) -> str:
model_name='claude-3-5-haiku-123',
timestamp=IsNow(tz=timezone.utc),
provider_name='anthropic',
-provider_request_id='123',
+provider_response_id='123',
),
ModelRequest(
parts=[
@@ -410,7 +410,7 @@ async def get_location(loc_name: str) -> str:
model_name='claude-3-5-haiku-123',
timestamp=IsNow(tz=timezone.utc),
provider_name='anthropic',
-provider_request_id='123',
+provider_response_id='123',
),
]
)
@@ -757,7 +757,7 @@ async def get_image() -> BinaryContent:
model_name='claude-3-5-sonnet-20241022',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_01Kwjzggomz7bv9og51qGFuH',
+provider_response_id='msg_01Kwjzggomz7bv9og51qGFuH',
),
ModelRequest(
parts=[
@@ -795,7 +795,7 @@ async def get_image() -> BinaryContent:
model_name='claude-3-5-sonnet-20241022',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_015btMBYLTuDnMP7zAeuHQGi',
+provider_response_id='msg_015btMBYLTuDnMP7zAeuHQGi',
),
]
)
@@ -917,7 +917,7 @@ def simple_instructions():
model_name='claude-3-opus-20240229',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_01Fg1JVgvCYUHWsxrj9GkpEv',
+provider_response_id='msg_01Fg1JVgvCYUHWsxrj9GkpEv',
),
]
)
@@ -965,7 +965,7 @@ async def test_anthropic_model_thinking_part(allow_model_requests: None, anthrop
model_name='claude-3-7-sonnet-20250219',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_01BnZvs3naGorn93wjjCDwbd',
+provider_response_id='msg_01BnZvs3naGorn93wjjCDwbd',
),
]
)
@@ -992,7 +992,7 @@ async def test_anthropic_model_thinking_part(allow_model_requests: None, anthrop
model_name='claude-3-7-sonnet-20250219',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id=IsStr(),
+provider_response_id=IsStr(),
),
ModelRequest(
parts=[
@@ -1035,7 +1035,7 @@ async def test_anthropic_model_thinking_part(allow_model_requests: None, anthrop
model_name='claude-3-7-sonnet-20250219',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id=IsStr(),
+provider_response_id=IsStr(),
),
]
)
@@ -1465,7 +1465,7 @@ async def test_anthropic_web_search_tool(allow_model_requests: None, anthropic_a
model_name='claude-3-5-sonnet-20241022',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_01W2YfD2EF8BbAqLRr8ftH4W',
+provider_response_id='msg_01W2YfD2EF8BbAqLRr8ftH4W',
),
]
)
@@ -1521,7 +1521,7 @@ async def test_anthropic_code_execution_tool(allow_model_requests: None, anthrop
model_name='claude-sonnet-4-20250514',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_01RJnbK7VMxvS2SyvtyJAQVU',
+provider_response_id='msg_01RJnbK7VMxvS2SyvtyJAQVU',
),
]
)
@@ -1571,7 +1571,7 @@ async def test_anthropic_server_tool_pass_history_to_another_provider(
model_name='gpt-4.1-2025-04-14',
timestamp=IsDatetime(),
provider_name='openai',
-provider_request_id='resp_689dc4abe31c81968ed493d15d8810fe0afe80ec3d42722e',
+provider_response_id='resp_689dc4abe31c81968ed493d15d8810fe0afe80ec3d42722e',
),
]
)
@@ -1687,7 +1687,7 @@ async def get_user_country() -> str:
model_name='claude-3-5-sonnet-20241022',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_012TXW181edhmR5JCsQRsBKx',
+provider_response_id='msg_012TXW181edhmR5JCsQRsBKx',
),
ModelRequest(
parts=[
@@ -1720,7 +1720,7 @@ async def get_user_country() -> str:
model_name='claude-3-5-sonnet-20241022',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_01K4Fzcf1bhiyLzHpwLdrefj',
+provider_response_id='msg_01K4Fzcf1bhiyLzHpwLdrefj',
),
ModelRequest(
parts=[
@@ -1785,7 +1785,7 @@ async def get_user_country() -> str:
model_name='claude-3-5-sonnet-20241022',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_01MsqUB7ZyhjGkvepS1tCXp3',
+provider_response_id='msg_01MsqUB7ZyhjGkvepS1tCXp3',
),
ModelRequest(
parts=[
@@ -1816,7 +1816,7 @@ async def get_user_country() -> str:
model_name='claude-3-5-sonnet-20241022',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_0142umg4diSckrDtV9vAmmPL',
+provider_response_id='msg_0142umg4diSckrDtV9vAmmPL',
),
]
)
@@ -1874,7 +1874,7 @@ async def get_user_country() -> str:
model_name='claude-3-5-sonnet-20241022',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_018YiNXULHGpoKoHkTt6GivG',
+provider_response_id='msg_018YiNXULHGpoKoHkTt6GivG',
),
ModelRequest(
parts=[
@@ -1908,7 +1908,7 @@ async def get_user_country() -> str:
model_name='claude-3-5-sonnet-20241022',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_01WiRVmLhCrJbJZRqmAWKv3X',
+provider_response_id='msg_01WiRVmLhCrJbJZRqmAWKv3X',
),
]
)
@@ -1966,7 +1966,7 @@ class CountryLanguage(BaseModel):
model_name='claude-3-5-sonnet-20241022',
timestamp=IsDatetime(),
provider_name='anthropic',
-provider_request_id='msg_01N2PwwVQo2aBtt6UFhMDtEX',
+provider_response_id='msg_01N2PwwVQo2aBtt6UFhMDtEX',
),
]
)
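These snapshots compare the renamed field against either a recorded literal or a matcher. A short sketch of the matcher pattern (dirty-equals style, as used throughout these tests):

```python
from datetime import datetime, timezone

from dirty_equals import IsDatetime, IsStr

# Recorded cassettes pin exact IDs; fields that vary between runs are
# matched structurally instead of literally.
assert 'msg_01BnZvs3naGorn93wjjCDwbd' == IsStr()
assert datetime.now(tz=timezone.utc) == IsDatetime()
```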
4 changes: 2 additions & 2 deletions tests/models/test_cohere.py
@@ -443,7 +443,7 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key
model_name='o3-mini-2025-01-31',
timestamp=IsDatetime(),
provider_name='openai',
-provider_request_id='resp_680739f4ad748191bd11096967c37c8b048efc3f8b2a068e',
+provider_response_id='resp_680739f4ad748191bd11096967c37c8b048efc3f8b2a068e',
),
]
)
@@ -468,7 +468,7 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key
model_name='o3-mini-2025-01-31',
timestamp=IsDatetime(),
provider_name='openai',
-provider_request_id='resp_680739f4ad748191bd11096967c37c8b048efc3f8b2a068e',
+provider_response_id='resp_680739f4ad748191bd11096967c37c8b048efc3f8b2a068e',
),
ModelRequest(
parts=[
2 changes: 1 addition & 1 deletion tests/models/test_deepseek.py
@@ -56,7 +56,7 @@ async def test_deepseek_model_thinking_part(allow_model_requests: None, deepseek
model_name='deepseek-reasoner',
timestamp=IsDatetime(),
provider_name='deepseek',
-provider_request_id='181d9669-2b3a-445e-bd13-2ebff2c378f6',
+provider_response_id='181d9669-2b3a-445e-bd13-2ebff2c378f6',
),
]
)