
Commit c22a9f4

feat: Add id and finish_reason to ModelResponse

Parent: 6207ac6
27 files changed: +373, -191 lines

pydantic_ai_slim/pydantic_ai/messages.py
Lines changed: 32 additions & 2 deletions

@@ -789,8 +789,38 @@ class ModelResponse:
     For OpenAI models, this may include 'logprobs', 'finish_reason', etc.
     """

-    vendor_id: str | None = None
-    """Vendor ID as specified by the model provider. This can be used to track the specific request to the model."""
+    id: str | None = None
+    """Unique identifier for the model response, e.g. as returned by the model provider (OpenAI, etc)."""
+
+    finish_reason: str | None = None
+    """The reason the model finished generating this response, e.g. 'stop', 'length', etc."""
+
+    @property
+    def vendor_id(self) -> str | None:
+        """Vendor ID as specified by the model provider. This can be used to track the specific request to the model.
+
+        This is deprecated, use `id` instead.
+        """
+        import warnings
+        warnings.warn('vendor_id is deprecated, use id instead', DeprecationWarning, stacklevel=2)
+        return self.id
+
+    @vendor_id.setter
+    def vendor_id(self, value: str | None) -> None:
+        """Set the vendor ID.
+
+        This is deprecated, use `id` instead.
+        """
+        import warnings
+        warnings.warn('vendor_id is deprecated, use id instead', DeprecationWarning, stacklevel=2)
+        self.id = value
+
+    def __post_init__(self) -> None:
+        """Ensure vendor_details contains finish_reason for backward compatibility."""
+        if self.finish_reason and self.vendor_details is None:
+            self.vendor_details = {}
+        if self.finish_reason and self.vendor_details is not None:
+            self.vendor_details['finish_reason'] = self.finish_reason

     def otel_events(self, settings: InstrumentationSettings) -> list[Event]:
         """Return OpenTelemetry events for the response."""

pydantic_ai_slim/pydantic_ai/models/anthropic.py
Lines changed: 1 addition & 1 deletion

@@ -282,7 +282,7 @@ def _process_response(self, response: BetaMessage) -> ModelResponse:
                 )
             )

-        return ModelResponse(items, usage=_map_usage(response), model_name=response.model, vendor_id=response.id)
+        return ModelResponse(items, usage=_map_usage(response), model_name=response.model, id=response.id)

     async def _process_streamed_response(self, response: AsyncStream[BetaRawMessageStreamEvent]) -> StreamedResponse:
         peekable_response = _utils.PeekableAsyncStream(response)

pydantic_ai_slim/pydantic_ai/models/bedrock.py
Lines changed: 1 addition & 1 deletion

@@ -296,7 +296,7 @@ async def _process_response(self, response: ConverseResponseTypeDef) -> ModelRes
             total_tokens=response['usage']['totalTokens'],
         )
         vendor_id = response.get('ResponseMetadata', {}).get('RequestId', None)
-        return ModelResponse(items, usage=u, model_name=self.model_name, vendor_id=vendor_id)
+        return ModelResponse(items, usage=u, model_name=self.model_name, id=vendor_id)

     @overload
     async def _messages_create(
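
Bedrock's Converse API has no response-level id, so the commit reuses the HTTP request id from boto3's ResponseMetadata. A small sketch of that lookup against a hypothetical payload (shape abbreviated, values illustrative):

# Hypothetical excerpt of a boto3 Converse response.
response = {
    'ResponseMetadata': {'RequestId': '7f0fcd20-example'},
    'usage': {'inputTokens': 10, 'outputTokens': 5, 'totalTokens': 15},
}
# Same chained .get() as the diff: yields None if either key is absent.
vendor_id = response.get('ResponseMetadata', {}).get('RequestId', None)
assert vendor_id == '7f0fcd20-example'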

pydantic_ai_slim/pydantic_ai/models/function.py
Lines changed: 2 additions & 0 deletions

@@ -137,6 +137,8 @@ async def request(
         if not response.usage.has_values():  # pragma: no branch
             response.usage = _estimate_usage(chain(messages, [response]))
         response.usage.requests = 1
+        response.id = getattr(response, 'id', None)
+        response.finish_reason = getattr(response, 'finish_reason', None)
         return response

     @asynccontextmanager
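
The `getattr(..., None)` assignments make FunctionModel tolerant of response objects that predate the new fields (for example, ones deserialized from older message history): a missing attribute is created as None, an existing one is reassigned unchanged. The pattern in isolation:

class LegacyResponse:  # hypothetical object lacking the new attributes
    pass

r = LegacyResponse()
r.id = getattr(r, 'id', None)                        # creates the attribute as None
r.finish_reason = getattr(r, 'finish_reason', None)  # ditto
assert r.id is None and r.finish_reason is None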

pydantic_ai_slim/pydantic_ai/models/gemini.py
Lines changed: 5 additions & 6 deletions

@@ -273,15 +273,16 @@ def _process_response(self, response: _GeminiResponse) -> ModelResponse:
         parts = response['candidates'][0]['content']['parts']
         vendor_id = response.get('vendor_id', None)
         finish_reason = response['candidates'][0].get('finish_reason')
+        vendor_details = {}
         if finish_reason:
-            vendor_details = {'finish_reason': finish_reason}
+            vendor_details['finish_reason'] = finish_reason
         usage = _metadata_as_usage(response)
         usage.requests = 1
         return _process_response_from_parts(
             parts,
             response.get('model_version', self._model_name),
             usage,
-            vendor_id=vendor_id,
+            id=vendor_id,
             vendor_details=vendor_details,
         )

@@ -662,7 +663,7 @@ def _process_response_from_parts(
     parts: Sequence[_GeminiPartUnion],
     model_name: GeminiModelName,
     usage: usage.Usage,
-    vendor_id: str | None,
+    id: str | None,
     vendor_details: dict[str, Any] | None = None,
 ) -> ModelResponse:
     items: list[ModelResponsePart] = []

@@ -680,9 +681,7 @@
             raise UnexpectedModelBehavior(
                 f'Unsupported response from Gemini, expected all parts to be function calls or text, got: {part!r}'
             )
-    return ModelResponse(
-        parts=items, usage=usage, model_name=model_name, vendor_id=vendor_id, vendor_details=vendor_details
-    )
+    return ModelResponse(parts=items, usage=usage, model_name=model_name, id=id, vendor_details=vendor_details)


 class _GeminiFunctionCall(TypedDict):

pydantic_ai_slim/pydantic_ai/models/google.py
Lines changed: 3 additions & 5 deletions

@@ -322,7 +322,7 @@ def _process_response(self, response: GenerateContentResponse) -> ModelResponse:
         usage = _metadata_as_usage(response)
         usage.requests = 1
         return _process_response_from_parts(
-            parts, response.model_version or self._model_name, usage, vendor_id=vendor_id, vendor_details=vendor_details
+            parts, response.model_version or self._model_name, usage, id=vendor_id, vendor_details=vendor_details
         )

     async def _process_streamed_response(self, response: AsyncIterator[GenerateContentResponse]) -> StreamedResponse:

@@ -506,7 +506,7 @@ def _process_response_from_parts(
     parts: list[Part],
     model_name: GoogleModelName,
     usage: usage.Usage,
-    vendor_id: str | None,
+    id: str | None,
     vendor_details: dict[str, Any] | None = None,
 ) -> ModelResponse:
     items: list[ModelResponsePart] = []

@@ -526,9 +526,7 @@
             raise UnexpectedModelBehavior(
                 f'Unsupported response from Gemini, expected all parts to be function calls or text, got: {part!r}'
             )
-    return ModelResponse(
-        parts=items, model_name=model_name, usage=usage, vendor_id=vendor_id, vendor_details=vendor_details
-    )
+    return ModelResponse(parts=items, model_name=model_name, usage=usage, id=id, vendor_details=vendor_details)


 def _function_declaration_from_tool(tool: ToolDefinition) -> FunctionDeclarationDict:

pydantic_ai_slim/pydantic_ai/models/groq.py
Lines changed: 1 addition & 1 deletion

@@ -266,7 +266,7 @@ def _process_response(self, response: chat.ChatCompletion) -> ModelResponse:
             for c in choice.message.tool_calls:
                 items.append(ToolCallPart(tool_name=c.function.name, args=c.function.arguments, tool_call_id=c.id))
         return ModelResponse(
-            items, usage=_map_usage(response), model_name=response.model, timestamp=timestamp, vendor_id=response.id
+            items, usage=_map_usage(response), model_name=response.model, timestamp=timestamp, id=response.id
         )

     async def _process_streamed_response(self, response: AsyncStream[chat.ChatCompletionChunk]) -> GroqStreamedResponse:

pydantic_ai_slim/pydantic_ai/models/huggingface.py
Lines changed: 1 addition & 1 deletion

@@ -253,7 +253,7 @@ def _process_response(self, response: ChatCompletionOutput) -> ModelResponse:
             usage=_map_usage(response),
             model_name=response.model,
             timestamp=timestamp,
-            vendor_id=response.id,
+            id=response.id,
         )

     async def _process_streamed_response(self, response: AsyncIterable[ChatCompletionStreamOutput]) -> StreamedResponse:

pydantic_ai_slim/pydantic_ai/models/mistral.py
Lines changed: 1 addition & 1 deletion

@@ -341,7 +341,7 @@ def _process_response(self, response: MistralChatCompletionResponse) -> ModelRes
                 parts.append(tool)

         return ModelResponse(
-            parts, usage=_map_usage(response), model_name=response.model, timestamp=timestamp, vendor_id=response.id
+            parts, usage=_map_usage(response), model_name=response.model, timestamp=timestamp, id=response.id
         )

     async def _process_streamed_response(

pydantic_ai_slim/pydantic_ai/models/openai.py
Lines changed: 3 additions & 2 deletions

@@ -419,7 +419,8 @@ def _process_response(self, response: chat.ChatCompletion | str) -> ModelRespons
             model_name=response.model,
             timestamp=timestamp,
             vendor_details=vendor_details,
-            vendor_id=response.id,
+            id=response.id,
+            finish_reason=choice.finish_reason,
         )

     async def _process_streamed_response(self, response: AsyncStream[ChatCompletionChunk]) -> OpenAIStreamedResponse:

@@ -706,7 +707,7 @@ def _process_response(self, response: responses.Response) -> ModelResponse:
             items,
             usage=_map_usage(response),
             model_name=response.model,
-            vendor_id=response.id,
+            id=response.id,
             timestamp=timestamp,
         )
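
With the Chat Completions path now forwarding `choice.finish_reason`, callers get it as a first-class attribute while `__post_init__` keeps the legacy `vendor_details` lookup alive. A consumer-side sketch, constructing a `ModelResponse` directly for illustration and assuming the remaining fields keep their existing defaults:

from pydantic_ai.messages import ModelResponse

response = ModelResponse(parts=[], model_name='gpt-4o', id='chatcmpl-abc123', finish_reason='stop')
assert response.finish_reason == 'stop'                    # new first-class field
assert response.vendor_details['finish_reason'] == 'stop'  # legacy location, backfilled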
