Skip to content

Commit 40d9442

Browse files
committed
feat: Add id and finish_reason to ModelResponse
1 parent 6207ac6 commit 40d9442

27 files changed

+381
-187
lines changed

pydantic_ai_slim/pydantic_ai/messages.py

Lines changed: 40 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -789,8 +789,46 @@ class ModelResponse:
789789
For OpenAI models, this may include 'logprobs', 'finish_reason', etc.
790790
"""
791791

792-
vendor_id: str | None = None
793-
"""Vendor ID as specified by the model provider. This can be used to track the specific request to the model."""
792+
id: str | None = None
793+
"""Unique identifier for the model response, e.g. as returned by the model provider (OpenAI, etc)."""
794+
795+
finish_reason: str | None = None
796+
"""The reason the model finished generating this response, e.g. 'stop', 'length', etc."""
797+
798+
@property
799+
def vendor_id(self) -> str | None:
800+
"""Vendor ID as specified by the model provider. This can be used to track the specific request to the model.
801+
802+
This is deprecated, use `id` instead.
803+
"""
804+
import warnings
805+
warnings.warn(
806+
"vendor_id is deprecated, use id instead",
807+
DeprecationWarning,
808+
stacklevel=2
809+
)
810+
return self.id
811+
812+
@vendor_id.setter
def vendor_id(self, value: str | None) -> None:
    """Set the vendor ID by writing through to `id`.

    Deprecated; assign to `id` instead.
    """
    import warnings

    warnings.warn("vendor_id is deprecated, use id instead", DeprecationWarning, stacklevel=2)
    self.id = value
825+
826+
def __post_init__(self) -> None:
    """Mirror `finish_reason` into `vendor_details` for backward compatibility.

    Older consumers read `vendor_details['finish_reason']`; keep that key
    populated whenever a truthy `finish_reason` is set on the response.
    """
    # Original code tested `self.finish_reason` twice with a redundant
    # None / not-None pair of checks; one guarded block is equivalent.
    if self.finish_reason:
        if self.vendor_details is None:
            self.vendor_details = {}
        self.vendor_details['finish_reason'] = self.finish_reason
794832

795833
def otel_events(self, settings: InstrumentationSettings) -> list[Event]:
796834
"""Return OpenTelemetry events for the response."""

pydantic_ai_slim/pydantic_ai/models/anthropic.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -282,7 +282,7 @@ def _process_response(self, response: BetaMessage) -> ModelResponse:
282282
)
283283
)
284284

285-
return ModelResponse(items, usage=_map_usage(response), model_name=response.model, vendor_id=response.id)
285+
return ModelResponse(items, usage=_map_usage(response), model_name=response.model, id=response.id)
286286

287287
async def _process_streamed_response(self, response: AsyncStream[BetaRawMessageStreamEvent]) -> StreamedResponse:
288288
peekable_response = _utils.PeekableAsyncStream(response)

pydantic_ai_slim/pydantic_ai/models/bedrock.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -296,7 +296,7 @@ async def _process_response(self, response: ConverseResponseTypeDef) -> ModelRes
296296
total_tokens=response['usage']['totalTokens'],
297297
)
298298
vendor_id = response.get('ResponseMetadata', {}).get('RequestId', None)
299-
return ModelResponse(items, usage=u, model_name=self.model_name, vendor_id=vendor_id)
299+
return ModelResponse(items, usage=u, model_name=self.model_name, id=vendor_id)
300300

301301
@overload
302302
async def _messages_create(

pydantic_ai_slim/pydantic_ai/models/function.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -137,6 +137,8 @@ async def request(
137137
if not response.usage.has_values(): # pragma: no branch
138138
response.usage = _estimate_usage(chain(messages, [response]))
139139
response.usage.requests = 1
140+
response.id = getattr(response, 'id', None)
141+
response.finish_reason = getattr(response, 'finish_reason', None)
140142
return response
141143

142144
@asynccontextmanager

pydantic_ai_slim/pydantic_ai/models/gemini.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -273,15 +273,16 @@ def _process_response(self, response: _GeminiResponse) -> ModelResponse:
273273
parts = response['candidates'][0]['content']['parts']
274274
vendor_id = response.get('vendor_id', None)
275275
finish_reason = response['candidates'][0].get('finish_reason')
276+
vendor_details = {}
276277
if finish_reason:
277-
vendor_details = {'finish_reason': finish_reason}
278+
vendor_details['finish_reason'] = finish_reason
278279
usage = _metadata_as_usage(response)
279280
usage.requests = 1
280281
return _process_response_from_parts(
281282
parts,
282283
response.get('model_version', self._model_name),
283284
usage,
284-
vendor_id=vendor_id,
285+
id=vendor_id,
285286
vendor_details=vendor_details,
286287
)
287288

@@ -662,7 +663,7 @@ def _process_response_from_parts(
662663
parts: Sequence[_GeminiPartUnion],
663664
model_name: GeminiModelName,
664665
usage: usage.Usage,
665-
vendor_id: str | None,
666+
id: str | None,
666667
vendor_details: dict[str, Any] | None = None,
667668
) -> ModelResponse:
668669
items: list[ModelResponsePart] = []
@@ -681,7 +682,7 @@ def _process_response_from_parts(
681682
f'Unsupported response from Gemini, expected all parts to be function calls or text, got: {part!r}'
682683
)
683684
return ModelResponse(
684-
parts=items, usage=usage, model_name=model_name, vendor_id=vendor_id, vendor_details=vendor_details
685+
parts=items, usage=usage, model_name=model_name, id=id, vendor_details=vendor_details
685686
)
686687

687688

pydantic_ai_slim/pydantic_ai/models/google.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -322,7 +322,7 @@ def _process_response(self, response: GenerateContentResponse) -> ModelResponse:
322322
usage = _metadata_as_usage(response)
323323
usage.requests = 1
324324
return _process_response_from_parts(
325-
parts, response.model_version or self._model_name, usage, vendor_id=vendor_id, vendor_details=vendor_details
325+
parts, response.model_version or self._model_name, usage, id=vendor_id, vendor_details=vendor_details
326326
)
327327

328328
async def _process_streamed_response(self, response: AsyncIterator[GenerateContentResponse]) -> StreamedResponse:
@@ -506,7 +506,7 @@ def _process_response_from_parts(
506506
parts: list[Part],
507507
model_name: GoogleModelName,
508508
usage: usage.Usage,
509-
vendor_id: str | None,
509+
id: str | None,
510510
vendor_details: dict[str, Any] | None = None,
511511
) -> ModelResponse:
512512
items: list[ModelResponsePart] = []
@@ -527,7 +527,7 @@ def _process_response_from_parts(
527527
f'Unsupported response from Gemini, expected all parts to be function calls or text, got: {part!r}'
528528
)
529529
return ModelResponse(
530-
parts=items, model_name=model_name, usage=usage, vendor_id=vendor_id, vendor_details=vendor_details
530+
parts=items, model_name=model_name, usage=usage, id=id, vendor_details=vendor_details
531531
)
532532

533533

pydantic_ai_slim/pydantic_ai/models/groq.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -266,7 +266,7 @@ def _process_response(self, response: chat.ChatCompletion) -> ModelResponse:
266266
for c in choice.message.tool_calls:
267267
items.append(ToolCallPart(tool_name=c.function.name, args=c.function.arguments, tool_call_id=c.id))
268268
return ModelResponse(
269-
items, usage=_map_usage(response), model_name=response.model, timestamp=timestamp, vendor_id=response.id
269+
items, usage=_map_usage(response), model_name=response.model, timestamp=timestamp, id=response.id
270270
)
271271

272272
async def _process_streamed_response(self, response: AsyncStream[chat.ChatCompletionChunk]) -> GroqStreamedResponse:

pydantic_ai_slim/pydantic_ai/models/huggingface.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -253,7 +253,7 @@ def _process_response(self, response: ChatCompletionOutput) -> ModelResponse:
253253
usage=_map_usage(response),
254254
model_name=response.model,
255255
timestamp=timestamp,
256-
vendor_id=response.id,
256+
id=response.id,
257257
)
258258

259259
async def _process_streamed_response(self, response: AsyncIterable[ChatCompletionStreamOutput]) -> StreamedResponse:

pydantic_ai_slim/pydantic_ai/models/mistral.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -341,7 +341,7 @@ def _process_response(self, response: MistralChatCompletionResponse) -> ModelRes
341341
parts.append(tool)
342342

343343
return ModelResponse(
344-
parts, usage=_map_usage(response), model_name=response.model, timestamp=timestamp, vendor_id=response.id
344+
parts, usage=_map_usage(response), model_name=response.model, timestamp=timestamp, id=response.id
345345
)
346346

347347
async def _process_streamed_response(

pydantic_ai_slim/pydantic_ai/models/openai.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -419,7 +419,8 @@ def _process_response(self, response: chat.ChatCompletion | str) -> ModelRespons
419419
model_name=response.model,
420420
timestamp=timestamp,
421421
vendor_details=vendor_details,
422-
vendor_id=response.id,
422+
id=response.id,
423+
finish_reason=choice.finish_reason,
423424
)
424425

425426
async def _process_streamed_response(self, response: AsyncStream[ChatCompletionChunk]) -> OpenAIStreamedResponse:
@@ -706,7 +707,7 @@ def _process_response(self, response: responses.Response) -> ModelResponse:
706707
items,
707708
usage=_map_usage(response),
708709
model_name=response.model,
709-
vendor_id=response.id,
710+
id=response.id,
710711
timestamp=timestamp,
711712
)
712713

0 commit comments

Comments
 (0)