Skip to content

Commit 28051fe

Browse files
committed
feat: Add id and finish_reason to ModelResponse
1 parent 6207ac6 commit 28051fe

File tree

4 files changed: +36 additions, −5 deletions

pydantic_ai_slim/pydantic_ai/messages.py

Lines changed: 6 additions & 0 deletions

```diff
@@ -792,6 +792,12 @@ class ModelResponse:
     vendor_id: str | None = None
     """Vendor ID as specified by the model provider. This can be used to track the specific request to the model."""

+    id: str | None = None
+    """Unique identifier for the model response, e.g. as returned by the model provider (OpenAI, etc)."""
+
+    finish_reason: str | None = None
+    """The reason the model finished generating this response, e.g. 'stop', 'length', etc."""
+
     def otel_events(self, settings: InstrumentationSettings) -> list[Event]:
         """Return OpenTelemetry events for the response."""
         result: list[Event] = []
```

pydantic_ai_slim/pydantic_ai/models/function.py

Lines changed: 2 additions & 0 deletions

```diff
@@ -137,6 +137,8 @@ async def request(
         if not response.usage.has_values():  # pragma: no branch
             response.usage = _estimate_usage(chain(messages, [response]))
         response.usage.requests = 1
+        response.id = getattr(response, 'id', None)
+        response.finish_reason = getattr(response, 'finish_reason', None)
         return response

     @asynccontextmanager
```

pydantic_ai_slim/pydantic_ai/models/openai.py

Lines changed: 2 additions & 0 deletions

```diff
@@ -420,6 +420,8 @@ def _process_response(self, response: chat.ChatCompletion | str) -> ModelResponse:
             timestamp=timestamp,
             vendor_details=vendor_details,
             vendor_id=response.id,
+            id=response.id,
+            finish_reason=choice.finish_reason,
         )

     async def _process_streamed_response(self, response: AsyncStream[ChatCompletionChunk]) -> OpenAIStreamedResponse:
```

pydantic_ai_slim/pydantic_ai/models/test.py

Lines changed: 26 additions & 5 deletions

```diff
@@ -227,23 +227,44 @@ def _request(
                 output[part.tool_name] = part.content
             if output:
                 return ModelResponse(
-                    parts=[TextPart(pydantic_core.to_json(output).decode())], model_name=self._model_name
+                    parts=[TextPart(pydantic_core.to_json(output).decode())],
+                    model_name=self._model_name,
+                    id=None,
+                    finish_reason=None,
                 )
             else:
-                return ModelResponse(parts=[TextPart('success (no tool calls)')], model_name=self._model_name)
+                return ModelResponse(
+                    parts=[TextPart('success (no tool calls)')],
+                    model_name=self._model_name,
+                    id=None,
+                    finish_reason=None,
+                )
         else:
-            return ModelResponse(parts=[TextPart(response_text)], model_name=self._model_name)
+            return ModelResponse(
+                parts=[TextPart(response_text)],
+                model_name=self._model_name,
+                id=None,
+                finish_reason=None,
+            )
     else:
         assert output_tools, 'No output tools provided'
         custom_output_args = output_wrapper.value
         output_tool = output_tools[self.seed % len(output_tools)]
         if custom_output_args is not None:
             return ModelResponse(
-                parts=[ToolCallPart(output_tool.name, custom_output_args)], model_name=self._model_name
+                parts=[ToolCallPart(output_tool.name, custom_output_args)],
+                model_name=self._model_name,
+                id=None,
+                finish_reason=None,
             )
         else:
             response_args = self.gen_tool_args(output_tool)
-            return ModelResponse(parts=[ToolCallPart(output_tool.name, response_args)], model_name=self._model_name)
+            return ModelResponse(
+                parts=[ToolCallPart(output_tool.name, response_args)],
+                model_name=self._model_name,
+                id=None,
+                finish_reason=None,
+            )
```

0 commit comments

Comments
 (0)