Skip to content

Commit ca45f8a

Browse files
committed
WIP build reasoning_details from ThinkingParts
1 parent ee93121 commit ca45f8a

File tree

2 files changed

+45
-42
lines changed

2 files changed

+45
-42
lines changed

pydantic_ai_slim/pydantic_ai/models/openrouter.py

Lines changed: 43 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -243,7 +243,7 @@ class BaseReasoningDetail(BaseModel):
243243
"""Common fields shared across all reasoning detail types."""
244244

245245
id: str | None = None
246-
format: Literal['unknown', 'openai-responses-v1', 'anthropic-claude-v1']
246+
format: Literal['unknown', 'openai-responses-v1', 'anthropic-claude-v1', 'xai-responses-v1']
247247
index: int | None
248248

249249

@@ -320,7 +320,7 @@ def _openrouter_settings_to_openai_settings(model_settings: OpenRouterModelSetti
320320
Returns:
321321
An 'OpenAIChatModelSettings' object with equivalent settings.
322322
"""
323-
extra_body = model_settings['extra_body']
323+
extra_body = model_settings.get('extra_body', {})
324324

325325
if models := model_settings.pop('openrouter_models', None):
326326
extra_body['models'] = models
@@ -386,39 +386,51 @@ def _process_response(self, response: ChatCompletion | str) -> ModelResponse:
386386
# This is done because 'super()._process_response' reads 'reasoning' to create a ThinkingPart.
387387
# But this method will also create a ThinkingPart using 'reasoning_details'; delete 'reasoning' to avoid duplication.
388388
if choice.message.reasoning is not None:
389-
setattr(response.choices[0].message, 'reasoning', None)
389+
delattr(response.choices[0].message, 'reasoning')
390390

391391
model_response = super()._process_response(response=response)
392392

393-
provider_details: dict[str, Any] = {}
393+
provider_details = model_response.provider_details or {}
394394
provider_details['downstream_provider'] = native_response.provider
395395
provider_details['native_finish_reason'] = choice.native_finish_reason
396396

397397
if reasoning_details := choice.message.reasoning_details:
398-
provider_details['reasoning_details'] = [detail.model_dump() for detail in reasoning_details]
399-
400398
reasoning = reasoning_details[0]
401399

402-
assert isinstance(model_response.parts, list)
400+
new_parts: list[ThinkingPart] = []
401+
403402
if isinstance(reasoning, ReasoningText):
404-
model_response.parts.insert(
405-
0,
403+
new_parts.append(
406404
ThinkingPart(
407405
id=reasoning.id,
408406
content=reasoning.text,
409407
signature=reasoning.signature,
410408
provider_name=native_response.provider,
411-
),
409+
)
412410
)
413411
elif isinstance(reasoning, ReasoningSummary):
414-
model_response.parts.insert(
415-
0,
412+
new_parts.append(
416413
ThinkingPart(
417414
id=reasoning.id,
418415
content=reasoning.summary,
419416
provider_name=native_response.provider,
420417
),
421418
)
419+
else:
420+
new_parts.append(
421+
ThinkingPart(
422+
id=reasoning.id,
423+
content='',
424+
signature=reasoning.data,
425+
provider_name=native_response.provider,
426+
),
427+
)
428+
429+
# TODO: Find a better way to store these attributes
430+
new_parts[0].openrouter_type = reasoning.type
431+
new_parts[0].openrouter_format = reasoning.format
432+
433+
model_response.parts = [*new_parts, *model_response.parts]
422434

423435
model_response.provider_details = provider_details
424436

@@ -430,8 +442,24 @@ async def _map_messages(self, messages: list[ModelMessage]) -> list[ChatCompleti
430442

431443
for message, openai_message in zip(messages, openai_messages):
432444
if isinstance(message, ModelResponse):
433-
provider_details = cast(dict[str, Any], message.provider_details)
434-
if reasoning_details := provider_details.get('reasoning_details', None): # pragma: lax no cover
435-
openai_message['reasoning_details'] = reasoning_details # type: ignore[reportGeneralTypeIssue]
445+
for part in message.parts:
446+
if isinstance(part, ThinkingPart):
447+
reasoning_detail: dict[str, Any] = {
448+
'type': part.openrouter_type,
449+
'id': part.id,
450+
'format': part.openrouter_format,
451+
'index': 0,
452+
}
453+
454+
match part.openrouter_type:
455+
case 'reasoning.summary':
456+
reasoning_detail['summary'] = part.content
457+
case 'reasoning.text':
458+
reasoning_detail['text'] = part.content
459+
reasoning_detail['signature'] = part.signature
460+
case 'reasoning.encrypted':
461+
reasoning_detail['data'] = part.signature
462+
463+
openai_message['reasoning_details'] = [reasoning_detail]
436464

437465
return openai_messages

tests/models/test_openrouter.py

Lines changed: 2 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -92,33 +92,8 @@ async def test_openrouter_with_reasoning(allow_model_requests: None, openrouter_
9292
Let me structure this information in a clear, friendly manner that addresses the user's question while inviting further interaction.\
9393
"""
9494
)
95-
assert response.provider_details is not None
96-
assert response.provider_details['reasoning_details'] == snapshot(
97-
[
98-
{
99-
'id': None,
100-
'format': 'unknown',
101-
'index': 0,
102-
'text': """\
103-
Let me process this query about who I am. First, I should consider what the user really wants to know - they're likely seeking to understand my identity and capabilities as an AI assistant.
104-
105-
I need to be clear and accurate about my nature. I'm a GLM large language model developed by Zhipu AI, not a human. This distinction is fundamental to our interaction.
106-
107-
Looking at my core functions, I should highlight my ability to engage in natural conversations, answer questions, and assist with various tasks. My training involves processing vast amounts of text data, which enables me to understand and generate human-like responses.
108-
109-
It's important to mention my commitment to being helpful, harmless, and honest. These principles guide my interactions and ensure I provide appropriate assistance.
110-
111-
I should also emphasize my continuous learning aspect. While I don't store personal data, I'm regularly updated to improve my capabilities and knowledge base.
112-
113-
The response should be welcoming and encourage further questions about specific areas where I can help. This creates an open dialogue and shows my willingness to assist with various topics.
114-
115-
Let me structure this information in a clear, friendly manner that addresses the user's question while inviting further interaction.\
116-
""",
117-
'type': 'reasoning.text',
118-
'signature': None,
119-
}
120-
]
121-
)
95+
assert thinking_part.openrouter_type == snapshot('reasoning.text')
96+
assert thinking_part.openrouter_format == snapshot('unknown')
12297

12398

12499
async def test_openrouter_errors_raised(allow_model_requests: None, openrouter_api_key: str) -> None:

0 commit comments

Comments
 (0)