Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 14 additions & 5 deletions pydantic_ai_slim/pydantic_ai/models/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -722,6 +722,7 @@ class _MapModelResponseContext:

texts: list[str] = field(default_factory=list)
tool_calls: list[ChatCompletionMessageFunctionToolCallParam] = field(default_factory=list)
reasoning_texts: list[str] = field(default_factory=list)

def map_assistant_message(self, message: ModelResponse) -> chat.ChatCompletionAssistantMessageParam:
for item in message.parts:
Expand Down Expand Up @@ -757,6 +758,9 @@ def _into_message_param(self) -> chat.ChatCompletionAssistantMessageParam:
message_param['content'] = None
if self.tool_calls:
message_param['tool_calls'] = self.tool_calls
if self.reasoning_texts:
if reasoning_field := OpenAIModelProfile.from_profile(self._model.profile).openai_chat_reasoning_field:
message_param[reasoning_field] = '\n\n'.join(self.reasoning_texts)
return message_param

def _map_response_text_part(self, item: TextPart) -> None:
def _map_response_thinking_part(self, item: ThinkingPart) -> None:
    """Map a `ThinkingPart` onto the response context.

    This method serves as a hook that can be overridden by subclasses
    to implement custom logic for handling thinking parts.

    If the model's profile declares `openai_chat_reasoning_field`, the thinking
    text is collected separately (in `self.reasoning_texts`) so it can be sent
    back in that dedicated request field; otherwise it is embedded in the
    message text wrapped in the profile's thinking tags.
    """
    profile = OpenAIModelProfile.from_profile(self._model.profile)
    if profile.openai_chat_reasoning_field:
        self.reasoning_texts.append(item.content)
    else:
        # Existing fallback logic.
        # NOTE: DeepSeek `reasoning_content` field should NOT be sent back per
        # https://api-docs.deepseek.com/guides/reasoning_model, but we currently
        # just send it in `<think>` tags anyway as we don't want DeepSeek-specific
        # checks here. If you need this changed, please file an issue.
        start_tag, end_tag = self._model.profile.thinking_tags
        self.texts.append('\n'.join([start_tag, item.content, end_tag]))

def _map_response_tool_call_part(self, item: ToolCallPart) -> None:
"""Maps a `ToolCallPart` to the response context.
Expand Down
9 changes: 6 additions & 3 deletions pydantic_ai_slim/pydantic_ai/profiles/moonshotai.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
from __future__ import annotations as _annotations

from . import ModelProfile
from .openai import OpenAIModelProfile


def moonshotai_model_profile(model_name: str) -> OpenAIModelProfile | None:
    """Get the model profile for a MoonshotAI model.

    Args:
        model_name: The MoonshotAI model name. Currently unused — every
            MoonshotAI model shares the same profile.

    Returns:
        An `OpenAIModelProfile` configured for MoonshotAI's OpenAI-compatible
        Chat API: leading whitespace in streamed responses is ignored, and
        reasoning content is sent back to the model in the
        `reasoning_content` request field rather than embedded in the content.
    """
    return OpenAIModelProfile(
        ignore_streamed_leading_whitespace=True,
        openai_chat_reasoning_field='reasoning_content',
    )
7 changes: 7 additions & 0 deletions pydantic_ai_slim/pydantic_ai/profiles/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,13 @@ class OpenAIModelProfile(ModelProfile):
See https://github.com/pydantic/pydantic-ai/issues/3245 for more details.
"""

openai_chat_reasoning_field: str | None = None
"""The field name to use for sending reasoning content back to the model.

If set, reasoning content will be sent in this field of the request message
instead of being embedded in the content.
"""

def __post_init__(self): # pragma: no cover
if not self.openai_supports_sampling_settings:
warnings.warn(
Expand Down
33 changes: 33 additions & 0 deletions tests/models/test_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -3116,3 +3116,36 @@ async def test_cache_point_filtering_responses_model():
assert len(msg['content']) == 2
assert msg['content'][0]['text'] == 'text before' # type: ignore[reportUnknownArgumentType]
assert msg['content'][1]['text'] == 'text after' # type: ignore[reportUnknownArgumentType]


async def test_openai_chat_reasoning_field(allow_model_requests: None):
    """Test that reasoning content is sent in the configured field when `openai_chat_reasoning_field` is set."""
    # The mocked completion only needs to let the agent run succeed; the
    # interesting part of this test is the request payload captured below.
    completion = completion_message(ChatCompletionMessage(content='world', role='assistant'))
    mock_client = MockOpenAI.create_mock(completion)

    # A custom profile routes thinking content into the `reasoning_content` field.
    model = OpenAIChatModel(
        'gpt-4o',
        provider=OpenAIProvider(openai_client=mock_client),
        profile=OpenAIModelProfile(openai_chat_reasoning_field='reasoning_content'),
    )

    # Prior assistant turn containing both a thinking part and a text part.
    message_history = [
        ModelResponse(
            parts=[ThinkingPart(content='thinking about it'), TextPart(content='hello')],
            model_name='gpt-4o',
            timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
            provider_name='openai',
        )
    ]

    await Agent(model).run('next', message_history=message_history)

    # Inspect the raw request sent to OpenAI: the replayed assistant message
    # (index 0) should carry the thinking text in `reasoning_content` rather
    # than inline in `content`.
    sent_messages = get_mock_chat_completion_kwargs(mock_client)[0]['messages']
    assistant_msg = sent_messages[0]
    assert assistant_msg['role'] == 'assistant'
    assert assistant_msg['content'] == 'hello'
    assert assistant_msg['reasoning_content'] == 'thinking about it'
Loading