diff --git a/pydantic_ai_slim/pydantic_ai/models/openai.py b/pydantic_ai_slim/pydantic_ai/models/openai.py
index 6db3742320..73a05c9fde 100644
--- a/pydantic_ai_slim/pydantic_ai/models/openai.py
+++ b/pydantic_ai_slim/pydantic_ai/models/openai.py
@@ -722,6 +722,7 @@ class _MapModelResponseContext:
 
     texts: list[str] = field(default_factory=list)
    tool_calls: list[ChatCompletionMessageFunctionToolCallParam] = field(default_factory=list)
+    reasoning_texts: list[str] = field(default_factory=list)
 
     def map_assistant_message(self, message: ModelResponse) -> chat.ChatCompletionAssistantMessageParam:
         for item in message.parts:
@@ -757,6 +758,9 @@ def _into_message_param(self) -> chat.ChatCompletionAssistantMessageParam:
             message_param['content'] = None
         if self.tool_calls:
             message_param['tool_calls'] = self.tool_calls
+        if self.reasoning_texts:
+            if reasoning_field := OpenAIModelProfile.from_profile(self._model.profile).openai_chat_reasoning_field:
+                message_param[reasoning_field] = '\n\n'.join(self.reasoning_texts)
         return message_param
 
     def _map_response_text_part(self, item: TextPart) -> None:
@@ -773,11 +777,16 @@ def _map_response_thinking_part(self, item: ThinkingPart) -> None:
         This method serves as a hook that can be overridden by subclasses to
         implement custom logic for handling thinking parts.
         """
-        # NOTE: DeepSeek `reasoning_content` field should NOT be sent back per https://api-docs.deepseek.com/guides/reasoning_model,
-        # but we currently just send it in `<think>` tags anyway as we don't want DeepSeek-specific checks here.
-        # If you need this changed, please file an issue.
-        start_tag, end_tag = self._model.profile.thinking_tags
-        self.texts.append('\n'.join([start_tag, item.content, end_tag]))
+        profile = OpenAIModelProfile.from_profile(self._model.profile)
+        if profile.openai_chat_reasoning_field:
+            self.reasoning_texts.append(item.content)
+        else:
+            # Existing fallback logic
+            # NOTE: DeepSeek `reasoning_content` field should NOT be sent back per https://api-docs.deepseek.com/guides/reasoning_model,
+            # but we currently just send it in `<think>` tags anyway as we don't want DeepSeek-specific checks here.
+            # If you need this changed, please file an issue.
+            start_tag, end_tag = self._model.profile.thinking_tags
+            self.texts.append('\n'.join([start_tag, item.content, end_tag]))
 
     def _map_response_tool_call_part(self, item: ToolCallPart) -> None:
         """Maps a `ToolCallPart` to the response context.
diff --git a/pydantic_ai_slim/pydantic_ai/profiles/moonshotai.py b/pydantic_ai_slim/pydantic_ai/profiles/moonshotai.py
index 9b4ffb4bc3..df45f2766d 100644
--- a/pydantic_ai_slim/pydantic_ai/profiles/moonshotai.py
+++ b/pydantic_ai_slim/pydantic_ai/profiles/moonshotai.py
@@ -1,8 +1,11 @@
 from __future__ import annotations as _annotations
 
-from . import ModelProfile
+from .openai import OpenAIModelProfile
 
 
-def moonshotai_model_profile(model_name: str) -> ModelProfile | None:
+def moonshotai_model_profile(model_name: str) -> OpenAIModelProfile | None:
     """Get the model profile for a MoonshotAI model."""
-    return ModelProfile(ignore_streamed_leading_whitespace=True)
+    return OpenAIModelProfile(
+        ignore_streamed_leading_whitespace=True,
+        openai_chat_reasoning_field='reasoning_content',
+    )
diff --git a/pydantic_ai_slim/pydantic_ai/profiles/openai.py b/pydantic_ai_slim/pydantic_ai/profiles/openai.py
index a3cd83d3e8..2118fac18d 100644
--- a/pydantic_ai_slim/pydantic_ai/profiles/openai.py
+++ b/pydantic_ai_slim/pydantic_ai/profiles/openai.py
@@ -51,6 +51,13 @@ class OpenAIModelProfile(ModelProfile):
     See https://github.com/pydantic/pydantic-ai/issues/3245 for more details.
     """
 
+    openai_chat_reasoning_field: str | None = None
+    """The field name to use for sending reasoning content back to the model.
+
+    If set, reasoning content will be sent in this field of the request message
+    instead of being embedded in the content.
+    """
+
     def __post_init__(self):  # pragma: no cover
         if not self.openai_supports_sampling_settings:
             warnings.warn(
diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py
index 004d91ec5a..11dd0742e7 100644
--- a/tests/models/test_openai.py
+++ b/tests/models/test_openai.py
@@ -3116,3 +3116,36 @@ async def test_cache_point_filtering_responses_model():
     assert len(msg['content']) == 2
     assert msg['content'][0]['text'] == 'text before'  # type: ignore[reportUnknownArgumentType]
     assert msg['content'][1]['text'] == 'text after'  # type: ignore[reportUnknownArgumentType]
+
+
+async def test_openai_chat_reasoning_field(allow_model_requests: None):
+    """Test that reasoning content is sent in the configured field when `openai_chat_reasoning_field` is set."""
+    # Mock a response that will trigger a thinking part in history
+    c = completion_message(ChatCompletionMessage(content='world', role='assistant'))
+    mock_client = MockOpenAI.create_mock(c)
+
+    # Configure model with custom profile setting the reasoning field
+    profile = OpenAIModelProfile(openai_chat_reasoning_field='reasoning_content')
+    m = OpenAIChatModel('gpt-4o', provider=OpenAIProvider(openai_client=mock_client), profile=profile)
+
+    # Create a history with a ThinkingPart
+    history = [
+        ModelResponse(
+            parts=[ThinkingPart(content='thinking about it'), TextPart(content='hello')],
+            model_name='gpt-4o',
+            timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
+            provider_name='openai',
+        )
+    ]
+
+    agent = Agent(m)
+    await agent.run('next', message_history=history)
+
+    # Verify the request sent to OpenAI has the extra field
+    kwargs = get_mock_chat_completion_kwargs(mock_client)[0]
+    messages = kwargs['messages']
+
+    # The history message (index 0) should have the reasoning_content field
+    assert messages[0]['role'] == 'assistant'
+    assert messages[0]['content'] == 'hello'
+    assert messages[0]['reasoning_content'] == 'thinking about it'
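
For reviewers, a minimal usage sketch (not part of the diff) of how a custom OpenAI-compatible model could opt into the new setting; the model name, base URL, and API key below are placeholders, not values from this PR. MoonshotAI models pick the setting up automatically via the updated `moonshotai_model_profile`.

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIChatModel
from pydantic_ai.profiles.openai import OpenAIModelProfile
from pydantic_ai.providers.openai import OpenAIProvider

# With `openai_chat_reasoning_field` set, ThinkingParts from message history are
# sent back in that field of the assistant message (here `reasoning_content`)
# instead of being wrapped in the profile's thinking tags inside `content`.
profile = OpenAIModelProfile(openai_chat_reasoning_field='reasoning_content')

model = OpenAIChatModel(
    'some-reasoning-model',  # placeholder model name
    provider=OpenAIProvider(base_url='https://api.example.com/v1', api_key='...'),  # placeholder endpoint
    profile=profile,
)
agent = Agent(model)
```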