Skip to content

Commit 69a8476

Browse files
Kludex and DouweM authored
Deprecate OpenAIModelProfile.openai_supports_sampling_settings (#2730)
Co-authored-by: Douwe Maan <[email protected]>
1 parent 61b077b commit 69a8476

File tree

2 files changed

+35
-23
lines changed

2 files changed

+35
-23
lines changed

pydantic_ai_slim/pydantic_ai/models/openai.py

Lines changed: 12 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -409,13 +409,6 @@ async def _completions_create(
409409
for setting in unsupported_model_settings:
410410
model_settings.pop(setting, None)
411411

412-
# TODO(Marcelo): Deprecate this in favor of `openai_unsupported_model_settings`.
413-
sampling_settings = (
414-
model_settings
415-
if OpenAIModelProfile.from_profile(self.profile).openai_supports_sampling_settings
416-
else OpenAIChatModelSettings()
417-
)
418-
419412
try:
420413
extra_headers = model_settings.get('extra_headers', {})
421414
extra_headers.setdefault('User-Agent', get_user_agent())
@@ -437,13 +430,13 @@ async def _completions_create(
437430
web_search_options=web_search_options or NOT_GIVEN,
438431
service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
439432
prediction=model_settings.get('openai_prediction', NOT_GIVEN),
440-
temperature=sampling_settings.get('temperature', NOT_GIVEN),
441-
top_p=sampling_settings.get('top_p', NOT_GIVEN),
442-
presence_penalty=sampling_settings.get('presence_penalty', NOT_GIVEN),
443-
frequency_penalty=sampling_settings.get('frequency_penalty', NOT_GIVEN),
444-
logit_bias=sampling_settings.get('logit_bias', NOT_GIVEN),
445-
logprobs=sampling_settings.get('openai_logprobs', NOT_GIVEN),
446-
top_logprobs=sampling_settings.get('openai_top_logprobs', NOT_GIVEN),
433+
temperature=model_settings.get('temperature', NOT_GIVEN),
434+
top_p=model_settings.get('top_p', NOT_GIVEN),
435+
presence_penalty=model_settings.get('presence_penalty', NOT_GIVEN),
436+
frequency_penalty=model_settings.get('frequency_penalty', NOT_GIVEN),
437+
logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
438+
logprobs=model_settings.get('openai_logprobs', NOT_GIVEN),
439+
top_logprobs=model_settings.get('openai_top_logprobs', NOT_GIVEN),
447440
extra_headers=extra_headers,
448441
extra_body=model_settings.get('extra_body'),
449442
)
@@ -918,11 +911,9 @@ async def _responses_create(
918911
text = text or {}
919912
text['verbosity'] = verbosity
920913

921-
sampling_settings = (
922-
model_settings
923-
if OpenAIModelProfile.from_profile(self.profile).openai_supports_sampling_settings
924-
else OpenAIResponsesModelSettings()
925-
)
914+
unsupported_model_settings = OpenAIModelProfile.from_profile(self.profile).openai_unsupported_model_settings
915+
for setting in unsupported_model_settings:
916+
model_settings.pop(setting, None)
926917

927918
try:
928919
extra_headers = model_settings.get('extra_headers', {})
@@ -936,8 +927,8 @@ async def _responses_create(
936927
tool_choice=tool_choice or NOT_GIVEN,
937928
max_output_tokens=model_settings.get('max_tokens', NOT_GIVEN),
938929
stream=stream,
939-
temperature=sampling_settings.get('temperature', NOT_GIVEN),
940-
top_p=sampling_settings.get('top_p', NOT_GIVEN),
930+
temperature=model_settings.get('temperature', NOT_GIVEN),
931+
top_p=model_settings.get('top_p', NOT_GIVEN),
941932
truncation=model_settings.get('openai_truncation', NOT_GIVEN),
942933
timeout=model_settings.get('timeout', NOT_GIVEN),
943934
service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),

pydantic_ai_slim/pydantic_ai/profiles/openai.py

Lines changed: 23 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
from __future__ import annotations as _annotations
22

33
import re
4+
import warnings
45
from collections.abc import Sequence
56
from dataclasses import dataclass
67
from typing import Any, Literal
@@ -21,7 +22,6 @@ class OpenAIModelProfile(ModelProfile):
2122
openai_supports_strict_tool_definition: bool = True
2223
"""This can be set by a provider or user if the OpenAI-"compatible" API doesn't support strict tool definitions."""
2324

24-
# TODO(Marcelo): Deprecate this in favor of `openai_unsupported_model_settings`.
2525
openai_supports_sampling_settings: bool = True
2626
"""Turn off to don't send sampling settings like `temperature` and `top_p` to models that don't support them, like OpenAI's o-series reasoning models."""
2727

@@ -38,6 +38,14 @@ class OpenAIModelProfile(ModelProfile):
3838
openai_system_prompt_role: OpenAISystemPromptRole | None = None
3939
"""The role to use for the system prompt message. If not provided, defaults to `'system'`."""
4040

41+
def __post_init__(self): # pragma: no cover
42+
if not self.openai_supports_sampling_settings:
43+
warnings.warn(
44+
'The `openai_supports_sampling_settings` has no effect, and it will be removed in future versions. '
45+
'Use `openai_unsupported_model_settings` instead.',
46+
DeprecationWarning,
47+
)
48+
4149

4250
def openai_model_profile(model_name: str) -> ModelProfile:
4351
"""Get the model profile for an OpenAI model."""
@@ -46,6 +54,19 @@ def openai_model_profile(model_name: str) -> ModelProfile:
4654
# We leave it in here for all models because the `default_structured_output_mode` is `'tool'`, so `native` is only used
4755
# when the user specifically uses the `NativeOutput` marker, so an error from the API is acceptable.
4856

57+
if is_reasoning_model:
58+
openai_unsupported_model_settings = (
59+
'temperature',
60+
'top_p',
61+
'presence_penalty',
62+
'frequency_penalty',
63+
'logit_bias',
64+
'logprobs',
65+
'top_logprobs',
66+
)
67+
else:
68+
openai_unsupported_model_settings = ()
69+
4970
# The o1-mini model doesn't support the `system` role, so we default to `user`.
5071
# See https://github.com/pydantic/pydantic-ai/issues/974 for more details.
5172
openai_system_prompt_role = 'user' if model_name.startswith('o1-mini') else None
@@ -54,7 +75,7 @@ def openai_model_profile(model_name: str) -> ModelProfile:
5475
json_schema_transformer=OpenAIJsonSchemaTransformer,
5576
supports_json_schema_output=True,
5677
supports_json_object_output=True,
57-
openai_supports_sampling_settings=not is_reasoning_model,
78+
openai_unsupported_model_settings=openai_unsupported_model_settings,
5879
openai_system_prompt_role=openai_system_prompt_role,
5980
)
6081

0 commit comments

Comments (0)