Skip to content

Commit 0024412

Browse files
authored
feat(openai): minimal and verbosity (#32455)
1 parent 6727d6e commit 0024412

File tree

9 files changed

+354
-55
lines changed

9 files changed

+354
-55
lines changed

libs/langchain/tests/unit_tests/chat_models/test_base.py

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,32 @@ def test_init_unknown_provider() -> None:
6868
clear=True,
6969
)
7070
def test_configurable() -> None:
71+
"""Test configurable chat model behavior without default parameters.
72+
73+
Verifies that a configurable chat model initialized without default parameters:
74+
- Has access to all standard runnable methods (``invoke``, ``stream``, etc.)
75+
- Blocks access to non-configurable methods until configuration is provided
76+
- Supports declarative operations (``bind_tools``) without mutating original model
77+
- Can chain declarative operations and configuration to access full functionality
78+
- Properly resolves to the configured model type when parameters are provided
79+
80+
Example:
81+
82+
.. code-block:: python
83+
84+
# This creates a configurable model without specifying which model
85+
model = init_chat_model()
86+
87+
# This will FAIL - no model specified yet
88+
model.get_num_tokens("hello") # AttributeError!
89+
90+
# This works - provides model at runtime
91+
response = model.invoke(
92+
"Hello",
93+
config={"configurable": {"model": "gpt-4o"}}
94+
)
95+
96+
"""
7197
model = init_chat_model()
7298

7399
for method in (
@@ -125,6 +151,7 @@ def test_configurable() -> None:
125151
"presence_penalty": None,
126152
"reasoning": None,
127153
"reasoning_effort": None,
154+
"verbosity": None,
128155
"frequency_penalty": None,
129156
"include": None,
130157
"seed": None,
@@ -170,6 +197,32 @@ def test_configurable() -> None:
170197
clear=True,
171198
)
172199
def test_configurable_with_default() -> None:
200+
"""Test configurable chat model behavior with default parameters.
201+
202+
Verifies that a configurable chat model initialized with default parameters:
203+
- Has access to all standard runnable methods (``invoke``, ``stream``, etc.)
204+
- Provides immediate access to non-configurable methods (e.g. ``get_num_tokens``)
205+
- Supports model switching through runtime configuration using ``config_prefix``
206+
- Maintains proper model identity and attributes when reconfigured
207+
- Can be used in chains with different model providers via configuration
208+
209+
Example:
210+
211+
.. code-block:: python
212+
213+
# This creates a configurable model with default parameters (model)
214+
model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
215+
216+
# This works immediately - uses default gpt-4o
217+
tokens = model.get_num_tokens("hello")
218+
219+
# This also works - switches to Claude at runtime
220+
response = model.invoke(
221+
"Hello",
222+
config={"configurable": {"my_model_model": "claude-3-sonnet-20240229"}}
223+
)
224+
225+
""" # noqa: E501
173226
model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
174227
for method in (
175228
"invoke",

libs/langchain_v1/tests/unit_tests/chat_models/test_chat_models.py

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,32 @@ def test_init_unknown_provider() -> None:
6868
clear=True,
6969
)
7070
def test_configurable() -> None:
71+
"""Test configurable chat model behavior without default parameters.
72+
73+
Verifies that a configurable chat model initialized without default parameters:
74+
- Has access to all standard runnable methods (``invoke``, ``stream``, etc.)
75+
- Blocks access to non-configurable methods until configuration is provided
76+
- Supports declarative operations (``bind_tools``) without mutating original model
77+
- Can chain declarative operations and configuration to access full functionality
78+
- Properly resolves to the configured model type when parameters are provided
79+
80+
Example:
81+
82+
.. code-block:: python
83+
84+
# This creates a configurable model without specifying which model
85+
model = init_chat_model()
86+
87+
# This will FAIL - no model specified yet
88+
model.get_num_tokens("hello") # AttributeError!
89+
90+
# This works - provides model at runtime
91+
response = model.invoke(
92+
"Hello",
93+
config={"configurable": {"model": "gpt-4o"}}
94+
)
95+
96+
"""
7197
model = init_chat_model()
7298

7399
for method in (
@@ -125,6 +151,7 @@ def test_configurable() -> None:
125151
"presence_penalty": None,
126152
"reasoning": None,
127153
"reasoning_effort": None,
154+
"verbosity": None,
128155
"frequency_penalty": None,
129156
"include": None,
130157
"seed": None,
@@ -170,6 +197,32 @@ def test_configurable() -> None:
170197
clear=True,
171198
)
172199
def test_configurable_with_default() -> None:
200+
"""Test configurable chat model behavior with default parameters.
201+
202+
Verifies that a configurable chat model initialized with default parameters:
203+
- Has access to all standard runnable methods (``invoke``, ``stream``, etc.)
204+
- Provides immediate access to non-configurable methods (e.g. ``get_num_tokens``)
205+
- Supports model switching through runtime configuration using ``config_prefix``
206+
- Maintains proper model identity and attributes when reconfigured
207+
- Can be used in chains with different model providers via configuration
208+
209+
Example:
210+
211+
.. code-block:: python
212+
213+
# This creates a configurable model with default parameters (model)
214+
model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
215+
216+
# This works immediately - uses default gpt-4o
217+
tokens = model.get_num_tokens("hello")
218+
219+
# This also works - switches to Claude at runtime
220+
response = model.invoke(
221+
"Hello",
222+
config={"configurable": {"my_model_model": "claude-3-sonnet-20240229"}}
223+
)
224+
225+
""" # noqa: E501
173226
model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
174227
for method in (
175228
"invoke",

libs/partners/openai/langchain_openai/chat_models/base.py

Lines changed: 43 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -458,8 +458,7 @@ class BaseChatOpenAI(BaseChatModel):
458458
alias="api_key", default_factory=secret_from_env("OPENAI_API_KEY", default=None)
459459
)
460460
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
461-
"""Base URL path for API requests, leave blank if not using a proxy or service
462-
emulator."""
461+
"""Base URL path for API requests, leave blank if not using a proxy or service emulator.""" # noqa: E501
463462
openai_organization: Optional[str] = Field(default=None, alias="organization")
464463
"""Automatically inferred from env var ``OPENAI_ORG_ID`` if not provided."""
465464
# to support explicit proxy for OpenAI
@@ -507,8 +506,9 @@ class BaseChatOpenAI(BaseChatModel):
507506
508507
Reasoning models only, like OpenAI o1, o3, and o4-mini.
509508
510-
Currently supported values are low, medium, and high. Reducing reasoning effort
511-
can result in faster responses and fewer tokens used on reasoning in a response.
509+
Currently supported values are ``'minimal'``, ``'low'``, ``'medium'``, and
510+
``'high'``. Reducing reasoning effort can result in faster responses and fewer
511+
tokens used on reasoning in a response.
512512
513513
.. versionadded:: 0.2.14
514514
"""
@@ -527,6 +527,17 @@ class BaseChatOpenAI(BaseChatModel):
527527
528528
.. versionadded:: 0.3.24
529529
530+
"""
531+
verbosity: Optional[str] = None
532+
"""Controls the verbosity level of responses for reasoning models. For use with the
533+
Responses API.
534+
535+
Currently supported values are ``'low'``, ``'medium'``, and ``'high'``.
536+
537+
Controls how detailed the model's responses are.
538+
539+
.. versionadded:: 0.3.28
540+
530541
"""
531542
tiktoken_model_name: Optional[str] = None
532543
"""The model name to pass to tiktoken when using this class.
@@ -654,6 +665,7 @@ class BaseChatOpenAI(BaseChatModel):
654665
llm = ChatOpenAI(
655666
model="o4-mini",
656667
use_responses_api=True,
668+
output_version="responses/v1",
657669
)
658670
llm.invoke([HumanMessage("How are you?")], previous_response_id="resp_123")
659671
@@ -701,10 +713,24 @@ def build_extra(cls, values: dict[str, Any]) -> Any:
701713
@model_validator(mode="before")
702714
@classmethod
703715
def validate_temperature(cls, values: dict[str, Any]) -> Any:
704-
"""Currently o1 models only allow temperature=1."""
716+
"""Validate temperature parameter for different models.
717+
718+
- o1 models only allow temperature=1
719+
- gpt-5 models only allow temperature=1 or unset (defaults to 1)
720+
"""
705721
model = values.get("model_name") or values.get("model") or ""
722+
723+
# For o1 models, set temperature=1 if not provided
706724
if model.startswith("o1") and "temperature" not in values:
707725
values["temperature"] = 1
726+
727+
# For gpt-5 models, handle temperature restrictions
728+
if model.startswith("gpt-5"):
729+
temperature = values.get("temperature")
730+
if temperature is not None and temperature != 1:
731+
# For gpt-5, only temperature=1 is supported, so remove non-defaults
732+
values.pop("temperature", None)
733+
708734
return values
709735

710736
@model_validator(mode="after")
@@ -805,6 +831,7 @@ def _default_params(self) -> dict[str, Any]:
805831
"temperature": self.temperature,
806832
"reasoning_effort": self.reasoning_effort,
807833
"reasoning": self.reasoning,
834+
"verbosity": self.verbosity,
808835
"include": self.include,
809836
"service_tier": self.service_tier,
810837
"truncation": self.truncation,
@@ -1178,6 +1205,7 @@ def _get_request_payload(
11781205
kwargs["stop"] = stop
11791206

11801207
payload = {**self._default_params, **kwargs}
1208+
11811209
if self._use_responses_api(payload):
11821210
if self.use_previous_response_id:
11831211
last_messages, previous_response_id = _get_last_messages(messages)
@@ -2366,7 +2394,11 @@ class GetPopulation(BaseModel):
23662394
23672395
from langchain_openai import ChatOpenAI
23682396
2369-
llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
2397+
llm = ChatOpenAI(
2398+
model="gpt-4.1-mini",
2399+
use_responses_api=True,
2400+
output_version="responses/v1",
2401+
)
23702402
response = llm.invoke("Hi, I'm Bob.")
23712403
response.text()
23722404
@@ -3486,6 +3518,11 @@ def _construct_responses_api_payload(
34863518
if "reasoning_effort" in payload and "reasoning" not in payload:
34873519
payload["reasoning"] = {"effort": payload.pop("reasoning_effort")}
34883520

3521+
# Remove temperature parameter for models that don't support it in responses API
3522+
model = payload.get("model", "")
3523+
if model.startswith("gpt-5"):
3524+
payload.pop("temperature", None)
3525+
34893526
payload["input"] = _construct_responses_api_input(messages)
34903527
if tools := payload.pop("tools", None):
34913528
new_tools: list = []

0 commit comments

Comments
 (0)