@@ -458,8 +458,7 @@ class BaseChatOpenAI(BaseChatModel):
         alias="api_key", default_factory=secret_from_env("OPENAI_API_KEY", default=None)
     )
     openai_api_base: Optional[str] = Field(default=None, alias="base_url")
-    """Base URL path for API requests, leave blank if not using a proxy or service
-    emulator."""
+    """Base URL path for API requests, leave blank if not using a proxy or service emulator."""  # noqa: E501
     openai_organization: Optional[str] = Field(default=None, alias="organization")
     """Automatically inferred from env var ``OPENAI_ORG_ID`` if not provided."""
     # to support explicit proxy for OpenAI
@@ -507,8 +506,9 @@ class BaseChatOpenAI(BaseChatModel):
 
     Reasoning models only, like OpenAI o1, o3, and o4-mini.
 
-    Currently supported values are low, medium, and high. Reducing reasoning effort
-    can result in faster responses and fewer tokens used on reasoning in a response.
+    Currently supported values are ``'minimal'``, ``'low'``, ``'medium'``, and
+    ``'high'``. Reducing reasoning effort can result in faster responses and fewer
+    tokens used on reasoning in a response.
 
     .. versionadded:: 0.2.14
     """
@@ -527,6 +527,17 @@ class BaseChatOpenAI(BaseChatModel):
 
     .. versionadded:: 0.3.24
 
+    """
+    verbosity: Optional[str] = None
+    """Controls the verbosity level of responses for reasoning models. For use with the
+    Responses API.
+
+    Currently supported values are ``'low'``, ``'medium'``, and ``'high'``.
+
+    Controls how detailed the model's responses are.
+
+    .. versionadded:: 0.3.28
+
     """
    tiktoken_model_name: Optional[str] = None
    """The model name to pass to tiktoken when using this class.
@@ -654,6 +665,7 @@ class BaseChatOpenAI(BaseChatModel):
             llm = ChatOpenAI(
                 model="o4-mini",
                 use_responses_api=True,
+                output_version="responses/v1",
             )
             llm.invoke([HumanMessage("How are you?")], previous_response_id="resp_123")
 
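
A possible end-to-end version of the docstring example above. The ``response_metadata["id"]`` lookup used to chain responses is an assumption, not shown in this diff:

    from langchain_core.messages import HumanMessage
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="o4-mini", use_responses_api=True, output_version="responses/v1")
    first = llm.invoke([HumanMessage("Hi, I'm Bob.")])
    follow_up = llm.invoke(
        [HumanMessage("What's my name?")],
        previous_response_id=first.response_metadata["id"],  # assumed metadata key
    )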
@@ -701,10 +713,24 @@ def build_extra(cls, values: dict[str, Any]) -> Any:
     @model_validator(mode="before")
     @classmethod
     def validate_temperature(cls, values: dict[str, Any]) -> Any:
-        """Currently o1 models only allow temperature=1."""
+        """Validate temperature parameter for different models.
+
+        - o1 models only allow temperature=1
+        - gpt-5 models only allow temperature=1 or unset (defaults to 1)
+        """
         model = values.get("model_name") or values.get("model") or ""
+
+        # For o1 models, set temperature=1 if not provided
         if model.startswith("o1") and "temperature" not in values:
             values["temperature"] = 1
+
+        # For gpt-5 models, handle temperature restrictions
+        if model.startswith("gpt-5"):
+            temperature = values.get("temperature")
+            if temperature is not None and temperature != 1:
+                # For gpt-5, only temperature=1 is supported, so remove non-defaults
+                values.pop("temperature", None)
+
         return values
 
     @model_validator(mode="after")
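
To make the validator's observable behavior concrete, a sketch of what the new rules imply for constructor arguments (it assumes this change is merged and ``OPENAI_API_KEY`` is set):

    from langchain_openai import ChatOpenAI

    # o1: temperature is injected as 1 when unset.
    llm_o1 = ChatOpenAI(model="o1")

    # gpt-5: a non-default temperature is silently dropped rather than rejected,
    # so the API-side default of 1 applies.
    llm_g5 = ChatOpenAI(model="gpt-5", temperature=0.2)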
@@ -805,6 +831,7 @@ def _default_params(self) -> dict[str, Any]:
             "temperature": self.temperature,
             "reasoning_effort": self.reasoning_effort,
             "reasoning": self.reasoning,
+            "verbosity": self.verbosity,
             "include": self.include,
             "service_tier": self.service_tier,
             "truncation": self.truncation,
@@ -1178,6 +1205,7 @@ def _get_request_payload(
             kwargs["stop"] = stop
 
         payload = {**self._default_params, **kwargs}
+
         if self._use_responses_api(payload):
             if self.use_previous_response_id:
                 last_messages, previous_response_id = _get_last_messages(messages)
@@ -2366,7 +2394,11 @@ class GetPopulation(BaseModel):
 
             from langchain_openai import ChatOpenAI
 
-            llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
+            llm = ChatOpenAI(
+                model="gpt-4.1-mini",
+                use_responses_api=True,
+                output_version="responses/v1",
+            )
             response = llm.invoke("Hi, I'm Bob.")
             response.text()
 
@@ -3486,6 +3518,11 @@ def _construct_responses_api_payload(
     if "reasoning_effort" in payload and "reasoning" not in payload:
         payload["reasoning"] = {"effort": payload.pop("reasoning_effort")}
 
+    # Remove temperature parameter for models that don't support it in responses API
+    model = payload.get("model", "")
+    if model.startswith("gpt-5"):
+        payload.pop("temperature", None)
+
     payload["input"] = _construct_responses_api_input(messages)
     if tools := payload.pop("tools", None):
         new_tools: list = []
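
A self-contained walk-through of the payload transformation above, using a plain dict to mirror the two rewrites (folding ``reasoning_effort`` into ``reasoning``, then dropping ``temperature`` for gpt-5):

    # Hypothetical payload for a gpt-5 Responses API call.
    payload = {"model": "gpt-5", "temperature": 0.7, "reasoning_effort": "low"}

    if "reasoning_effort" in payload and "reasoning" not in payload:
        payload["reasoning"] = {"effort": payload.pop("reasoning_effort")}
    if payload.get("model", "").startswith("gpt-5"):
        payload.pop("temperature", None)

    assert payload == {"model": "gpt-5", "reasoning": {"effort": "low"}}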