@@ -2278,11 +2278,23 @@ class GetPopulation(BaseModel):
2278
2278
`docs <https://python.langchain.com/docs/integrations/chat/openai/>`_ for more
2279
2279
detail.
2280
2280
2281
+ .. note::
2282
+ ``langchain-openai >= 0.3.26`` allows users to opt-in to an updated
2283
+ AIMessage format when using the Responses API. Setting
2284
+
2285
+ .. code-block:: python
2286
+
2287
+ llm = ChatOpenAI(model="...", output_version="responses/v1")
2288
+
2289
+ will format output from reasoning summaries, built-in tool invocations, and
2290
+ other response items into the message's ``content`` field, rather than
2291
+ ``additional_kwargs``. We recommend this format for new applications.
2292
+
2281
2293
.. code-block:: python
2282
2294
2283
2295
from langchain_openai import ChatOpenAI
2284
2296
2285
- llm = ChatOpenAI(model="gpt-4o-mini")
2297
+ llm = ChatOpenAI(model="gpt-4.1-mini", output_version="responses/v1")
2286
2298
2287
2299
tool = {"type": "web_search_preview"}
2288
2300
llm_with_tools = llm.bind_tools([tool])
@@ -2323,7 +2335,7 @@ class GetPopulation(BaseModel):
2323
2335
2324
2336
from langchain_openai import ChatOpenAI
2325
2337
2326
- llm = ChatOpenAI(model="gpt-4o-mini", use_responses_api=True)
2338
+ llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
2327
2339
response = llm.invoke("Hi, I'm Bob.")
2328
2340
response.text()
2329
2341
@@ -2342,11 +2354,34 @@ class GetPopulation(BaseModel):
2342
2354
2343
2355
"Your name is Bob. How can I help you today, Bob?"
2344
2356
2357
+ .. versionadded:: 0.3.26
2358
+
2359
+ You can also initialize ChatOpenAI with :attr:`use_previous_response_id`.
2360
+ Input messages up to the most recent response will then be dropped from request
2361
+ payloads, and ``previous_response_id`` will be set using the ID of the most
2362
+ recent response.
2363
+
2364
+ .. code-block:: python
2365
+
2366
+ llm = ChatOpenAI(model="gpt-4.1-mini", use_previous_response_id=True)
2367
+
2345
2368
.. dropdown:: Reasoning output
2346
2369
2347
2370
OpenAI's Responses API supports `reasoning models <https://platform.openai.com/docs/guides/reasoning?api-mode=responses>`_
2348
2371
that expose a summary of internal reasoning processes.
2349
2372
2373
+ .. note::
2374
+ ``langchain-openai >= 0.3.26`` allows users to opt-in to an updated
2375
+ AIMessage format when using the Responses API. Setting
2376
+
2377
+ .. code-block:: python
2378
+
2379
+ llm = ChatOpenAI(model="...", output_version="responses/v1")
2380
+
2381
+ will format output from reasoning summaries, built-in tool invocations, and
2382
+ other response items into the message's ``content`` field, rather than
2383
+ ``additional_kwargs``. We recommend this format for new applications.
2384
+
2350
2385
.. code-block:: python
2351
2386
2352
2387
from langchain_openai import ChatOpenAI
@@ -2357,24 +2392,23 @@ class GetPopulation(BaseModel):
2357
2392
}
2358
2393
2359
2394
llm = ChatOpenAI(
2360
- model="o4-mini", use_responses_api=True, model_kwargs={"reasoning": reasoning}
2395
+ model="o4-mini", reasoning=reasoning, output_version="responses/v1"
2361
2396
)
2362
2397
response = llm.invoke("What is 3^3?")
2363
2398
2399
+ # Response text
2364
2400
print(f"Output: {response.text()}")
2365
- print(f"Reasoning: {response.additional_kwargs['reasoning']}")
2366
2401
2367
- .. code-block:: none
2402
+ # Reasoning summaries
2403
+ for block in response.content:
2404
+ if block["type"] == "reasoning":
2405
+ for summary in block["summary"]:
2406
+ print(summary["text"])
2368
2407
2369
- Output: 3^3 = 27.
2408
+ .. code-block:: none
2370
2409
2371
- Reasoning: {
2372
- 'id': 'rs_67fffc44b1c08191b6ca9bead6d832590433145b1786f809',
2373
- 'summary': [
2374
- {'text': 'The user wants to know...', 'type': 'summary_text'}
2375
- ],
2376
- 'type': 'reasoning'
2377
- }
2410
+ Output: 3³ = 27
2411
+ Reasoning: The user wants to know...
2378
2412
2379
2413
.. dropdown:: Structured output
2380
2414
0 commit comments