Skip to content

Commit 0bf223d

Browse files
authored
openai[patch]: add attribute to always use previous_response_id (#31734)
1 parent b02bd67 commit 0bf223d

File tree

3 files changed

+170
-1
lines changed

3 files changed

+170
-1
lines changed

libs/langchain/tests/unit_tests/chat_models/test_base.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,7 @@ def test_configurable() -> None:
138138
"extra_body": None,
139139
"include_response_headers": False,
140140
"stream_usage": False,
141+
"use_previous_response_id": False,
141142
"use_responses_api": None,
142143
},
143144
"kwargs": {

libs/partners/openai/langchain_openai/chat_models/base.py

Lines changed: 68 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -607,6 +607,40 @@ class BaseChatOpenAI(BaseChatModel):
607607
.. versionadded:: 0.3.24
608608
"""
609609

610+
use_previous_response_id: bool = False
611+
"""If True, always pass ``previous_response_id`` using the ID of the most recent
612+
response. Responses API only.
613+
614+
Input messages up to the most recent response will be dropped from request
615+
payloads.
616+
617+
For example, the following two are equivalent:
618+
619+
.. code-block:: python
620+
621+
llm = ChatOpenAI(
622+
model="o4-mini",
623+
use_previous_response_id=True,
624+
)
625+
llm.invoke(
626+
[
627+
HumanMessage("Hello"),
628+
AIMessage("Hi there!", response_metadata={"id": "resp_123"}),
629+
HumanMessage("How are you?"),
630+
]
631+
)
632+
633+
.. code-block:: python
634+
635+
llm = ChatOpenAI(
636+
model="o4-mini",
637+
use_responses_api=True,
638+
)
639+
llm.invoke([HumanMessage("How are you?")], previous_response_id="resp_123")
640+
641+
.. versionadded:: 0.3.26
642+
"""
643+
610644
use_responses_api: Optional[bool] = None
611645
"""Whether to use the Responses API instead of the Chat API.
612646
@@ -1081,6 +1115,8 @@ def _use_responses_api(self, payload: dict) -> bool:
10811115
return True
10821116
elif self.truncation is not None:
10831117
return True
1118+
elif self.use_previous_response_id:
1119+
return True
10841120
else:
10851121
return _use_responses_api(payload)
10861122

@@ -1097,7 +1133,14 @@ def _get_request_payload(
10971133

10981134
payload = {**self._default_params, **kwargs}
10991135
if self._use_responses_api(payload):
1100-
payload = _construct_responses_api_payload(messages, payload)
1136+
if self.use_previous_response_id:
1137+
last_messages, previous_response_id = _get_last_messages(messages)
1138+
payload_to_use = last_messages if previous_response_id else messages
1139+
if previous_response_id:
1140+
payload["previous_response_id"] = previous_response_id
1141+
payload = _construct_responses_api_payload(payload_to_use, payload)
1142+
else:
1143+
payload = _construct_responses_api_payload(messages, payload)
11011144
else:
11021145
payload["messages"] = [_convert_message_to_dict(m) for m in messages]
11031146
return payload
@@ -3202,6 +3245,30 @@ def _use_responses_api(payload: dict) -> bool:
32023245
return bool(uses_builtin_tools or responses_only_args.intersection(payload))
32033246

32043247

3248+
def _get_last_messages(
    messages: Sequence[BaseMessage],
) -> tuple[Sequence[BaseMessage], Optional[str]]:
    """Split a conversation at the most recent trackable AI response.

    Scans backwards for the latest ``AIMessage``. If that message carries a
    non-empty ``response_metadata["id"]``, returns:

    1. The messages that came after it (possibly an empty sequence), and
    2. That response id.

    If the latest ``AIMessage`` has no id — or there is no ``AIMessage`` at
    all — the entire ``messages`` sequence is returned together with ``None``.
    """
    # Walk newest-to-oldest; only the most recent AIMessage matters.
    for offset, message in enumerate(reversed(messages)):
        if not isinstance(message, AIMessage):
            continue
        response_id = message.response_metadata.get("id")
        if not response_id:
            # Most recent AI turn is not resumable -> send everything.
            return messages, None
        # reversed() offset 0 is the final element, so this slice keeps
        # exactly the messages that follow the located AIMessage.
        return messages[len(messages) - offset :], response_id
    return messages, None
3270+
3271+
32053272
def _construct_responses_api_payload(
32063273
messages: Sequence[BaseMessage], payload: dict
32073274
) -> dict:

libs/partners/openai/tests/unit_tests/chat_models/test_base.py

Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
from langchain_core.messages import (
1313
AIMessage,
1414
AIMessageChunk,
15+
BaseMessage,
1516
FunctionMessage,
1617
HumanMessage,
1718
InvalidToolCall,
@@ -59,6 +60,7 @@
5960
_convert_to_openai_response_format,
6061
_create_usage_metadata,
6162
_format_message_content,
63+
_get_last_messages,
6264
_oai_structured_outputs_parser,
6365
)
6466

@@ -2151,3 +2153,102 @@ def test_compat() -> None:
21512153
message_v03_output = _convert_to_v03_ai_message(message)
21522154
assert message_v03_output == message_v03
21532155
assert message_v03_output is not message_v03
2156+
2157+
2158+
def test_get_last_messages() -> None:
    """Exercise ``_get_last_messages`` across representative conversations."""
    hello = HumanMessage("Hello")
    first_reply = AIMessage("Hi there!", response_metadata={"id": "resp_123"})
    follow_up = HumanMessage("How are you?")
    second_reply = AIMessage("Well thanks.", response_metadata={"id": "resp_456"})
    tool_call_reply = AIMessage(
        "",
        response_metadata={"id": "resp_456"},
        tool_calls=[
            {
                "type": "tool_call",
                "name": "get_weather",
                "id": "call_123",
                "args": {"location": "San Francisco"},
            }
        ],
    )
    tool_result = ToolMessage("It's sunny.", tool_call_id="call_123")

    # (conversation, expected trailing messages, expected response id)
    cases = [
        # No AIMessage yet: the whole conversation comes back, no id.
        ([hello], [HumanMessage("Hello")], None),
        # One tracked response: only the messages after it remain.
        ([hello, first_reply, follow_up], [HumanMessage("How are you?")], "resp_123"),
        # Several tracked responses: the most recent one wins.
        (
            [hello, first_reply, follow_up, second_reply, HumanMessage("Great.")],
            [HumanMessage("Great.")],
            "resp_456",
        ),
        # Tool flow: the ToolMessage after the tool-calling response remains.
        (
            [
                hello,
                first_reply,
                HumanMessage("What's the weather?"),
                tool_call_reply,
                tool_result,
            ],
            [ToolMessage("It's sunny.", tool_call_id="call_123")],
            "resp_456",
        ),
        # Multiple trailing human messages are all preserved, in order.
        (
            [
                hello,
                first_reply,
                follow_up,
                second_reply,
                HumanMessage("Good."),
                HumanMessage("Great."),
            ],
            [HumanMessage("Good."), HumanMessage("Great.")],
            "resp_456",
        ),
        # Conversation ending on a tracked response: empty tail.
        ([hello, first_reply], [], "resp_123"),
    ]

    for conversation, expected_tail, expected_id in cases:
        last_messages, previous_response_id = _get_last_messages(conversation)
        assert last_messages == expected_tail
        assert previous_response_id == expected_id
2226+
2227+
2228+
def test_get_request_payload_use_previous_response_id() -> None:
    """Verify that ``use_previous_response_id`` trims the request payload."""
    conversation = [
        HumanMessage("Hello"),
        AIMessage("Hi there!", response_metadata={"id": "resp_123"}),
        HumanMessage("How are you?"),
    ]

    # Default behavior: the full history is sent and no previous_response_id
    # is attached to the payload.
    default_llm = ChatOpenAI(model="o4-mini", use_responses_api=True)
    default_payload = default_llm._get_request_payload(conversation)
    assert "previous_response_id" not in default_payload
    assert len(default_payload["input"]) == 3

    # Opting in: only messages after the last tracked response are sent.
    opt_in_llm = ChatOpenAI(
        model="o4-mini",
        # Specifying use_previous_response_id automatically engages Responses API
        use_previous_response_id=True,
    )
    opt_in_payload = opt_in_llm._get_request_payload(conversation)
    assert opt_in_payload["previous_response_id"] == "resp_123"
    assert len(opt_in_payload["input"]) == 1

    # A lone message has no prior response to reference.
    solo_payload = opt_in_llm._get_request_payload([HumanMessage("Hello")])
    assert "previous_response_id" not in solo_payload
    assert len(solo_payload["input"]) == 1

0 commit comments

Comments (0)