
Commit c66fab0

Fix #1564 Add conversations API support
1 parent e8d311b commit c66fab0

17 files changed (+97 −22 lines)
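
This commit threads a new conversation_id option through the Runner entry points and the model layer down to the OpenAI Responses API, letting a run attach to a server-side stored conversation instead of replaying prior turns as input. A minimal usage sketch based on the signatures added below; the agent setup and the "conv_abc123" value are hypothetical placeholders:

import asyncio

from agents import Agent, Runner

agent = Agent(name="Assistant", instructions="Reply concisely.")

async def main() -> None:
    # Attach this run to an existing stored conversation
    # ("conv_abc123" is a placeholder, not a real ID).
    result = await Runner.run(
        agent,
        input="What did we talk about last time?",
        conversation_id="conv_abc123",
    )
    print(result.final_output)

asyncio.run(main())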

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ requires-python = ">=3.9"
 license = "MIT"
 authors = [{ name = "OpenAI", email = "[email protected]" }]
 dependencies = [
-    "openai>=1.99.6,<2",
+    "openai>=1.101.0,<2",
     "pydantic>=2.10, <3",
     "griffe>=1.5.6, <2",
     "typing-extensions>=4.12.2, <5",

src/agents/extensions/models/litellm_model.py

Lines changed: 4 additions & 2 deletions
@@ -82,7 +82,8 @@ async def get_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: Any | None = None,
     ) -> ModelResponse:
         with generation_span(
@@ -171,7 +172,8 @@ async def stream_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: Any | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         with generation_span(

src/agents/models/interface.py

Lines changed: 4 additions & 0 deletions
@@ -48,6 +48,7 @@ async def get_response(
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt: ResponsePromptParam | None,
     ) -> ModelResponse:
         """Get a response from the model.
@@ -62,6 +63,7 @@ async def get_response(
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            conversation_id: The ID of the stored conversation, if any.
             prompt: The prompt config to use for the model.

         Returns:
@@ -81,6 +83,7 @@ def stream_response(
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt: ResponsePromptParam | None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """Stream a response from the model.
@@ -95,6 +98,7 @@ def stream_response(
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            conversation_id: The ID of the stored conversation, if any.
             prompt: The prompt config to use for the model.

         Returns:
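
Note that conversation_id is added to the abstract Model interface as a required keyword, so any third-party Model implementation has to accept it, even if it ignores the value the way the LiteLLM and Chat Completions backends do. A sketch of a conforming signature; the class name and body are hypothetical stubs, not part of this commit:

class MyModel(Model):
    async def get_response(
        self,
        system_instructions,
        input,
        model_settings,
        tools,
        output_schema,
        handoffs,
        tracing,
        *,
        previous_response_id,
        conversation_id,  # new in this commit; may safely be ignored
        prompt,
    ) -> ModelResponse:
        ...  # call your backend here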

src/agents/models/openai_chatcompletions.py

Lines changed: 4 additions & 2 deletions
@@ -55,7 +55,8 @@ async def get_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with generation_span(
@@ -142,7 +143,8 @@ async def stream_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """

src/agents/models/openai_responses.py

Lines changed: 14 additions & 5 deletions
@@ -75,7 +75,8 @@ async def get_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with response_span(disabled=tracing.is_disabled()) as span_response:
@@ -87,7 +88,8 @@ async def get_response(
                 tools,
                 output_schema,
                 handoffs,
-                previous_response_id,
+                previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 stream=False,
                 prompt=prompt,
             )
@@ -150,7 +152,8 @@ async def stream_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[ResponseStreamEvent]:
         """
@@ -165,7 +168,8 @@ async def stream_response(
                 tools,
                 output_schema,
                 handoffs,
-                previous_response_id,
+                previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 stream=True,
                 prompt=prompt,
             )
@@ -203,6 +207,7 @@ async def _fetch_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         previous_response_id: str | None,
+        conversation_id: str | None,
         stream: Literal[True],
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncStream[ResponseStreamEvent]: ...
@@ -217,6 +222,7 @@ async def _fetch_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         previous_response_id: str | None,
+        conversation_id: str | None,
         stream: Literal[False],
         prompt: ResponsePromptParam | None = None,
     ) -> Response: ...
@@ -229,7 +235,8 @@ async def _fetch_response(
         tools: list[Tool],
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         stream: Literal[True] | Literal[False] = False,
         prompt: ResponsePromptParam | None = None,
     ) -> Response | AsyncStream[ResponseStreamEvent]:
@@ -265,6 +272,7 @@ async def _fetch_response(
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
                 f"Previous response id: {previous_response_id}\n"
+                f"Conversation id: {conversation_id}\n"
             )

         extra_args = dict(model_settings.extra_args or {})
@@ -278,6 +286,7 @@ async def _fetch_response(

         return await self._client.responses.create(
             previous_response_id=self._non_null_or_not_given(previous_response_id),
+            conversation=self._non_null_or_not_given(conversation_id),
             instructions=self._non_null_or_not_given(system_instructions),
             model=self.model,
             input=list_input,
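
In the Responses backend the new argument is forwarded as the conversation parameter of client.responses.create, which is presumably why the openai dependency floor moves to 1.101.0 in pyproject.toml above. Roughly what the underlying SDK call looks like when a conversation ID is supplied; a sketch with placeholder model and ID values:

import asyncio

from openai import AsyncOpenAI

async def main() -> None:
    client = AsyncOpenAI()
    response = await client.responses.create(
        model="gpt-4.1",  # placeholder model name
        input="Continue where we left off.",
        conversation="conv_abc123",  # placeholder stored-conversation ID
    )
    print(response.output_text)

asyncio.run(main())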

src/agents/run.py

Lines changed: 27 additions & 0 deletions
@@ -199,6 +199,9 @@ class RunOptions(TypedDict, Generic[TContext]):
     previous_response_id: NotRequired[str | None]
     """The ID of the previous response, if any."""

+    conversation_id: NotRequired[str | None]
+    """The ID of the stored conversation, if any."""
+
     session: NotRequired[Session | None]
     """The session for the run."""

@@ -215,6 +218,7 @@ async def run(
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResult:
         """Run a workflow starting at the given agent. The agent will run in a loop until a final
@@ -239,6 +243,7 @@ async def run(
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
             agent. Agents may perform handoffs, so we don't know the specific type of the output.
@@ -252,6 +257,7 @@ async def run(
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )

@@ -266,6 +272,7 @@ def run_sync(
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResult:
         """Run a workflow synchronously, starting at the given agent. Note that this just wraps the
@@ -293,6 +300,7 @@ def run_sync(
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
             agent. Agents may perform handoffs, so we don't know the specific type of the output.
@@ -306,6 +314,7 @@ def run_sync(
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )

@@ -319,6 +328,7 @@ def run_streamed(
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResultStreaming:
         """Run a workflow starting at the given agent in streaming mode. The returned result object
@@ -344,6 +354,7 @@ def run_streamed(
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A result object that contains data about the run, as well as a method to stream events.
         """
@@ -356,6 +367,7 @@ def run_streamed(
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )

@@ -377,6 +389,7 @@ async def run(
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
         if hooks is None:
             hooks = RunHooks[Any]()
@@ -469,6 +482,7 @@ async def run(
                             should_run_agent_start_hooks=should_run_agent_start_hooks,
                             tool_use_tracker=tool_use_tracker,
                             previous_response_id=previous_response_id,
+                            conversation_id=conversation_id,
                         ),
                     )
                 else:
@@ -483,6 +497,7 @@ async def run(
                         should_run_agent_start_hooks=should_run_agent_start_hooks,
                         tool_use_tracker=tool_use_tracker,
                         previous_response_id=previous_response_id,
+                        conversation_id=conversation_id,
                     )
                 should_run_agent_start_hooks = False

@@ -549,6 +564,7 @@ def run_sync(
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")

         return asyncio.get_event_loop().run_until_complete(
@@ -561,6 +577,7 @@ def run_sync(
                 hooks=hooks,
                 run_config=run_config,
                 previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
             )
         )

@@ -575,6 +592,7 @@ def run_streamed(
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")

         if hooks is None:
@@ -629,6 +647,7 @@ def run_streamed(
                 context_wrapper=context_wrapper,
                 run_config=run_config,
                 previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 session=session,
             )
         )
@@ -729,6 +748,7 @@ async def _start_streaming(
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
         previous_response_id: str | None,
+        conversation_id: str | None,
         session: Session | None,
     ):
         if streamed_result.trace:
@@ -812,6 +832,7 @@ async def _start_streaming(
                         tool_use_tracker,
                         all_tools,
                         previous_response_id,
+                        conversation_id,
                     )
                     should_run_agent_start_hooks = False

@@ -914,6 +935,7 @@ async def _run_single_turn_streamed(
         tool_use_tracker: AgentToolUseTracker,
         all_tools: list[Tool],
         previous_response_id: str | None,
+        conversation_id: str | None,
     ) -> SingleStepResult:
         emitted_tool_call_ids: set[str] = set()

@@ -974,6 +996,7 @@ async def _run_single_turn_streamed(
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             prompt=prompt_config,
         ):
             if isinstance(event, ResponseCompletedEvent):
@@ -1082,6 +1105,7 @@ async def _run_single_turn(
         should_run_agent_start_hooks: bool,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        conversation_id: str | None,
     ) -> SingleStepResult:
         # Ensure we run the hooks before anything else
         if should_run_agent_start_hooks:
@@ -1115,6 +1139,7 @@ async def _run_single_turn(
             run_config,
             tool_use_tracker,
             previous_response_id,
+            conversation_id,
             prompt_config,
         )

@@ -1309,6 +1334,7 @@ async def _get_new_response(
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt_config: ResponsePromptParam | None,
     ) -> ModelResponse:
         # Allow user to modify model input right before the call, if configured
@@ -1343,6 +1369,7 @@ async def _get_new_response(
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             prompt=prompt_config,
         )
         # If the agent has hooks, we need to call them after the LLM call
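
The keyword also flows through run_streamed and the internal single-turn helpers, so streaming runs can target a stored conversation as well. A sketch reusing the agent from the first example; the ID is again a placeholder:

async def stream_demo() -> None:
    result = Runner.run_streamed(
        agent,
        input="Summarize our conversation so far.",
        conversation_id="conv_abc123",  # placeholder
    )
    async for event in result.stream_events():
        print(type(event).__name__)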
