     WebSearchToolParam,
     response_create_params,
 )
+from openai.types.responses.response_prompt_param import ResponsePromptParam
 
 from .. import _debug
 from ..agent_output import AgentOutputSchemaBase
@@ -74,6 +75,7 @@ async def get_response(
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with response_span(disabled=tracing.is_disabled()) as span_response:
             try:
@@ -86,6 +88,7 @@ async def get_response(
                 handoffs,
                 previous_response_id,
                 stream=False,
+                prompt=prompt,
             )
 
             if _debug.DONT_LOG_MODEL_DATA:
@@ -141,6 +144,7 @@ async def stream_response(
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[ResponseStreamEvent]:
         """
         Yields a partial message as it is generated, as well as the usage information.
@@ -156,6 +160,7 @@ async def stream_response(
                 handoffs,
                 previous_response_id,
                 stream=True,
+                prompt=prompt,
             )
 
             final_response: Response | None = None
@@ -192,6 +197,7 @@ async def _fetch_response(
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[True],
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncStream[ResponseStreamEvent]: ...
 
     @overload
@@ -205,6 +211,7 @@ async def _fetch_response(
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[False],
+        prompt: ResponsePromptParam | None = None,
     ) -> Response: ...
 
     async def _fetch_response(
@@ -217,6 +224,7 @@ async def _fetch_response(
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[True] | Literal[False] = False,
+        prompt: ResponsePromptParam | None = None,
     ) -> Response | AsyncStream[ResponseStreamEvent]:
         list_input = ItemHelpers.input_to_new_input_list(input)
 
@@ -252,6 +260,7 @@ async def _fetch_response(
             input=list_input,
             include=converted_tools.includes,
             tools=converted_tools.tools,
+            prompt=self._non_null_or_not_given(prompt),
             temperature=self._non_null_or_not_given(model_settings.temperature),
             top_p=self._non_null_or_not_given(model_settings.top_p),
             truncation=self._non_null_or_not_given(model_settings.truncation),