11from __future__ import annotations
22
3+ import contextlib
4+ import json
35from typing import TYPE_CHECKING , Any , cast
46
57import openai
911from openai .types .completion import Completion
1012from openai .types .create_embedding_response import CreateEmbeddingResponse
1113from openai .types .images_response import ImagesResponse
14+ from openai .types .responses import Response
1215from opentelemetry .sdk .trace import ReadableSpan
1316from opentelemetry .trace import get_current_span
1417
15- from ...utils import handle_internal_errors
18+ from logfire import LogfireSpan
19+
20+ from ...utils import handle_internal_errors , log_internal_error
1621from .types import EndpointConfig , StreamState
1722
1823if TYPE_CHECKING :
@@ -43,37 +48,46 @@ def get_endpoint_config(options: FinalRequestOptions) -> EndpointConfig:
4348
4449 return EndpointConfig (
4550 message_template = 'Chat Completion with {request_data[model]!r}' ,
46- span_data = {'request_data' : json_data },
51+ span_data = {'request_data' : json_data , 'gen_ai.request.model' : json_data [ 'model' ] },
4752 stream_state_cls = OpenaiChatCompletionStreamState ,
4853 )
4954 elif url == '/responses' :
5055 if is_current_agent_span ('Responses API' , 'Responses API with {gen_ai.request.model!r}' ):
5156 return EndpointConfig (message_template = '' , span_data = {})
5257
53- return EndpointConfig ( # pragma: no cover
54- message_template = 'Responses API with {request_data[model]!r}' ,
55- span_data = {'request_data' : json_data },
58+ return EndpointConfig (
59+ message_template = 'Responses API with {gen_ai.request.model!r}' ,
60+ span_data = {
61+ 'gen_ai.request.model' : json_data ['model' ],
62+ 'events' : inputs_to_events (
63+ json_data ['input' ], # type: ignore
64+ json_data .get ('instructions' ), # type: ignore
65+ ),
66+ },
5667 )
5768 elif url == '/completions' :
5869 return EndpointConfig (
5970 message_template = 'Completion with {request_data[model]!r}' ,
60- span_data = {'request_data' : json_data },
71+ span_data = {'request_data' : json_data , 'gen_ai.request.model' : json_data [ 'model' ] },
6172 stream_state_cls = OpenaiCompletionStreamState ,
6273 )
6374 elif url == '/embeddings' :
6475 return EndpointConfig (
6576 message_template = 'Embedding Creation with {request_data[model]!r}' ,
66- span_data = {'request_data' : json_data },
77+ span_data = {'request_data' : json_data , 'gen_ai.request.model' : json_data [ 'model' ] },
6778 )
6879 elif url == '/images/generations' :
6980 return EndpointConfig (
7081 message_template = 'Image Generation with {request_data[model]!r}' ,
71- span_data = {'request_data' : json_data },
82+ span_data = {'request_data' : json_data , 'gen_ai.request.model' : json_data [ 'model' ] },
7283 )
7384 else :
85+ span_data : dict [str , Any ] = {'request_data' : json_data , 'url' : url }
86+ if 'model' in json_data :
87+ span_data ['gen_ai.request.model' ] = json_data ['model' ]
7488 return EndpointConfig (
7589 message_template = 'OpenAI API call to {url!r}' ,
76- span_data = { 'request_data' : json_data , 'url' : url } ,
90+ span_data = span_data ,
7791 )
7892
7993
@@ -147,7 +161,6 @@ def on_response(response: ResponseT, span: LogfireSpan) -> ResponseT:
147161 span .set_attribute ('gen_ai.system' , 'openai' )
148162
149163 if isinstance (response_model := getattr (response , 'model' , None ), str ):
150- span .set_attribute ('gen_ai.request.model' , response_model ) # we don't have the actual request model here
151164 span .set_attribute ('gen_ai.response.model' , response_model )
152165
153166 usage = getattr (response , 'usage' , None )
@@ -171,8 +184,13 @@ def on_response(response: ResponseT, span: LogfireSpan) -> ResponseT:
171184 )
172185 elif isinstance (response , CreateEmbeddingResponse ):
173186 span .set_attribute ('response_data' , {'usage' : usage })
174- elif isinstance (response , ImagesResponse ): # pragma: no branch
187+ elif isinstance (response , ImagesResponse ):
175188 span .set_attribute ('response_data' , {'images' : response .data })
189+ elif isinstance (response , Response ): # pragma: no branch
190+ events = json .loads (span .attributes ['events' ]) # type: ignore
191+ events += responses_output_events (response )
192+ span .set_attribute ('events' , events )
193+
176194 return response
177195
178196
@@ -182,3 +200,103 @@ def is_async_client(client: type[openai.OpenAI] | type[openai.AsyncOpenAI]):
182200 return False
183201 assert issubclass (client , openai .AsyncOpenAI ), f'Expected OpenAI or AsyncOpenAI type, got: { client } '
184202 return True
203+
204+
@handle_internal_errors
def inputs_to_events(inputs: str | list[dict[str, Any]] | None, instructions: str | None):
    """Generate dictionaries in the style of OTel events from the inputs and instructions to the Responses API.

    A system message event is emitted first when `instructions` is provided;
    each entry of `inputs` is then expanded via `input_to_events`, sharing one
    tool-call-id -> function-name mapping across the whole sequence.
    """
    tool_call_id_to_name: dict[str, str] = {}
    events: list[dict[str, Any]] = []
    if instructions:
        events.append(
            {
                'event.name': 'gen_ai.system.message',
                'content': instructions,
                'role': 'system',
            }
        )
    if not inputs:
        return events
    # A bare string is shorthand for a single user message.
    normalized = [{'role': 'user', 'content': inputs}] if isinstance(inputs, str) else inputs
    for item in normalized:
        events.extend(input_to_events(item, tool_call_id_to_name))
    return events
224+
225+
@handle_internal_errors
def responses_output_events(response: Response):
    """Generate dictionaries in the style of OTel events from the outputs of the Responses API.

    Every produced event is stamped with the assistant role, since outputs
    always come from the model.
    """
    events: list[dict[str, Any]] = []
    for item in response.output:
        # Outputs don't have tool call responses, so no id -> name mapping is needed.
        for event in input_to_events(item.model_dump(), tool_call_id_to_name={}):
            events.append({**event, 'role': 'assistant'})
    return events
238+
239+
def input_to_events(inp: dict[str, Any], tool_call_id_to_name: dict[str, str]):
    """Generate dictionaries in the style of OTel events from one input to the Responses API.

    `tool_call_id_to_name` is a mapping from tool call IDs to function names.
    It's populated when the input is a tool call and used later to
    provide the function name in the event for tool call responses.
    """
    try:
        result: list[dict[str, Any]] = []
        role = inp.get('role')
        typ = inp.get('type')
        content = inp.get('content')
        if role and content and typ in (None, 'message'):
            event_name = f'gen_ai.{role}.message'
            if isinstance(content, str):
                result.append({'event.name': event_name, 'content': content, 'role': role})
            else:
                for part in content:
                    # A missing 'type' or 'text' key means the part has an
                    # unrecognized shape; fall back to an unknown event.
                    try:
                        if part['type'] == 'output_text':  # pragma: no branch
                            result.append({'event.name': event_name, 'content': part['text'], 'role': role})
                            continue
                    except KeyError:
                        pass
                    result.append(unknown_event(part))  # pragma: no cover
        elif typ == 'function_call':
            call_id = inp['call_id']
            function_name = inp['name']
            # Remember the name so the matching function_call_output can be labelled.
            tool_call_id_to_name[call_id] = function_name
            result.append(
                {
                    'event.name': 'gen_ai.assistant.message',
                    'role': 'assistant',
                    'tool_calls': [
                        {
                            'id': call_id,
                            'type': 'function',
                            'function': {'name': function_name, 'arguments': inp['arguments']},
                        },
                    ],
                }
            )
        elif typ == 'function_call_output':
            call_id = inp['call_id']
            result.append(
                {
                    'event.name': 'gen_ai.tool.message',
                    'role': 'tool',
                    'id': call_id,
                    'content': inp['output'],
                    'name': tool_call_id_to_name.get(call_id, inp.get('name', 'unknown')),
                }
            )
        else:
            result.append(unknown_event(inp))
        return result
    except Exception:  # pragma: no cover
        log_internal_error()
        return [unknown_event(inp)]
294+
295+
def unknown_event(inp: dict[str, Any]):
    """Fallback OTel-style event for an input whose shape isn't recognized.

    The full original payload is preserved under 'data' for inspection.
    """
    role = inp.get('role') or 'unknown'
    summary = f'{inp.get("type")}\n\nSee JSON for details'
    return {
        'event.name': 'gen_ai.unknown',
        'role': role,
        'content': summary,
        'data': inp,
    }
0 commit comments