Commit edb88f2

Remove ResponseDataWrapper to reduce magic (#918)
1 parent: b1f0478

File tree: 3 files changed (+57, -49 lines)

logfire/_internal/integrations/llm_providers/openai.py

Lines changed: 3 additions & 3 deletions
@@ -47,7 +47,7 @@ def get_endpoint_config(options: FinalRequestOptions) -> EndpointConfig:
             stream_state_cls=OpenaiChatCompletionStreamState,
         )
     elif url == '/responses':
-        if is_current_agent_span('Responses API'):
+        if is_current_agent_span('Responses API', 'Responses API with {gen_ai.request.model!r}'):
            return EndpointConfig(message_template='', span_data={})

         return EndpointConfig(  # pragma: no cover
@@ -77,13 +77,13 @@ def get_endpoint_config(options: FinalRequestOptions) -> EndpointConfig:
     )


-def is_current_agent_span(span_name: str):
+def is_current_agent_span(*span_names: str):
     current_span = get_current_span()
     return (
         isinstance(current_span, ReadableSpan)
         and current_span.instrumentation_scope
         and current_span.instrumentation_scope.name == 'logfire.openai_agents'
-        and current_span.name == span_name
+        and current_span.name in span_names
     )
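
For reference, a minimal self-contained sketch (assuming only the OpenTelemetry API/SDK, not the rest of the logfire module) of what the updated check does: the OpenAI client instrumentation skips emitting its own span when the active span was already created by the logfire.openai_agents instrumentation under one of the expected names. Both the old name and the new model-bearing template are passed so spans created either way are recognized.

from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.trace import get_current_span


def is_current_agent_span(*span_names: str) -> bool:
    # True if the active span was opened by the logfire.openai_agents
    # instrumentation under one of the given names.
    current_span = get_current_span()
    return bool(
        isinstance(current_span, ReadableSpan)
        and current_span.instrumentation_scope
        and current_span.instrumentation_scope.name == 'logfire.openai_agents'
        and current_span.name in span_names
    )


# Usage mirroring the diff: suppress a duplicate span for the raw OpenAI call.
if is_current_agent_span('Responses API', 'Responses API with {gen_ai.request.model!r}'):
    pass  # the agents instrumentation already covers this request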

logfire/_internal/integrations/openai_agents.py

Lines changed: 40 additions & 32 deletions
@@ -22,7 +22,7 @@
     Trace,
 )
 from agents.models.openai_responses import OpenAIResponsesModel
-from agents.tracing import ResponseSpanData
+from agents.tracing import ResponseSpanData, response_span
 from agents.tracing.scope import Scope
 from agents.tracing.spans import NoOpSpan, SpanData, SpanError, TSpanData
 from agents.tracing.traces import NoOpTrace
@@ -77,6 +77,7 @@ def create_span(
         if isinstance(span, NoOpSpan):
             return span

+        extra_attributes: dict[str, Any] = {}
         if isinstance(span_data, AgentSpanData):
             msg_template = 'Agent run: {name!r}'
         elif isinstance(span_data, FunctionSpanData):
@@ -85,7 +86,9 @@ def create_span(
             msg_template = 'Chat completion with {gen_ai.request.model!r}'
         elif isinstance(span_data, ResponseSpanData):
             msg_template = 'Responses API'
-            span_data.__class__ = ResponseDataWrapper
+            extra_attributes = get_magic_response_attributes()
+            if 'gen_ai.request.model' in extra_attributes:  # pragma: no branch
+                msg_template += ' with {gen_ai.request.model!r}'
         elif isinstance(span_data, GuardrailSpanData):
             msg_template = 'Guardrail {name!r} {triggered=}'
         elif isinstance(span_data, HandoffSpanData):
@@ -98,6 +101,7 @@ def create_span(
         logfire_span = self.logfire_instance.span(
             msg_template,
             **attributes_from_span_data(span_data, msg_template),
+            **extra_attributes,
             _tags=['LLM'] * isinstance(span_data, GenerationSpanData),
         )
         helper = LogfireSpanHelper(logfire_span)
@@ -248,8 +252,6 @@ def on_ending(self):
         template = logfire_span.message_template
         assert template
         new_attrs = attributes_from_span_data(self.span_data, template)  # type: ignore
-        if 'gen_ai.request.model' not in template and 'gen_ai.request.model' in new_attrs:
-            template += ' with {gen_ai.request.model!r}'
         try:
             message = logfire_format(template, new_attrs, NOOP_SCRUBBER)
         except Exception:  # pragma: no cover
@@ -301,8 +303,9 @@ def attributes_from_span_data(span_data: SpanData, msg_template: str) -> dict[st
         attributes = span_data.export()
         if '{type}' not in msg_template and attributes.get('type') == span_data.type:
             del attributes['type']
-        if isinstance(span_data, ResponseDataWrapper):
-            attributes.update(span_data.extra_attributes)
+        if isinstance(span_data, ResponseSpanData):
+            if span_data.response:
+                attributes.update(get_basic_response_attributes(span_data.response))
             if span_data.input:
                 attributes['raw_input'] = span_data.input
             if events := get_response_span_events(span_data):
@@ -333,34 +336,39 @@ def attributes_from_span_data(span_data: SpanData, msg_template: str) -> dict[st
         return {}


-class ResponseDataWrapper(ResponseSpanData):
-    # TODO reduce the magic here
-    _response: Response | None = None
-    extra_attributes: dict[str, Any] = {}
+def get_basic_response_attributes(response: Response):
+    return {
+        'gen_ai.response.model': getattr(response, 'model', None),
+        'response': response,
+        'gen_ai.system': 'openai',
+        'gen_ai.operation.name': 'chat',
+    }

-    @property
-    def response(self):
-        return self._response
-
-    @response.setter
-    def response(self, response: Response):
-        self._response = response
-        with handle_internal_errors:
-            frame = inspect.currentframe()
-            assert frame
+
+def get_magic_response_attributes() -> dict[str, Any]:
+    try:
+        frame = inspect.currentframe()
+        while frame and frame.f_code != response_span.__code__:
             frame = frame.f_back
-            assert frame
-            self.extra_attributes = {
-                'gen_ai.response.model': getattr(response, 'model', None),
-                'response': response,
-                'gen_ai.system': 'openai',
-                'gen_ai.operation.name': 'chat',
-            }
-            for name, var in frame.f_locals.items():
-                if name == 'model_settings' and isinstance(var, ModelSettings):
-                    self.extra_attributes[name] = var
-                elif name == 'self' and isinstance(var, OpenAIResponsesModel):
-                    self.extra_attributes['gen_ai.request.model'] = var.model
+        if frame:
+            frame = frame.f_back
+        else:  # pragma: no cover
+            return {}
+        assert frame
+
+        result: dict[str, Any] = {}
+
+        model_settings = frame.f_locals.get('model_settings')
+        if isinstance(model_settings, ModelSettings):  # pragma: no branch
+            result['model_settings'] = model_settings
+
+        model = frame.f_locals.get('self')
+        if isinstance(model, OpenAIResponsesModel):  # pragma: no branch
+            result['gen_ai.request.model'] = model.model
+        return result
+    except Exception:  # pragma: no cover
+        log_internal_error()
+        return {}


 @handle_internal_errors
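
The wrapper's property-setter magic is replaced by plain stack inspection. Below is a toy, self-contained sketch of the same technique, with invented names (known_wrapper, extract_caller_locals, caller) that are not part of the diff: walk up the stack until a frame is executing a known function's code object (the diff compares against response_span.__code__), then step one frame further to that function's caller and read the values of interest out of its f_locals.

import inspect
from typing import Any


def extract_caller_locals() -> dict[str, Any]:
    # Walk up the call stack until we reach the frame running known_wrapper,
    # matched by code object rather than by name.
    frame = inspect.currentframe()
    while frame and frame.f_code is not known_wrapper.__code__:
        frame = frame.f_back
    if frame is None:
        return {}
    # Step to whoever called known_wrapper and read its local variables.
    frame = frame.f_back
    if frame is None:
        return {}
    return {name: frame.f_locals[name] for name in ('model', 'settings') if name in frame.f_locals}


def known_wrapper() -> dict[str, Any]:
    # Stands in for response_span(): the frame the walk searches for.
    return extract_caller_locals()


def caller() -> dict[str, Any]:
    model = 'gpt-4o'               # picked up via f_locals, like OpenAIResponsesModel.model
    settings = {'temperature': 0}  # picked up via f_locals, like model_settings
    return known_wrapper()


print(caller())  # expected: {'model': 'gpt-4o', 'settings': {'temperature': 0}}

As in the diff, the real helper wraps all of this in try/except and falls back to an empty dict via log_internal_error(), so a change in the agents SDK's internals degrades the span attributes rather than breaking the instrumented application.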

tests/otel_integrations/test_openai_agents.py

Lines changed: 14 additions & 14 deletions
@@ -316,15 +316,15 @@ def random_number() -> int:
     assert exporter.exported_spans_as_dict(parse_json_attributes=True) == snapshot(
         [
             {
-                'name': 'Responses API',
+                'name': 'Responses API with {gen_ai.request.model!r}',
                 'context': {'trace_id': 1, 'span_id': 5, 'is_remote': False},
                 'parent': {'trace_id': 1, 'span_id': 3, 'is_remote': False},
                 'start_time': 3000000000,
                 'end_time': 4000000000,
                 'attributes': {
                     'code.filepath': IsStr(),
                     'code.lineno': 123,
-                    'logfire.msg_template': 'Responses API',
+                    'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
                     'logfire.span_type': 'span',
                     'logfire.msg': "Responses API with 'gpt-4o'",
                     'response_id': 'resp_67ced68228748191b31ea5d9172a7b4b',
@@ -586,7 +586,7 @@ def random_number() -> int:
                 },
             },
             {
-                'name': 'Responses API',
+                'name': 'Responses API with {gen_ai.request.model!r}',
                 'context': {'trace_id': 1, 'span_id': 13, 'is_remote': False},
                 'parent': {'trace_id': 1, 'span_id': 11, 'is_remote': False},
                 'start_time': 11000000000,
@@ -595,7 +595,7 @@ def random_number() -> int:
                     'code.filepath': 'test_openai_agents.py',
                     'code.function': 'test_responses',
                     'code.lineno': 123,
-                    'logfire.msg_template': 'Responses API',
+                    'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
                     'logfire.span_type': 'span',
                     'logfire.msg': "Responses API with 'gpt-4o'",
                     'response_id': 'resp_67ced68425f48191a5fb0c2b61cb27dd',
@@ -917,15 +917,15 @@ async def zero_guardrail(_context: Any, _agent: Agent[Any], inp: Any) -> Guardra
                 },
             },
             {
-                'name': 'Responses API',
+                'name': 'Responses API with {gen_ai.request.model!r}',
                 'context': {'trace_id': 1, 'span_id': 7, 'is_remote': False},
                 'parent': {'trace_id': 1, 'span_id': 3, 'is_remote': False},
                 'start_time': 5000000000,
                 'end_time': 6000000000,
                 'attributes': {
                     'code.filepath': IsStr(),
                     'code.lineno': 123,
-                    'logfire.msg_template': 'Responses API',
+                    'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
                     'logfire.span_type': 'span',
                     'logfire.msg': "Responses API with 'gpt-4o'",
                     'response_id': 'resp_67cee263c6e0819184efdc0fe2624cc8',
@@ -1544,15 +1544,15 @@ async def test_responses_simple(exporter: TestExporter):
     assert exporter.exported_spans_as_dict(parse_json_attributes=True) == snapshot(
         [
             {
-                'name': 'Responses API',
+                'name': 'Responses API with {gen_ai.request.model!r}',
                 'context': {'trace_id': 1, 'span_id': 5, 'is_remote': False},
                 'parent': {'trace_id': 1, 'span_id': 3, 'is_remote': False},
                 'start_time': 3000000000,
                 'end_time': 4000000000,
                 'attributes': {
                     'code.filepath': IsStr(),
                     'code.lineno': 123,
-                    'logfire.msg_template': 'Responses API',
+                    'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
                     'logfire.span_type': 'span',
                     'logfire.msg': "Responses API with 'gpt-4o'",
                     'response_id': 'resp_67ceee053cdc81919f39173ee02cb88e',
@@ -1723,15 +1723,15 @@ async def test_responses_simple(exporter: TestExporter):
                 },
             },
             {
-                'name': 'Responses API',
+                'name': 'Responses API with {gen_ai.request.model!r}',
                 'context': {'trace_id': 1, 'span_id': 9, 'is_remote': False},
                 'parent': {'trace_id': 1, 'span_id': 7, 'is_remote': False},
                 'start_time': 7000000000,
                 'end_time': 8000000000,
                 'attributes': {
                     'code.filepath': IsStr(),
                     'code.lineno': 123,
-                    'logfire.msg_template': 'Responses API',
+                    'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
                     'logfire.span_type': 'span',
                     'logfire.msg': "Responses API with 'gpt-4o'",
                     'response_id': 'resp_67ceee0623ac819190454bc7af968938',
@@ -1962,15 +1962,15 @@ async def test_file_search(exporter: TestExporter):
     assert exporter.exported_spans_as_dict(parse_json_attributes=True) == snapshot(
         [
             {
-                'name': 'Responses API',
+                'name': 'Responses API with {gen_ai.request.model!r}',
                 'context': {'trace_id': 1, 'span_id': 5, 'is_remote': False},
                 'parent': {'trace_id': 1, 'span_id': 3, 'is_remote': False},
                 'start_time': 3000000000,
                 'end_time': 4000000000,
                 'attributes': {
                     'code.filepath': IsStr(),
                     'code.lineno': 123,
-                    'logfire.msg_template': 'Responses API',
+                    'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
                     'logfire.span_type': 'span',
                     'logfire.msg': "Responses API with 'gpt-4o'",
                     'response_id': 'resp_67ceff39d5e88191885004de76d26e43',
@@ -2220,15 +2220,15 @@ async def test_file_search(exporter: TestExporter):
                 },
             },
             {
-                'name': 'Responses API',
+                'name': 'Responses API with {gen_ai.request.model!r}',
                 'context': {'trace_id': 1, 'span_id': 9, 'is_remote': False},
                 'parent': {'trace_id': 1, 'span_id': 7, 'is_remote': False},
                 'start_time': 7000000000,
                 'end_time': 8000000000,
                 'attributes': {
                     'code.filepath': IsStr(),
                     'code.lineno': 123,
-                    'logfire.msg_template': 'Responses API',
+                    'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
                     'logfire.span_type': 'span',
                     'logfire.msg': "Responses API with 'gpt-4o'",
                     'response_id': 'resp_67ceff3c84548191b620a2cf4c2e37f2',
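
Note that only 'name' and 'logfire.msg_template' change in these snapshots; 'logfire.msg' was already "Responses API with 'gpt-4o'", because the removed on_ending code appended the same suffix when formatting the message. A rough illustration of the rendering, using plain str.format with a simplified 'model' placeholder (a stand-in, since the real key gen_ai.request.model contains dots and goes through logfire's own formatter):

template = 'Responses API with {model!r}'   # stand-in for 'Responses API with {gen_ai.request.model!r}'
print(template.format(model='gpt-4o'))      # -> Responses API with 'gpt-4o'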
