
Commit 974ae91

remove non official semconvs
1 parent 3a594b7 commit 974ae91

File tree

3 files changed: +3 -138 lines changed


packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/shared/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -232,7 +232,7 @@ def _set_response_attributes(span, response):
         prompt_tokens_details = dict(usage.get("prompt_tokens_details", {}))
         _set_span_attribute(
             span,
-            GenAIAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
+            SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
             prompt_tokens_details.get("cached_tokens", 0),
         )
         return
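For context, `_set_span_attribute` here is the package's thin guard around `span.set_attribute`. A minimal sketch of the pattern, assuming the usual skip-empty behavior (the exact guard conditions are an assumption, not shown in this diff):

    def _set_span_attribute(span, name, value):
        # Skip unset values so spans don't accumulate empty attributes.
        if value is not None and value != "":
            span.set_attribute(name, value)

Given that guard, the hunk above changes only the attribute key: the cached-token count from `prompt_tokens_details` is now recorded under the package's own `SpanAttributes` constant rather than the `GenAIAttributes` one, in line with the commit's goal of dropping non-official semconv constants.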

packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/shared/chat_wrappers.py

Lines changed: 0 additions & 30 deletions

@@ -288,14 +288,6 @@ async def _handle_request(span, kwargs, instance):
     if Config.enable_trace_context_propagation:
         propagate_trace_context(span, kwargs)

-    # Reasoning request attributes
-    reasoning_effort = kwargs.get("reasoning_effort")
-    _set_span_attribute(
-        span,
-        SpanAttributes.LLM_REQUEST_REASONING_EFFORT,
-        reasoning_effort or ()
-    )
-

 @dont_throw
 def _handle_response(
@@ -327,28 +319,6 @@ def _handle_response(
     # span attributes
     _set_response_attributes(span, response_dict)

-    # Reasoning usage attributes
-    usage = response_dict.get("usage")
-    reasoning_tokens = None
-    if usage:
-        # Support both dict-style and object-style `usage`
-        tokens_details = (
-            usage.get("completion_tokens_details") if isinstance(usage, dict)
-            else getattr(usage, "completion_tokens_details", None)
-        )
-
-        if tokens_details:
-            reasoning_tokens = (
-                tokens_details.get("reasoning_tokens", None) if isinstance(tokens_details, dict)
-                else getattr(tokens_details, "reasoning_tokens", None)
-            )
-
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_USAGE_REASONING_TOKENS,
-            reasoning_tokens or 0,
-        )
-
     if should_emit_events():
         if response.choices is not None:
             for choice in response.choices:
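The main wrinkle in the deleted block was that `usage` may arrive either as a plain dict or as a typed SDK object, so every field read needed both access styles. A self-contained sketch of that pattern, using a hypothetical helper name (not part of this package):

    from typing import Any, Optional

    def _get_field(obj: Any, key: str) -> Optional[Any]:
        # Dict-style payloads use key lookup; object-style payloads
        # fall back to attribute access.
        if isinstance(obj, dict):
            return obj.get(key)
        return getattr(obj, key, None)

    # Illustrative use, mirroring the removed code:
    # tokens_details = _get_field(usage, "completion_tokens_details")
    # if tokens_details:
    #     reasoning_tokens = _get_field(tokens_details, "reasoning_tokens")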

packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/v1/responses_wrappers.py

Lines changed: 2 additions & 107 deletions

@@ -127,11 +127,6 @@ class TracedData(pydantic.BaseModel):
     request_model: Optional[str] = pydantic.Field(default=None)
     response_model: Optional[str] = pydantic.Field(default=None)

-    # Reasoning attributes
-    request_reasoning_summary: Optional[str] = pydantic.Field(default=None)
-    request_reasoning_effort: Optional[str] = pydantic.Field(default=None)
-    response_reasoning_effort: Optional[str] = pydantic.Field(default=None)
-

 responses: dict[str, TracedData] = {}
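For orientation, `TracedData` is a pydantic model whose fields all default to `None` so a response can be traced incrementally across wrapper calls; the hunk above simply drops three such fields. A minimal sketch of the pattern (abbreviated field set, assuming pydantic v2):

    from typing import Optional
    import pydantic

    class TracedData(pydantic.BaseModel):
        request_model: Optional[str] = pydantic.Field(default=None)
        response_model: Optional[str] = pydantic.Field(default=None)

    # Fields are filled in as data becomes available:
    td = TracedData(request_model="gpt-4o")
    td = TracedData(**{**td.model_dump(), "response_model": "gpt-4o-2024-08-06"})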

@@ -192,59 +187,12 @@ def set_data_attributes(traced_response: TracedData, span: Span):
             span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_tokens
         )
         if usage.input_tokens_details:
-            if hasattr(GenAIAttributes, 'GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS'):
-                _set_span_attribute(
-                    span,
-                    GenAIAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
-                    usage.input_tokens_details.cached_tokens,
-                )
-            elif hasattr(GenAIAttributes, 'GEN_AI_USAGE_INPUT_TOKENS_CACHED'):
-                _set_span_attribute(
-                    span,
-                    GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS_CACHED,
-                    usage.input_tokens_details.cached_tokens,
-                )
-
-        reasoning_tokens = None
-        tokens_details = (
-            usage.get("output_tokens_details") if isinstance(usage, dict)
-            else getattr(usage, "output_tokens_details", None)
-        )
-
-        if tokens_details:
-            reasoning_tokens = (
-                tokens_details.get("reasoning_tokens", None) if isinstance(tokens_details, dict)
-                else getattr(tokens_details, "reasoning_tokens", None)
-            )
-
-        # Only set reasoning tokens if the attribute exists
-        if hasattr(SpanAttributes, 'LLM_USAGE_REASONING_TOKENS'):
             _set_span_attribute(
                 span,
-                SpanAttributes.LLM_USAGE_REASONING_TOKENS,
-                reasoning_tokens or 0,
+                SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
+                usage.input_tokens_details.cached_tokens,
             )

-    # Reasoning attributes - only set if they exist in SpanAttributes
-    if hasattr(SpanAttributes, 'LLM_REQUEST_REASONING_SUMMARY'):
-        _set_span_attribute(
-            span,
-            f"{SpanAttributes.LLM_REQUEST_REASONING_SUMMARY}",
-            traced_response.request_reasoning_summary or (),
-        )
-    if hasattr(SpanAttributes, 'LLM_REQUEST_REASONING_EFFORT'):
-        _set_span_attribute(
-            span,
-            f"{SpanAttributes.LLM_REQUEST_REASONING_EFFORT}",
-            traced_response.request_reasoning_effort or (),
-        )
-    if hasattr(SpanAttributes, 'LLM_RESPONSE_REASONING_EFFORT'):
-        _set_span_attribute(
-            span,
-            f"{SpanAttributes.LLM_RESPONSE_REASONING_EFFORT}",
-            traced_response.response_reasoning_effort or (),
-        )
-
     if should_send_prompts():
         prompt_index = 0
         if traced_response.tools:
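The deleted branches relied on `hasattr` feature detection so the instrumentation would not crash on semconv package versions that lack a given constant. A generic sketch of that defensive pattern (the helper name is hypothetical, not this package's API):

    def _set_attr_if_supported(span, attrs_cls, const_name, value):
        # Emit the attribute only when this semconv version defines the constant.
        if hasattr(attrs_cls, const_name):
            span.set_attribute(getattr(attrs_cls, const_name), value)

    # e.g. _set_attr_if_supported(span, SpanAttributes, "LLM_USAGE_REASONING_TOKENS", 128)

After this commit the surviving call site needs no such guard, since `SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS` is referenced unconditionally.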
@@ -480,18 +428,6 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwargs):
                     "model", existing_data.get("request_model", "")
                 ),
                 response_model=existing_data.get("response_model", ""),
-                # Reasoning attributes
-                request_reasoning_summary=(
-                    kwargs.get("reasoning", {}).get(
-                        "summary", existing_data.get("request_reasoning_summary")
-                    )
-                ),
-                request_reasoning_effort=(
-                    kwargs.get("reasoning", {}).get(
-                        "effort", existing_data.get("request_reasoning_effort")
-                    )
-                ),
-                response_reasoning_effort=kwargs.get("reasoning", {}).get("effort"),
             )
         except Exception:
             traced_data = None
@@ -543,18 +479,6 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwargs):
                 output_text=existing_data.get("output_text", parsed_response_output_text),
                 request_model=existing_data.get("request_model", kwargs.get("model")),
                 response_model=existing_data.get("response_model", parsed_response.model),
-                # Reasoning attributes
-                request_reasoning_summary=(
-                    kwargs.get("reasoning", {}).get(
-                        "summary", existing_data.get("request_reasoning_summary")
-                    )
-                ),
-                request_reasoning_effort=(
-                    kwargs.get("reasoning", {}).get(
-                        "effort", existing_data.get("request_reasoning_effort")
-                    )
-                ),
-                response_reasoning_effort=kwargs.get("reasoning", {}).get("effort"),
             )
             responses[parsed_response.id] = traced_data
         except Exception:
@@ -618,18 +542,6 @@ async def async_responses_get_or_create_wrapper(
                 output_text=kwargs.get("output_text", existing_data.get("output_text")),
                 request_model=kwargs.get("model", existing_data.get("request_model")),
                 response_model=existing_data.get("response_model"),
-                # Reasoning attributes
-                request_reasoning_summary=(
-                    kwargs.get("reasoning", {}).get(
-                        "summary", existing_data.get("request_reasoning_summary")
-                    )
-                ),
-                request_reasoning_effort=(
-                    kwargs.get("reasoning", {}).get(
-                        "effort", existing_data.get("request_reasoning_effort")
-                    )
-                ),
-                response_reasoning_effort=kwargs.get("reasoning", {}).get("effort"),
             )
         except Exception:
             traced_data = None
@@ -682,18 +594,6 @@ async def async_responses_get_or_create_wrapper(
                 output_text=existing_data.get("output_text", parsed_response_output_text),
                 request_model=existing_data.get("request_model", kwargs.get("model")),
                 response_model=existing_data.get("response_model", parsed_response.model),
-                # Reasoning attributes
-                request_reasoning_summary=(
-                    kwargs.get("reasoning", {}).get(
-                        "summary", existing_data.get("request_reasoning_summary")
-                    )
-                ),
-                request_reasoning_effort=(
-                    kwargs.get("reasoning", {}).get(
-                        "effort", existing_data.get("request_reasoning_effort")
-                    )
-                ),
-                response_reasoning_effort=kwargs.get("reasoning", {}).get("effort"),
             )
             responses[parsed_response.id] = traced_data
         except Exception:
@@ -795,11 +695,6 @@ def __init__(
             output_text="",
             request_model=self._request_kwargs.get("model", ""),
             response_model="",
-            request_reasoning_summary=self._request_kwargs.get("reasoning", {}).get(
-                "summary"
-            ),
-            request_reasoning_effort=self._request_kwargs.get("reasoning", {}).get("effort"),
-            response_reasoning_effort=None,
         )

         self._complete_response_data = None
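A recurring expression in the removed constructor arguments is `kwargs.get("reasoning", {}).get("effort")`: chaining `dict.get` with an empty-dict default reads an optional nested field without raising `KeyError`. A small illustration of the pattern and its one sharp edge (hypothetical values):

    kwargs = {"model": "gpt-4o"}                 # no "reasoning" key
    kwargs.get("reasoning", {}).get("effort")    # -> None, no KeyError

    kwargs = {"reasoning": {"effort": "high"}}
    kwargs.get("reasoning", {}).get("effort")    # -> "high"

    # Caveat: the default applies only when the key is absent; an explicit
    # None value bypasses it, so the chained .get raises AttributeError:
    kwargs = {"reasoning": None}
    # kwargs.get("reasoning", {}).get("effort")  # AttributeError

    # (kwargs.get("reasoning") or {}).get("effort") avoids that edge.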
