Skip to content

Commit 054ebe9

Browse files
committed
flatten LLMInvocation to use attributes instead of dict keys
1 parent 77d9c3c commit 054ebe9

File tree

3 files changed

+59
-34
lines changed

3 files changed

+59
-34
lines changed

util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,10 +74,20 @@ def start_llm(
7474
) -> UUID:
7575
if run_id is None:
7676
run_id = uuid.uuid4()
77+
provider = attributes.pop("provider", None)
78+
response_model_name = attributes.pop("response_model_name", None)
79+
response_id = attributes.pop("response_id", None)
80+
input_tokens = attributes.pop("input_tokens", None)
81+
output_tokens = attributes.pop("output_tokens", None)
7782
invocation = LLMInvocation(
7883
request_model=request_model,
7984
messages=prompts,
8085
run_id=run_id,
86+
provider=provider,
87+
response_model_name=response_model_name,
88+
response_id=response_id,
89+
input_tokens=input_tokens,
90+
output_tokens=output_tokens,
8191
attributes=attributes,
8292
)
8393
self._llm_registry[invocation.run_id] = invocation
@@ -93,6 +103,19 @@ def stop_llm(
93103
invocation = self._llm_registry.pop(run_id)
94104
invocation.end_time = time.time()
95105
invocation.chat_generations = chat_generations
106+
if "provider" in attributes:
107+
invocation.provider = attributes.pop("provider")
108+
if "response_model_name" in attributes:
109+
invocation.response_model_name = attributes.pop(
110+
"response_model_name"
111+
)
112+
if "response_id" in attributes:
113+
invocation.response_id = attributes.pop("response_id")
114+
if "input_tokens" in attributes:
115+
invocation.input_tokens = attributes.pop("input_tokens")
116+
if "output_tokens" in attributes:
117+
invocation.output_tokens = attributes.pop("output_tokens")
118+
# Keep any remaining attributes
96119
invocation.attributes.update(attributes)
97120
self._generator.finish(invocation)
98121
return invocation
@@ -102,6 +125,18 @@ def fail_llm(
102125
) -> LLMInvocation:
103126
invocation = self._llm_registry.pop(run_id)
104127
invocation.end_time = time.time()
128+
if "provider" in attributes:
129+
invocation.provider = attributes.pop("provider")
130+
if "response_model_name" in attributes:
131+
invocation.response_model_name = attributes.pop(
132+
"response_model_name"
133+
)
134+
if "response_id" in attributes:
135+
invocation.response_id = attributes.pop("response_id")
136+
if "input_tokens" in attributes:
137+
invocation.input_tokens = attributes.pop("input_tokens")
138+
if "output_tokens" in attributes:
139+
invocation.output_tokens = attributes.pop("output_tokens")
105140
invocation.attributes.update(**attributes)
106141
self._generator.error(error, invocation)
107142
return invocation

util/opentelemetry-util-genai/src/opentelemetry/util/genai/span_utils.py

Lines changed: 17 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414

1515
import json
1616
from dataclasses import asdict
17-
from typing import Any, Dict, List, Optional
17+
from typing import Any, Dict, List
1818

1919
from opentelemetry.semconv._incubating.attributes import (
2020
gen_ai_attributes as GenAI,
@@ -31,7 +31,6 @@
3131
get_content_capturing_mode,
3232
is_experimental_mode,
3333
)
34-
from opentelemetry.util.types import AttributeValue
3534

3635
from .types import Error, InputMessage, LLMInvocation, OutputMessage
3736

@@ -43,8 +42,8 @@ def _apply_common_span_attributes(
4342
4443
Returns (genai_attributes) for use with metrics.
4544
"""
46-
request_model = invocation.attributes.get("request_model")
47-
provider = invocation.attributes.get("provider")
45+
request_model = invocation.request_model
46+
provider = invocation.provider
4847

4948
span.set_attribute(
5049
GenAI.GEN_AI_OPERATION_NAME, GenAI.GenAiOperationNameValues.CHAT.value
@@ -63,34 +62,20 @@ def _apply_common_span_attributes(
6362
GenAI.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons
6463
)
6564

66-
response_model = invocation.attributes.get("response_model_name")
67-
response_id = invocation.attributes.get("response_id")
68-
prompt_tokens = invocation.attributes.get("input_tokens")
69-
completion_tokens = invocation.attributes.get("output_tokens")
70-
_set_response_and_usage_attributes(
71-
span,
72-
response_model,
73-
response_id,
74-
prompt_tokens,
75-
completion_tokens,
76-
)
77-
78-
79-
def _set_response_and_usage_attributes(
80-
span: Span,
81-
response_model: Optional[str],
82-
response_id: Optional[str],
83-
prompt_tokens: Optional[AttributeValue],
84-
completion_tokens: Optional[AttributeValue],
85-
) -> None:
86-
if response_model is not None:
87-
span.set_attribute(GenAI.GEN_AI_RESPONSE_MODEL, response_model)
88-
if response_id is not None:
89-
span.set_attribute(GenAI.GEN_AI_RESPONSE_ID, response_id)
90-
if isinstance(prompt_tokens, (int, float)):
91-
span.set_attribute(GenAI.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens)
92-
if isinstance(completion_tokens, (int, float)):
93-
span.set_attribute(GenAI.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens)
65+
if invocation.response_model_name is not None:
66+
span.set_attribute(
67+
GenAI.GEN_AI_RESPONSE_MODEL, invocation.response_model_name
68+
)
69+
if invocation.response_id is not None:
70+
span.set_attribute(GenAI.GEN_AI_RESPONSE_ID, invocation.response_id)
71+
if isinstance(invocation.input_tokens, (int, float)):
72+
span.set_attribute(
73+
GenAI.GEN_AI_USAGE_INPUT_TOKENS, invocation.input_tokens
74+
)
75+
if isinstance(invocation.output_tokens, (int, float)):
76+
span.set_attribute(
77+
GenAI.GEN_AI_USAGE_OUTPUT_TOKENS, invocation.output_tokens
78+
)
9479

9580

9681
def _maybe_set_span_messages(

util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919
from typing import Any, Dict, List, Literal, Optional, Type, Union
2020
from uuid import UUID
2121

22+
from opentelemetry.util.types import AttributeValue
23+
2224

2325
class ContentCapturingMode(Enum):
2426
# Do not capture content (default).
@@ -86,9 +88,12 @@ class LLMInvocation:
8688
end_time: Optional[float] = None
8789
messages: List[InputMessage] = field(default_factory=list)
8890
chat_generations: List[OutputMessage] = field(default_factory=list)
91+
provider: Optional[str] = None
92+
response_model_name: Optional[str] = None
93+
response_id: Optional[str] = None
94+
input_tokens: Optional[AttributeValue] = None
95+
output_tokens: Optional[AttributeValue] = None
8996
attributes: Dict[str, Any] = field(default_factory=dict)
90-
span_id: int = 0
91-
trace_id: int = 0
9297

9398

9499
@dataclass

0 commit comments

Comments (0)