Skip to content

Commit 9d3926f

Browse files
committed
helper function and docstrings
1 parent 054ebe9 commit 9d3926f

File tree

1 file changed

+36
-35
lines changed
  • util/opentelemetry-util-genai/src/opentelemetry/util/genai

1 file changed

+36
-35
lines changed

util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py

Lines changed: 36 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,27 @@
4545
from .version import __version__
4646

4747

48+
# Fields that callers may pass via **attributes but that are modeled as
# first-class attributes on LLMInvocation rather than free-form metadata.
_KNOWN_INVOCATION_FIELDS = (
    "provider",
    "response_model_name",
    "response_id",
    "input_tokens",
    "output_tokens",
)


def _apply_known_attrs_to_invocation(
    invocation: LLMInvocation, attributes: dict[str, Any]
) -> None:
    """Pop known fields from ``attributes`` and set them on the invocation.

    Mutates the provided ``attributes`` dict by popping known keys
    (``provider``, ``response_model_name``, ``response_id``,
    ``input_tokens``, ``output_tokens``), leaving only unknown/custom
    attributes behind for the caller to persist into
    ``invocation.attributes``.

    Args:
        invocation: The invocation object to receive the known fields.
        attributes: Caller-supplied attribute dict; mutated in place.
    """
    # Data-driven instead of five copy-pasted if/pop stanzas: one place to
    # extend when a new known field is added.
    for field_name in _KNOWN_INVOCATION_FIELDS:
        if field_name in attributes:
            setattr(invocation, field_name, attributes.pop(field_name))
4869
class TelemetryHandler:
4970
"""
5071
High-level handler managing GenAI invocation lifecycles and emitting
@@ -72,24 +93,25 @@ def start_llm(
7293
run_id: Optional[UUID] = None,
7394
**attributes: Any,
7495
) -> UUID:
96+
"""Start an LLM invocation and create a pending span entry.
97+
98+
Known attributes provided via ``**attributes`` (``provider``,
99+
``response_model_name``, ``response_id``, ``input_tokens``,
100+
``output_tokens``) are extracted and set as explicit fields on the
101+
``LLMInvocation``. Any remaining keys are preserved in
102+
``invocation.attributes`` for custom metadata.
103+
104+
Returns the ``run_id`` used to track the invocation lifecycle.
105+
"""
75106
if run_id is None:
76107
run_id = uuid.uuid4()
77-
provider = attributes.pop("provider", None)
78-
response_model_name = attributes.pop("response_model_name", None)
79-
response_id = attributes.pop("response_id", None)
80-
input_tokens = attributes.pop("input_tokens", None)
81-
output_tokens = attributes.pop("output_tokens", None)
82108
invocation = LLMInvocation(
83109
request_model=request_model,
84110
messages=prompts,
85111
run_id=run_id,
86-
provider=provider,
87-
response_model_name=response_model_name,
88-
response_id=response_id,
89-
input_tokens=input_tokens,
90-
output_tokens=output_tokens,
91112
attributes=attributes,
92113
)
114+
_apply_known_attrs_to_invocation(invocation, invocation.attributes)
93115
self._llm_registry[invocation.run_id] = invocation
94116
self._generator.start(invocation)
95117
return invocation.run_id
@@ -100,43 +122,22 @@ def stop_llm(
100122
chat_generations: List[OutputMessage],
101123
**attributes: Any,
102124
) -> LLMInvocation:
125+
"""Finalize an LLM invocation successfully and end its span."""
103126
invocation = self._llm_registry.pop(run_id)
104127
invocation.end_time = time.time()
105128
invocation.chat_generations = chat_generations
106-
if "provider" in attributes:
107-
invocation.provider = attributes.pop("provider")
108-
if "response_model_name" in attributes:
109-
invocation.response_model_name = attributes.pop(
110-
"response_model_name"
111-
)
112-
if "response_id" in attributes:
113-
invocation.response_id = attributes.pop("response_id")
114-
if "input_tokens" in attributes:
115-
invocation.input_tokens = attributes.pop("input_tokens")
116-
if "output_tokens" in attributes:
117-
invocation.output_tokens = attributes.pop("output_tokens")
118-
# Keep any remaining attributes
129+
_apply_known_attrs_to_invocation(invocation, attributes)
119130
invocation.attributes.update(attributes)
120131
self._generator.finish(invocation)
121132
return invocation
122133

123134
def fail_llm(
124135
self, run_id: UUID, error: Error, **attributes: Any
125136
) -> LLMInvocation:
137+
"""Fail an LLM invocation and end its span with error status."""
126138
invocation = self._llm_registry.pop(run_id)
127139
invocation.end_time = time.time()
128-
if "provider" in attributes:
129-
invocation.provider = attributes.pop("provider")
130-
if "response_model_name" in attributes:
131-
invocation.response_model_name = attributes.pop(
132-
"response_model_name"
133-
)
134-
if "response_id" in attributes:
135-
invocation.response_id = attributes.pop("response_id")
136-
if "input_tokens" in attributes:
137-
invocation.input_tokens = attributes.pop("input_tokens")
138-
if "output_tokens" in attributes:
139-
invocation.output_tokens = attributes.pop("output_tokens")
140+
_apply_known_attrs_to_invocation(invocation, attributes)
140141
invocation.attributes.update(**attributes)
141142
self._generator.error(error, invocation)
142143
return invocation

0 commit comments

Comments
 (0)