Skip to content

Commit be8620b

Browse files
committed
refactor: update span lifecycle to use the SDK instead of setting context manually
1 parent 4280eb9 commit be8620b

File tree

3 files changed

+67
-38
lines changed

3 files changed

+67
-38
lines changed

util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py

Lines changed: 44 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
handler = get_telemetry_handler()
3030
3131
# Create an invocation object with your request data
32-
# The span and context_token attributes are set by the TelemetryHandler, and
32+
# The span and span_scope attributes are set by the TelemetryHandler, and
3333
# managed by the TelemetryHandler during the lifecycle of the span.
3434
3535
# Use the context manager to manage the lifecycle of an LLM invocation.
@@ -63,7 +63,6 @@
6363
from contextlib import contextmanager
6464
from typing import Iterator, Optional
6565

66-
from opentelemetry import context as otel_context
6766
from opentelemetry.semconv._incubating.attributes import (
6867
gen_ai_attributes as GenAI,
6968
)
@@ -72,13 +71,16 @@
7271
SpanKind,
7372
TracerProvider,
7473
get_tracer,
75-
set_span_in_context,
74+
use_span,
7675
)
7776
from opentelemetry.util.genai.span_utils import (
7877
_apply_error_attributes,
7978
_apply_finish_attributes,
8079
)
81-
from opentelemetry.util.genai.types import Error, LLMInvocation
80+
from opentelemetry.util.genai.types import (
81+
Error,
82+
LLMInvocation,
83+
)
8284
from opentelemetry.util.genai.version import __version__
8385

8486

@@ -101,41 +103,56 @@ def start_llm(
101103
invocation: LLMInvocation,
102104
) -> LLMInvocation:
103105
"""Start an LLM invocation and create a pending span entry."""
104-
# Create a span and attach it as current; keep the token to detach later
106+
# Create a span and activate it using the OpenTelemetry helper scope
105107
span = self._tracer.start_span(
106108
name=f"{GenAI.GenAiOperationNameValues.CHAT.value} {invocation.request_model}",
107109
kind=SpanKind.CLIENT,
108110
)
109111
invocation.span = span
110-
invocation.context_token = otel_context.attach(
111-
set_span_in_context(span)
112+
scope = use_span(
113+
span,
114+
end_on_exit=False,
115+
record_exception=False,
116+
set_status_on_exception=False,
112117
)
118+
scope.__enter__()
119+
invocation.span_scope = scope
113120
return invocation
114121

115122
def stop_llm(self, invocation: LLMInvocation) -> LLMInvocation: # pylint: disable=no-self-use
116123
"""Finalize an LLM invocation successfully and end its span."""
117-
if invocation.context_token is None or invocation.span is None:
124+
if invocation.span_scope is None or invocation.span is None:
118125
# TODO: Provide feedback that this invocation was not started
119126
return invocation
120127

121-
_apply_finish_attributes(invocation.span, invocation)
122-
# Detach context and end span
123-
otel_context.detach(invocation.context_token)
124-
invocation.span.end()
128+
scope = invocation.span_scope
129+
span = invocation.span
130+
try:
131+
_apply_finish_attributes(span, invocation)
132+
finally:
133+
scope.__exit__(None, None, None)
134+
span.end()
135+
invocation.span_scope = None
136+
invocation.span = None
125137
return invocation
126138

127139
def fail_llm( # pylint: disable=no-self-use
128140
self, invocation: LLMInvocation, error: Error
129141
) -> LLMInvocation:
130142
"""Fail an LLM invocation and end its span with error status."""
131-
if invocation.context_token is None or invocation.span is None:
143+
if invocation.span_scope is None or invocation.span is None:
132144
# TODO: Provide feedback that this invocation was not started
133145
return invocation
134146

135-
_apply_error_attributes(invocation.span, error)
136-
# Detach context and end span
137-
otel_context.detach(invocation.context_token)
138-
invocation.span.end()
147+
scope = invocation.span_scope
148+
span = invocation.span
149+
try:
150+
_apply_error_attributes(span, error)
151+
finally:
152+
scope.__exit__(None, None, None)
153+
span.end()
154+
invocation.span_scope = None
155+
invocation.span = None
139156
return invocation
140157

141158
@contextmanager
@@ -144,21 +161,22 @@ def llm(
144161
) -> Iterator[LLMInvocation]:
145162
"""Context manager for LLM invocations.
146163
147-
Only set data attributes on the invocation object, do not modify the span or context.
148-
149-
Starts the span on entry. On normal exit, finalizes the invocation and ends the span.
150-
If an exception occurs inside the context, marks the span as error, ends it, and
151-
re-raises the original exception.
164+
Only set data attributes on the invocation object, do not modify the span or
165+
context. Starts the span on entry. On normal exit, finalizes the invocation and
166+
ends the span. If an exception occurs inside the context, marks the span as
167+
error, ends it, and re-raises the original exception.
152168
"""
153169
if invocation is None:
154-
invocation = LLMInvocation(
155-
request_model="",
156-
)
170+
invocation = LLMInvocation(request_model="")
171+
157172
self.start_llm(invocation)
158173
try:
159174
yield invocation
160175
except Exception as exc:
161-
self.fail_llm(invocation, Error(message=str(exc), type=type(exc)))
176+
self.fail_llm(
177+
invocation,
178+
Error(message=str(exc), type=type(exc)),
179+
)
162180
raise
163181
self.stop_llm(invocation)
164182

util/opentelemetry-util-genai/src/opentelemetry/util/genai/span_utils.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -124,9 +124,14 @@ def _apply_finish_attributes(span: Span, invocation: LLMInvocation) -> None:
124124

125125

126126
def _apply_error_attributes(span: Span, error: Error) -> None:
127-
"""Apply status and error attributes common to error() paths."""
128-
span.set_status(Status(StatusCode.ERROR, error.message))
127+
"""Apply status and error attributes common to error() paths.
128+
129+
The helper stays separate from the context manager's exception handling so we can
130+
attach the GenAI-specific error attributes defined in the semantic conventions
131+
(see https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-agent-spans.md).
132+
"""
129133
if span.is_recording():
134+
span.set_status(Status(StatusCode.ERROR, error.message))
130135
span.set_attribute(ErrorAttributes.ERROR_TYPE, error.type.__qualname__)
131136

132137

util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py

Lines changed: 16 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -13,18 +13,21 @@
1313
# limitations under the License.
1414

1515

16-
from contextvars import Token
1716
from dataclasses import dataclass, field
1817
from enum import Enum
19-
from typing import Any, Dict, List, Literal, Optional, Type, Union
18+
from typing import (
19+
Any,
20+
ContextManager,
21+
Dict,
22+
List,
23+
Literal,
24+
Optional,
25+
Type,
26+
Union,
27+
)
2028

21-
from typing_extensions import TypeAlias
22-
23-
from opentelemetry.context import Context
2429
from opentelemetry.trace import Span
2530

26-
ContextToken: TypeAlias = Token[Context]
27-
2831

2932
class ContentCapturingMode(Enum):
3033
# Do not capture content (default).
@@ -95,13 +98,16 @@ def _new_str_any_dict() -> Dict[str, Any]:
9598
class LLMInvocation:
9699
"""
97100
Represents a single LLM call invocation. When creating an LLMInvocation object,
98-
only update the data attributes. The span and context_token attributes are
99-
set by the TelemetryHandler.
101+
only update the data attributes. The span and span_scope attributes are set by the
102+
TelemetryHandler.
100103
"""
101104

102105
request_model: str
103-
context_token: Optional[ContextToken] = None
104106
span: Optional[Span] = None
107+
# Internal handle returned by opentelemetry.trace.use_span to keep the span active.
108+
span_scope: Optional[ContextManager[Span]] = field(
109+
default=None, compare=False, repr=False
110+
)
105111
input_messages: List[InputMessage] = field(
106112
default_factory=_new_input_messages
107113
)

0 commit comments

Comments (0)