Skip to content

Commit 3a891b2

Browse files
committed
Revert "refactor: update span lifecycle to use sdk over setting context manually"
This reverts commit be8620b.
1 parent 0d49e28 commit 3a891b2

File tree

3 files changed

+38
-67
lines changed

3 files changed

+38
-67
lines changed

util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py

Lines changed: 26 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
handler = get_telemetry_handler()
3030
3131
# Create an invocation object with your request data
32-
# The span and span_scope attributes are set by the TelemetryHandler, and
32+
# The span and context_token attributes are set by the TelemetryHandler, and
3333
# managed by the TelemetryHandler during the lifecycle of the span.
3434
3535
# Use the context manager to manage the lifecycle of an LLM invocation.
@@ -63,6 +63,7 @@
6363
from contextlib import contextmanager
6464
from typing import Iterator, Optional
6565

66+
from opentelemetry import context as otel_context
6667
from opentelemetry.semconv._incubating.attributes import (
6768
gen_ai_attributes as GenAI,
6869
)
@@ -71,16 +72,13 @@
7172
SpanKind,
7273
TracerProvider,
7374
get_tracer,
74-
use_span,
75+
set_span_in_context,
7576
)
7677
from opentelemetry.util.genai.span_utils import (
7778
_apply_error_attributes,
7879
_apply_finish_attributes,
7980
)
80-
from opentelemetry.util.genai.types import (
81-
Error,
82-
LLMInvocation,
83-
)
81+
from opentelemetry.util.genai.types import Error, LLMInvocation
8482
from opentelemetry.util.genai.version import __version__
8583

8684

@@ -103,56 +101,41 @@ def start_llm(
103101
invocation: LLMInvocation,
104102
) -> LLMInvocation:
105103
"""Start an LLM invocation and create a pending span entry."""
106-
# Create a span and activate it using the OpenTelemetry helper scope
104+
# Create a span and attach it as current; keep the token to detach later
107105
span = self._tracer.start_span(
108106
name=f"{GenAI.GenAiOperationNameValues.CHAT.value} {invocation.request_model}",
109107
kind=SpanKind.CLIENT,
110108
)
111109
invocation.span = span
112-
scope = use_span(
113-
span,
114-
end_on_exit=False,
115-
record_exception=False,
116-
set_status_on_exception=False,
110+
invocation.context_token = otel_context.attach(
111+
set_span_in_context(span)
117112
)
118-
scope.__enter__()
119-
invocation.span_scope = scope
120113
return invocation
121114

122115
def stop_llm(self, invocation: LLMInvocation) -> LLMInvocation: # pylint: disable=no-self-use
123116
"""Finalize an LLM invocation successfully and end its span."""
124-
if invocation.span_scope is None or invocation.span is None:
117+
if invocation.context_token is None or invocation.span is None:
125118
# TODO: Provide feedback that this invocation was not started
126119
return invocation
127120

128-
scope = invocation.span_scope
129-
span = invocation.span
130-
try:
131-
_apply_finish_attributes(span, invocation)
132-
finally:
133-
scope.__exit__(None, None, None)
134-
span.end()
135-
invocation.span_scope = None
136-
invocation.span = None
121+
_apply_finish_attributes(invocation.span, invocation)
122+
# Detach context and end span
123+
otel_context.detach(invocation.context_token)
124+
invocation.span.end()
137125
return invocation
138126

139127
def fail_llm( # pylint: disable=no-self-use
140128
self, invocation: LLMInvocation, error: Error
141129
) -> LLMInvocation:
142130
"""Fail an LLM invocation and end its span with error status."""
143-
if invocation.span_scope is None or invocation.span is None:
131+
if invocation.context_token is None or invocation.span is None:
144132
# TODO: Provide feedback that this invocation was not started
145133
return invocation
146134

147-
scope = invocation.span_scope
148-
span = invocation.span
149-
try:
150-
_apply_error_attributes(span, error)
151-
finally:
152-
scope.__exit__(None, None, None)
153-
span.end()
154-
invocation.span_scope = None
155-
invocation.span = None
135+
_apply_error_attributes(invocation.span, error)
136+
# Detach context and end span
137+
otel_context.detach(invocation.context_token)
138+
invocation.span.end()
156139
return invocation
157140

158141
@contextmanager
@@ -161,22 +144,21 @@ def llm(
161144
) -> Iterator[LLMInvocation]:
162145
"""Context manager for LLM invocations.
163146
164-
Only set data attributes on the invocation object, do not modify the span or
165-
context. Starts the span on entry. On normal exit, finalizes the invocation and
166-
ends the span. If an exception occurs inside the context, marks the span as
167-
error, ends it, and re-raises the original exception.
147+
Only set data attributes on the invocation object, do not modify the span or context.
148+
149+
Starts the span on entry. On normal exit, finalizes the invocation and ends the span.
150+
If an exception occurs inside the context, marks the span as error, ends it, and
151+
re-raises the original exception.
168152
"""
169153
if invocation is None:
170-
invocation = LLMInvocation(request_model="")
171-
154+
invocation = LLMInvocation(
155+
request_model="",
156+
)
172157
self.start_llm(invocation)
173158
try:
174159
yield invocation
175160
except Exception as exc:
176-
self.fail_llm(
177-
invocation,
178-
Error(message=str(exc), type=type(exc)),
179-
)
161+
self.fail_llm(invocation, Error(message=str(exc), type=type(exc)))
180162
raise
181163
self.stop_llm(invocation)
182164

util/opentelemetry-util-genai/src/opentelemetry/util/genai/span_utils.py

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -116,14 +116,9 @@ def _apply_finish_attributes(span: Span, invocation: LLMInvocation) -> None:
116116

117117

118118
def _apply_error_attributes(span: Span, error: Error) -> None:
119-
"""Apply status and error attributes common to error() paths.
120-
121-
The helper stays separate from the context manager's exception handling so we can
122-
attach the GenAI-specific error attributes defined in the semantic conventions
123-
(see https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-agent-spans.md).
124-
"""
119+
"""Apply status and error attributes common to error() paths."""
120+
span.set_status(Status(StatusCode.ERROR, error.message))
125121
if span.is_recording():
126-
span.set_status(Status(StatusCode.ERROR, error.message))
127122
span.set_attribute(ErrorAttributes.ERROR_TYPE, error.type.__qualname__)
128123

129124

util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py

Lines changed: 10 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -13,21 +13,18 @@
1313
# limitations under the License.
1414

1515

16-
from contextlib import AbstractContextManager
16+
from contextvars import Token
1717
from dataclasses import dataclass, field
1818
from enum import Enum
19-
from typing import (
20-
Any,
21-
Dict,
22-
List,
23-
Literal,
24-
Optional,
25-
Type,
26-
Union,
27-
)
19+
from typing import Any, Dict, List, Literal, Optional, Type, Union
2820

21+
from typing_extensions import TypeAlias
22+
23+
from opentelemetry.context import Context
2924
from opentelemetry.trace import Span
3025

26+
ContextToken: TypeAlias = Token[Context]
27+
3128

3229
class ContentCapturingMode(Enum):
3330
# Do not capture content (default).
@@ -98,16 +95,13 @@ def _new_str_any_dict() -> Dict[str, Any]:
9895
class LLMInvocation:
9996
"""
10097
Represents a single LLM call invocation. When creating an LLMInvocation object,
101-
only update the data attributes. The span and span_scope attributes are set by the
102-
TelemetryHandler.
98+
only update the data attributes. The span and context_token attributes are
99+
set by the TelemetryHandler.
103100
"""
104101

105102
request_model: str
103+
context_token: Optional[ContextToken] = None
106104
span: Optional[Span] = None
107-
# Internal handle returned by opentelemetry.trace.use_span to keep the span active.
108-
span_scope: Optional[AbstractContextManager[Span]] = field(
109-
default=None, compare=False, repr=False
110-
)
111105
input_messages: List[InputMessage] = field(
112106
default_factory=_new_input_messages
113107
)

0 commit comments

Comments (0)