Skip to content

Commit 243bf8d

Browse files
committed
remove generator concept
1 parent 4a08d7f commit 243bf8d

File tree

2 files changed

+52
-124
lines changed

2 files changed

+52
-124
lines changed

util/opentelemetry-util-genai/src/opentelemetry/util/genai/generators.py

Lines changed: 0 additions & 117 deletions
This file was deleted.

util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py

Lines changed: 52 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -62,8 +62,24 @@
6262
from contextlib import contextmanager
6363
from typing import Any, Iterator, Optional
6464

65-
from opentelemetry.util.genai.generators import SpanGenerator
65+
from opentelemetry import context as otel_context
66+
from opentelemetry import trace
67+
from opentelemetry.semconv._incubating.attributes import (
68+
gen_ai_attributes as GenAI,
69+
)
70+
from opentelemetry.semconv.schemas import Schemas
71+
from opentelemetry.trace import (
72+
SpanKind,
73+
Tracer,
74+
get_tracer,
75+
set_span_in_context,
76+
)
77+
from opentelemetry.util.genai.span_utils import (
78+
_apply_error_attributes,
79+
_apply_finish_attributes,
80+
)
6681
from opentelemetry.util.genai.types import Error, LLMInvocation
82+
from opentelemetry.util.genai.version import __version__
6783

6884

6985
class TelemetryHandler:
@@ -73,28 +89,57 @@ class TelemetryHandler:
7389
"""
7490

7591
def __init__(self, **kwargs: Any):
    """Set up the handler's tracer.

    An optional ``tracer_provider`` may be passed via keyword arguments;
    when absent, the globally configured provider is used by ``get_tracer``.
    """
    provider = kwargs.get("tracer_provider")
    acquired = get_tracer(
        __name__,
        __version__,
        provider,
        schema_url=Schemas.V1_36_0.value,
    )
    # Fall back to the global tracer if acquisition yielded a falsy value.
    self._tracer: Tracer = acquired or trace.get_tracer(__name__)
77100

78101
def start_llm(
    self,
    invocation: LLMInvocation,
) -> LLMInvocation:
    """Open a CLIENT span for *invocation* and make it the current span.

    Both the span and the context-attach token are stored on the
    invocation so that ``stop_llm``/``fail_llm`` can later detach the
    context and end the span.
    """
    pending = self._tracer.start_span(
        name=f"{GenAI.GenAiOperationNameValues.CHAT.value} {invocation.request_model}",
        kind=SpanKind.CLIENT,
    )
    invocation.span = pending
    # Keep the token so the attached context can be restored later.
    token = otel_context.attach(set_span_in_context(pending))
    invocation.context_token = token
    return invocation
85116

86-
def stop_llm(self, invocation: LLMInvocation) -> LLMInvocation:  # pylint: disable=no-self-use
    """Finalize an LLM invocation successfully and end its span."""
    invocation.end_time = time.time()
    started = (
        invocation.context_token is not None
        and invocation.span is not None
    )
    if not started:
        # TODO: Provide feedback that this invocation was not started
        return invocation

    _apply_finish_attributes(invocation.span, invocation)
    # Restore the previous context before closing out the span.
    otel_context.detach(invocation.context_token)
    invocation.span.end()
    return invocation
91129

92-
def fail_llm(  # pylint: disable=no-self-use
    self, invocation: LLMInvocation, error: Error
) -> LLMInvocation:
    """Fail an LLM invocation and end its span with error status."""
    invocation.end_time = time.time()
    if invocation.span is None or invocation.context_token is None:
        # TODO: Provide feedback that this invocation was not started
        return invocation

    _apply_error_attributes(invocation.span, error)
    # Restore the previous context, then close out the span.
    otel_context.detach(invocation.context_token)
    invocation.span.end()
    return invocation
99144

100145
@contextmanager

0 commit comments

Comments (0)