Skip to content

Commit 6a19e04

Browse files
committed
remove metrics to reduce PR size
1 parent aeaa319 commit 6a19e04

File tree

4 files changed

+15
-96
lines changed

4 files changed

+15
-96
lines changed

util/opentelemetry-util-genai/CHANGELOG.md

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,5 +13,4 @@ to take on an enum (`NO_CONTENT/SPAN_ONLY/EVENT_ONLY/SPAN_AND_EVENT`) instead of
 ### Added
 
 - Generate Spans for LLM invocations
-- Generate Metrics for LLM invocations
 - Helper functions for starting and finishing LLM invocations

util/opentelemetry-util-genai/README.rst

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -20,24 +20,6 @@ This package provides these span attributes.
 -> gen_ai.input.messages: Str("[{\"role\": \"user\", \"content\": \"hello world\"}]")
 
 
-This package also provides these metric attributes.
-Token Usage Metrics:
--> gen_ai.provider.name: Str(openai)
--> gen_ai.operation.name: Str(chat)
--> gen_ai.framework: Str(langchain)
--> gen_ai.request.model: Str(gpt-3.5-turbo)
--> gen_ai.response.model: Str(gpt-3.5-turbo-0125)
--> gen_ai.usage.input_tokens: Int(24)
--> gen_ai.usage.output_tokens: Int(7)
--> gen_ai.token.type: Str(input|output)
-
-Duration Metrics:
--> gen_ai.provider.name: Str(openai)
--> gen_ai.operation.name: Str(chat)
--> gen_ai.framework: Str(langchain)
--> gen_ai.request.model: Str(gpt-3.5-turbo)
--> gen_ai.response.model: Str(gpt-3.5-turbo-0125)
-
 Installation
 ------------
 

util/opentelemetry-util-genai/src/opentelemetry/util/genai/generators.py

Lines changed: 13 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,6 @@
     _OpenTelemetryStabilitySignalType,
     _StabilityMode,
 )
-from opentelemetry.metrics import Histogram, Meter, get_meter
 from opentelemetry.semconv._incubating.attributes import (
     gen_ai_attributes as GenAI,
 )
@@ -67,7 +66,6 @@
 )
 from opentelemetry.util.types import AttributeValue
 
-from .instruments import Instruments
 from .types import Error, InputMessage, LLMInvocation, OutputMessage
 
 
@@ -81,7 +79,7 @@ class _SpanState:
     children: List[UUID] = field(default_factory=list)
 
 
-def _get_metric_attributes(
+def _get_genai_attributes(
     request_model: Optional[str],
     response_model: Optional[str],
     operation_name: Optional[str],
@@ -202,37 +200,6 @@ def _maybe_set_span_output_messages(
     )
 
 
-def _record_token_metrics(
-    token_histogram: Histogram,
-    prompt_tokens: Optional[AttributeValue],
-    completion_tokens: Optional[AttributeValue],
-    metric_attributes: Dict[str, AttributeValue],
-) -> None:
-    prompt_attrs: Dict[str, AttributeValue] = {
-        GenAI.GEN_AI_TOKEN_TYPE: GenAI.GenAiTokenTypeValues.INPUT.value
-    }
-    prompt_attrs.update(metric_attributes)
-    if isinstance(prompt_tokens, (int, float)):
-        token_histogram.record(prompt_tokens, attributes=prompt_attrs)
-
-    completion_attrs: Dict[str, AttributeValue] = {
-        GenAI.GEN_AI_TOKEN_TYPE: GenAI.GenAiTokenTypeValues.COMPLETION.value
-    }
-    completion_attrs.update(metric_attributes)
-    if isinstance(completion_tokens, (int, float)):
-        token_histogram.record(completion_tokens, attributes=completion_attrs)
-
-
-def _record_duration(
-    duration_histogram: Histogram,
-    invocation: LLMInvocation,
-    metric_attributes: Dict[str, AttributeValue],
-) -> None:
-    if invocation.end_time is not None:
-        elapsed: float = invocation.end_time - invocation.start_time
-        duration_histogram.record(elapsed, attributes=metric_attributes)
-
-
 class BaseTelemetryGenerator:
     """
     Abstract base for emitters mapping GenAI types -> OpenTelemetry.
@@ -248,21 +215,16 @@ def error(self, error: Error, invocation: LLMInvocation) -> None:
         raise NotImplementedError
 
 
-class SpanMetricGenerator(BaseTelemetryGenerator):
+class SpanGenerator(BaseTelemetryGenerator):
     """
-    Generates only spans and metrics (no events).
+    Generates only spans.
     """
 
     def __init__(
         self,
         tracer: Optional[Tracer] = None,
-        meter: Optional[Meter] = None,
     ):
         self._tracer: Tracer = tracer or trace.get_tracer(__name__)
-        _meter: Meter = meter or get_meter(__name__)
-        instruments = Instruments(_meter)
-        self._duration_histogram = instruments.operation_duration_histogram
-        self._token_histogram = instruments.token_usage_histogram
 
         # Map from run_id -> _SpanState, to keep track of spans and parent/child relationships
         self.spans: Dict[UUID, _SpanState] = {}
@@ -328,14 +290,10 @@ def _span_for_invocation(self, invocation: LLMInvocation):
     @staticmethod
     def _apply_common_span_attributes(
         span: Span, invocation: LLMInvocation
-    ) -> Tuple[
-        Dict[str, AttributeValue],
-        Optional[AttributeValue],
-        Optional[AttributeValue],
-    ]:
+    ) -> Tuple[Dict[str, AttributeValue]]:
         """Apply attributes shared by finish() and error() and compute metrics.
 
-        Returns (metric_attributes, prompt_tokens, completion_tokens).
+        Returns (genai_attributes).
         """
         request_model = invocation.attributes.get("request_model")
         system = invocation.attributes.get("system")
@@ -360,50 +318,33 @@ def _apply_common_span_attributes(
             prompt_tokens,
             completion_tokens,
         )
-
-        metric_attributes = _get_metric_attributes(
+        genai_attributes = _get_genai_attributes(
             request_model,
             response_model,
             GenAI.GenAiOperationNameValues.CHAT.value,
             system,
             framework,
         )
-        return metric_attributes, prompt_tokens, completion_tokens
+        return (genai_attributes,)
 
-    def _finalize_invocation(
-        self,
-        invocation: LLMInvocation,
-        metric_attributes: Dict[str, AttributeValue],
-    ) -> None:
+    def _finalize_invocation(self, invocation: LLMInvocation) -> None:
         """End span(s) and record duration for the invocation."""
         self._end_span(invocation.run_id)
-        _record_duration(
-            self._duration_histogram, invocation, metric_attributes
-        )
 
     def finish(self, invocation: LLMInvocation):
         with self._span_for_invocation(invocation) as span:
-            metric_attributes, prompt_tokens, completion_tokens = (
-                self._apply_common_span_attributes(span, invocation)
-            )
+            _ = self._apply_common_span_attributes(
+                span, invocation
+            )  # return value to be used with metrics
             _maybe_set_span_input_messages(span, invocation.messages)
             _maybe_set_span_output_messages(span, invocation.chat_generations)
-            _record_token_metrics(
-                self._token_histogram,
-                prompt_tokens,
-                completion_tokens,
-                metric_attributes,
-            )
-            self._finalize_invocation(invocation, metric_attributes)
+            self._finalize_invocation(invocation)
 
     def error(self, error: Error, invocation: LLMInvocation):
         with self._span_for_invocation(invocation) as span:
-            metric_attributes, _, _ = self._apply_common_span_attributes(
-                span, invocation
-            )
             span.set_status(Status(StatusCode.ERROR, error.message))
             if span.is_recording():
                 span.set_attribute(
                     ErrorAttributes.ERROR_TYPE, error.type.__qualname__
                 )
-            self._finalize_invocation(invocation, metric_attributes)
+            self._finalize_invocation(invocation)

util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@
 from opentelemetry.semconv.schemas import Schemas
 from opentelemetry.trace import get_tracer
 
-from .generators import SpanMetricGenerator
+from .generators import SpanGenerator
 from .types import Error, InputMessage, LLMInvocation, OutputMessage
 from .version import __version__
 
@@ -90,10 +90,7 @@ def __init__(self, emitter_type_full: bool = True, **kwargs: Any):
         )
 
         # TODO: trigger span+metric+event generation based on the full emitter flag
-        self._generator = SpanMetricGenerator(
-            tracer=self._tracer,
-            meter=self._meter,
-        )
+        self._generator = SpanGenerator(tracer=self._tracer)
 
         self._llm_registry: dict[UUID, LLMInvocation] = {}
         self._lock = Lock()

0 commit comments

Comments
 (0)