|
| 1 | +# Copyright The OpenTelemetry Authors |
| 2 | +# |
| 3 | +# Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | +# you may not use this file except in compliance with the License. |
| 5 | +# You may obtain a copy of the License at |
| 6 | +# |
| 7 | +# http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | +# |
| 9 | +# Unless required by applicable law or agreed to in writing, software |
| 10 | +# distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | +# See the License for the specific language governing permissions and |
| 13 | +# limitations under the License. |
| 14 | + |
| 15 | +""" |
| 16 | +Span generation utilities for GenAI telemetry. |
| 17 | +
|
| 18 | +This module maps GenAI (Generative AI) invocations to OpenTelemetry spans and |
| 19 | +applies GenAI semantic convention attributes. |
| 20 | +
|
| 21 | +Classes: |
| 22 | + - BaseTelemetryGenerator: Abstract base for GenAI telemetry emitters. |
| 23 | + - SpanGenerator: Concrete implementation that creates and finalizes spans |
| 24 | + for LLM operations (e.g., chat) and records input/output messages when |
| 25 | + experimental mode and content capture settings allow. |
| 26 | +
|
| 27 | +Usage: |
| 28 | + See `opentelemetry/util/genai/handler.py` for `TelemetryHandler`, which |
| 29 | + constructs `LLMInvocation` objects and delegates to `SpanGenerator.start`, |
| 30 | + `SpanGenerator.finish`, and `SpanGenerator.error` to produce spans that |
| 31 | + follow the GenAI semantic conventions. |
| 32 | +""" |
| 33 | + |
| 34 | +import json |
| 35 | +from contextlib import contextmanager |
| 36 | +from dataclasses import asdict, dataclass, field |
| 37 | +from typing import Any, Dict, List, Optional |
| 38 | +from uuid import UUID |
| 39 | + |
| 40 | +from opentelemetry import trace |
| 41 | +from opentelemetry.semconv._incubating.attributes import ( |
| 42 | + gen_ai_attributes as GenAI, |
| 43 | +) |
| 44 | +from opentelemetry.semconv.attributes import ( |
| 45 | + error_attributes as ErrorAttributes, |
| 46 | +) |
| 47 | +from opentelemetry.trace import ( |
| 48 | + Span, |
| 49 | + SpanKind, |
| 50 | + Tracer, |
| 51 | + set_span_in_context, |
| 52 | + use_span, |
| 53 | +) |
| 54 | +from opentelemetry.trace.status import Status, StatusCode |
| 55 | +from opentelemetry.util.genai.utils import ( |
| 56 | + ContentCapturingMode, |
| 57 | + get_content_capturing_mode, |
| 58 | + is_experimental_mode, |
| 59 | +) |
| 60 | +from opentelemetry.util.types import AttributeValue |
| 61 | + |
| 62 | +from .types import Error, InputMessage, LLMInvocation, OutputMessage |
| 63 | + |
| 64 | + |
@dataclass
class _SpanState:
    """Book-keeping record for one in-flight invocation span.

    Stored in ``SpanGenerator.spans`` keyed by the invocation's ``run_id``.
    """

    # The live OpenTelemetry span for this invocation.
    span: Span
    # run_ids of child invocations; their spans are ended when this
    # span is ended (see SpanGenerator._end_span).
    children: List[UUID] = field(default_factory=list)
| 70 | + |
def _apply_common_span_attributes(
    span: Span, invocation: LLMInvocation
) -> None:
    """Apply attributes shared by the finish() and error() paths.

    Sets the operation name, request model, provider, response finish
    reasons, and response/usage attributes on ``span``, reading values
    from ``invocation.attributes``. Mutates the span in place and
    returns ``None``.

    Args:
        span: The span to annotate.
        invocation: The LLM invocation whose attributes are recorded.
    """
    request_model = invocation.attributes.get("request_model")
    provider = invocation.attributes.get("provider")

    span.set_attribute(
        GenAI.GEN_AI_OPERATION_NAME, GenAI.GenAiOperationNameValues.CHAT.value
    )
    # Truthiness check deliberately skips empty-string model names too.
    if request_model:
        span.set_attribute(GenAI.GEN_AI_REQUEST_MODEL, request_model)
    if provider is not None:
        # TODO: clean provider name to match GenAiProviderNameValues?
        span.set_attribute(GenAI.GEN_AI_PROVIDER_NAME, provider)

    finish_reasons: List[str] = [
        gen.finish_reason for gen in invocation.chat_generations
    ]
    if finish_reasons:
        span.set_attribute(
            GenAI.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons
        )

    _set_response_and_usage_attributes(
        span,
        invocation.attributes.get("response_model_name"),
        invocation.attributes.get("response_id"),
        invocation.attributes.get("input_tokens"),
        invocation.attributes.get("output_tokens"),
    )
| 110 | + |
def _set_response_and_usage_attributes(
    span: Span,
    response_model: Optional[str],
    response_id: Optional[str],
    prompt_tokens: Optional[AttributeValue],
    completion_tokens: Optional[AttributeValue],
) -> None:
    """Record response identity and token-usage attributes on *span*.

    Each attribute is set only when its value is usable: response
    model/id must be non-None, token counts must be numeric.
    """
    # Identity attributes: any non-None value is recorded as-is.
    for attr_key, attr_value in (
        (GenAI.GEN_AI_RESPONSE_MODEL, response_model),
        (GenAI.GEN_AI_RESPONSE_ID, response_id),
    ):
        if attr_value is not None:
            span.set_attribute(attr_key, attr_value)
    # Usage attributes: only genuinely numeric counts are recorded.
    for attr_key, attr_value in (
        (GenAI.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens),
        (GenAI.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens),
    ):
        if isinstance(attr_value, (int, float)):
            span.set_attribute(attr_key, attr_value)
| 127 | + |
def _maybe_set_span_messages(
    span: Span,
    input_messages: List[InputMessage],
    output_messages: List[OutputMessage],
) -> None:
    """Attach serialized input/output messages to *span* when allowed.

    Content is captured only in experimental mode with a capture mode
    that includes spans; otherwise this is a no-op.
    """
    capture_to_span = is_experimental_mode() and get_content_capturing_mode() in (
        ContentCapturingMode.SPAN_ONLY,
        ContentCapturingMode.SPAN_AND_EVENT,
    )
    if not capture_to_span:
        return

    serialized_inputs: List[Dict[str, Any]] = [
        asdict(msg) for msg in input_messages
    ]
    if serialized_inputs:
        span.set_attribute("gen_ai.input.messages", json.dumps(serialized_inputs))

    serialized_outputs: List[Dict[str, Any]] = [
        asdict(msg) for msg in output_messages
    ]
    if serialized_outputs:
        span.set_attribute(
            "gen_ai.output.messages", json.dumps(serialized_outputs)
        )
| 152 | + |
def _apply_finish_attributes(span: Span, invocation: LLMInvocation) -> None:
    """Apply attributes/messages common to finish() paths."""
    # Shared request/response attributes first, then (settings permitting)
    # the captured input/output message content.
    _apply_common_span_attributes(span, invocation)
    _maybe_set_span_messages(
        span, invocation.messages, invocation.chat_generations
    )
| 160 | + |
def _apply_error_attributes(span: Span, error: Error) -> None:
    """Apply status and error attributes common to error() paths."""
    span.set_status(Status(StatusCode.ERROR, error.message))
    # Attribute writes are skipped on non-recording spans; the status
    # is set regardless, matching the original behavior.
    if not span.is_recording():
        return
    span.set_attribute(ErrorAttributes.ERROR_TYPE, error.type.__qualname__)
| 167 | + |
class BaseTelemetryGenerator:
    """
    Abstract base for emitters mapping GenAI types -> OpenTelemetry.
    """

    def start(self, invocation: LLMInvocation) -> None:
        """Begin telemetry for *invocation*. Subclasses must override."""
        raise NotImplementedError

    def finish(self, invocation: LLMInvocation) -> None:
        """Finalize telemetry for a successful *invocation*. Subclasses must override."""
        raise NotImplementedError

    def error(self, error: Error, invocation: LLMInvocation) -> None:
        """Finalize telemetry for a failed *invocation*. Subclasses must override."""
        raise NotImplementedError
| 182 | + |
class SpanGenerator(BaseTelemetryGenerator):
    """
    Generates only spans.

    Spans are tracked per invocation ``run_id`` in :attr:`spans`, which
    also records parent/child relationships so children are ended with
    their parent.
    """

    def __init__(
        self,
        tracer: Optional[Tracer] = None,
    ):
        """Create a generator, defaulting to a module-scoped tracer."""
        self._tracer: Tracer = tracer or trace.get_tracer(__name__)

        # Map from run_id -> _SpanState, to keep track of spans and
        # parent/child relationships.
        self.spans: Dict[UUID, _SpanState] = {}

    def _start_span(
        self,
        name: str,
        kind: SpanKind,
        parent_run_id: Optional[UUID] = None,
    ) -> Span:
        """Start a span, parenting it under a registered run if one exists.

        Args:
            name: Span name.
            kind: Span kind (CLIENT for LLM calls).
            parent_run_id: run_id of the parent invocation, if any.
        """
        # NOTE: spans.get() returns a _SpanState, not a Span.
        parent_state = (
            self.spans.get(parent_run_id)
            if parent_run_id is not None
            else None
        )
        if parent_state is not None:
            ctx = set_span_in_context(parent_state.span)
            return self._tracer.start_span(name=name, kind=kind, context=ctx)
        # Top-level or missing parent: start in the current context.
        # (A previous discarded set_span_in_context(span) call here was a
        # no-op -- the function returns a new Context -- and was removed.)
        return self._tracer.start_span(name=name, kind=kind)

    def _end_span(self, run_id: UUID):
        """End the span for *run_id* and any registered child spans."""
        state = self.spans[run_id]
        for child_id in state.children:
            child_state = self.spans.get(child_id)
            if child_state:
                child_state.span.end()
        state.span.end()
        del self.spans[run_id]

    def start(self, invocation: LLMInvocation):
        """Create and register the invocation's span without ending it."""
        # Create/register the span; keep it active but do not end it here.
        with self._start_span_for_invocation(invocation):
            pass

    @contextmanager
    def _start_span_for_invocation(self, invocation: LLMInvocation):
        """Create/register a span for the invocation and yield it.

        The span is not ended automatically on exiting the context; callers
        must finalize via _end_span.
        """
        # Establish parent/child relationship if a parent span exists.
        parent_state = (
            self.spans.get(invocation.parent_run_id)
            if invocation.parent_run_id is not None
            else None
        )
        if parent_state is not None:
            parent_state.children.append(invocation.run_id)
        span = self._start_span(
            name=f"{GenAI.GenAiOperationNameValues.CHAT.value} {invocation.request_model}",
            kind=SpanKind.CLIENT,
            parent_run_id=invocation.parent_run_id,
        )
        with use_span(span, end_on_exit=False) as span:
            self.spans[invocation.run_id] = _SpanState(span=span)
            yield span

    def _finalize(self, invocation: LLMInvocation, apply_attributes) -> None:
        """Apply *apply_attributes* to the invocation's span and end it.

        If no span was registered (finish()/error() without a prior
        start()), a span is created, annotated, and ended immediately.
        """
        state = self.spans.get(invocation.run_id)
        if state is None:
            with self._start_span_for_invocation(invocation) as span:
                apply_attributes(span)
                self._end_span(invocation.run_id)
            return
        apply_attributes(state.span)
        self._end_span(invocation.run_id)

    def finish(self, invocation: LLMInvocation):
        """Finalize a successful invocation's span."""
        self._finalize(
            invocation,
            lambda span: _apply_finish_attributes(span, invocation),
        )

    def error(self, error: Error, invocation: LLMInvocation):
        """Finalize a failed invocation's span with error status."""
        self._finalize(
            invocation,
            lambda span: _apply_error_attributes(span, error),
        )