util/opentelemetry-util-genai/CHANGELOG.md (3 additions & 0 deletions)
@@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## Unreleased

- Add more Semconv attributes to LLMInvocation spans.
  ([#3862](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3862))

## Version 0.2b0 (2025-10-14)

- Add jsonlines support to fsspec uploader
@@ -132,6 +132,7 @@ def fail_llm( # pylint: disable=no-self-use
# TODO: Provide feedback that this invocation was not started
return invocation

_apply_finish_attributes(invocation.span, invocation)
_apply_error_attributes(invocation.span, error)
# Detach context and end span
otel_context.detach(invocation.context_token)
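
With this change, `fail_llm` applies the full set of finish-time attributes before the error attributes, so spans for failed calls carry the same `gen_ai.*` request/response data as successful ones. A minimal usage sketch of the effect (hypothetical: module paths, `get_telemetry_handler`, and the `Error`/`LLMInvocation` shapes are assumptions based on opentelemetry-util-genai 0.2b0, not part of this diff):

```python
# Sketch only: import paths and handler method names are assumed from
# opentelemetry-util-genai 0.2b0, not shown in this diff.
from opentelemetry.util.genai.handler import get_telemetry_handler
from opentelemetry.util.genai.types import Error, LLMInvocation

handler = get_telemetry_handler()
invocation = LLMInvocation(
    request_model="gpt-4",
    temperature=0.2,  # request params are now recorded even when the call fails
    max_tokens=128,
)
handler.start_llm(invocation)
try:
    raise TimeoutError("upstream timeout")
except TimeoutError as exc:
    # fail_llm now emits gen_ai.request.*/gen_ai.response.* plus error.type
    handler.fail_llm(invocation, Error(message=str(exc), type=type(exc)))
```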
@@ -13,7 +13,7 @@
# limitations under the License.

from dataclasses import asdict
from typing import List
from typing import Any, Dict, List, Optional

from opentelemetry.semconv._incubating.attributes import (
gen_ai_attributes as GenAI,
@@ -60,26 +60,7 @@ def _apply_common_span_attributes(
# TODO: clean provider name to match GenAiProviderNameValues?
span.set_attribute(GenAI.GEN_AI_PROVIDER_NAME, invocation.provider)

if invocation.output_messages:
span.set_attribute(
GenAI.GEN_AI_RESPONSE_FINISH_REASONS,
[gen.finish_reason for gen in invocation.output_messages],
)

if invocation.response_model_name is not None:
span.set_attribute(
GenAI.GEN_AI_RESPONSE_MODEL, invocation.response_model_name
)
if invocation.response_id is not None:
span.set_attribute(GenAI.GEN_AI_RESPONSE_ID, invocation.response_id)
if invocation.input_tokens is not None:
span.set_attribute(
GenAI.GEN_AI_USAGE_INPUT_TOKENS, invocation.input_tokens
)
if invocation.output_tokens is not None:
span.set_attribute(
GenAI.GEN_AI_USAGE_OUTPUT_TOKENS, invocation.output_tokens
)
_apply_response_attributes(span, invocation)


def _maybe_set_span_messages(
@@ -112,6 +93,8 @@ def _apply_finish_attributes(span: Span, invocation: LLMInvocation) -> None:
_maybe_set_span_messages(
span, invocation.input_messages, invocation.output_messages
)
_apply_request_attributes(span, invocation)
_apply_response_attributes(span, invocation)
span.set_attributes(invocation.attributes)


@@ -122,7 +105,68 @@ def _apply_error_attributes(span: Span, error: Error) -> None:
span.set_attribute(ErrorAttributes.ERROR_TYPE, error.type.__qualname__)


def _apply_request_attributes(span: Span, invocation: LLMInvocation) -> None:
"""Attach GenAI request semantic convention attributes to the span."""
attributes: Dict[str, Any] = {}
if invocation.temperature is not None:
attributes[GenAI.GEN_AI_REQUEST_TEMPERATURE] = invocation.temperature
if invocation.top_p is not None:
attributes[GenAI.GEN_AI_REQUEST_TOP_P] = invocation.top_p
if invocation.frequency_penalty is not None:
attributes[GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY] = (
invocation.frequency_penalty
)
if invocation.presence_penalty is not None:
attributes[GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY] = (
invocation.presence_penalty
)
if invocation.max_tokens is not None:
attributes[GenAI.GEN_AI_REQUEST_MAX_TOKENS] = invocation.max_tokens
if invocation.stop_sequences is not None:
attributes[GenAI.GEN_AI_REQUEST_STOP_SEQUENCES] = (
invocation.stop_sequences
)
if invocation.seed is not None:
attributes[GenAI.GEN_AI_REQUEST_SEED] = invocation.seed
if attributes:
span.set_attributes(attributes)


def _apply_response_attributes(span: Span, invocation: LLMInvocation) -> None:
"""Attach GenAI response semantic convention attributes to the span."""
attributes: Dict[str, Any] = {}

finish_reasons: Optional[List[str]]
if invocation.response_finish_reasons is not None:
finish_reasons = invocation.response_finish_reasons
elif invocation.output_messages:
finish_reasons = [
message.finish_reason for message in invocation.output_messages
]
else:
finish_reasons = None

if finish_reasons:
attributes[GenAI.GEN_AI_RESPONSE_FINISH_REASONS] = finish_reasons

if invocation.response_model_name is not None:
attributes[GenAI.GEN_AI_RESPONSE_MODEL] = (
invocation.response_model_name
)
if invocation.response_id is not None:
attributes[GenAI.GEN_AI_RESPONSE_ID] = invocation.response_id
if invocation.input_tokens is not None:
attributes[GenAI.GEN_AI_USAGE_INPUT_TOKENS] = invocation.input_tokens
if invocation.output_tokens is not None:
attributes[GenAI.GEN_AI_USAGE_OUTPUT_TOKENS] = invocation.output_tokens

if attributes:
span.set_attributes(attributes)


__all__ = [
"_apply_finish_attributes",
"_apply_error_attributes",
"_apply_request_attributes",
"_apply_response_attributes",
]
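
As a quick sanity check, the sketch below (hypothetical, written as if it ran inside this same module, since these are private underscore helpers; the tracer setup assumes a configured opentelemetry-sdk `TracerProvider`) shows which semantic-convention keys the two new helpers populate:

```python
# Assumes this runs in the same module as the helpers above; field names are
# taken from the LLMInvocation diff further down.
from opentelemetry import trace

invocation = LLMInvocation(
    request_model="gpt-4",
    temperature=0.1,
    top_p=0.9,
    max_tokens=256,
    stop_sequences=["END"],
    seed=42,
    response_model_name="gpt-4-0613",
    response_id="chatcmpl-123",
    response_finish_reasons=["stop"],
    input_tokens=12,
    output_tokens=34,
)

tracer = trace.get_tracer("example")
with tracer.start_as_current_span("chat gpt-4") as span:
    # gen_ai.request.{temperature,top_p,max_tokens,stop_sequences,seed}
    _apply_request_attributes(span, invocation)
    # gen_ai.response.{model,id,finish_reasons}, gen_ai.usage.{input,output}_tokens
    _apply_response_attributes(span, invocation)
```

Note that `_apply_response_attributes` prefers the explicit `response_finish_reasons` field and only falls back to deriving finish reasons from `output_messages`; an empty list is skipped as well, because the `if finish_reasons:` truthiness check treats `[]` like `None`.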
@@ -111,9 +111,17 @@ class LLMInvocation:
provider: Optional[str] = None
response_model_name: Optional[str] = None
response_id: Optional[str] = None
response_finish_reasons: Optional[List[str]] = None
input_tokens: Optional[int] = None
output_tokens: Optional[int] = None
attributes: Dict[str, Any] = field(default_factory=_new_str_any_dict)
temperature: Optional[float] = None
top_p: Optional[float] = None
frequency_penalty: Optional[float] = None
presence_penalty: Optional[float] = None
max_tokens: Optional[int] = None
stop_sequences: Optional[List[str]] = None
seed: Optional[int] = None


@dataclass
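
For instrumentation authors, the new `LLMInvocation` fields map one-to-one onto `gen_ai.request.*` attributes. A hypothetical adapter that copies OpenAI-style kwargs onto an invocation could look like the following (the kwarg names and the import path are assumptions, not part of this diff):

```python
from typing import Any

from opentelemetry.util.genai.types import LLMInvocation  # import path assumed


def invocation_from_kwargs(model: str, **kwargs: Any) -> LLMInvocation:
    # Unset kwargs stay None, so _apply_request_attributes skips the
    # corresponding gen_ai.request.* attributes entirely.
    return LLMInvocation(
        request_model=model,
        temperature=kwargs.get("temperature"),
        top_p=kwargs.get("top_p"),
        frequency_penalty=kwargs.get("frequency_penalty"),
        presence_penalty=kwargs.get("presence_penalty"),
        max_tokens=kwargs.get("max_tokens"),
        stop_sequences=kwargs.get("stop"),  # OpenAI names this parameter "stop"
        seed=kwargs.get("seed"),
    )
```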