diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/.gitignore b/instrumentation-genai/opentelemetry-instrumentation-langchain/.gitignore new file mode 100644 index 0000000000..15f55bffd6 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/.gitignore @@ -0,0 +1,168 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# Mac files +.DS_Store + +# Environment variables +.env + +# sqlite database files +*.db +*.db-shm +*.db-wal + +# PNG files +*.png + +demo/ + +.ruff_cache + +.vscode/ + +output/ + +.terraform.lock.hcl +.terraform/ +foo.sh +tfplan +tfplan.txt +tfplan.json +terraform_output.json + + +# IntelliJ / PyCharm +.idea + + +*.txt + +.dockerconfigjson diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/decorator/main.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/decorator/main.py new file mode 100644 index 0000000000..a1865d4dc2 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/decorator/main.py @@ -0,0 +1,71 @@ +import os + +from dotenv import load_dotenv +from langchain_core.messages import HumanMessage, SystemMessage +from langchain_openai import ChatOpenAI + +from opentelemetry import _events, _logs, metrics, trace +from opentelemetry.exporter.otlp.proto.grpc._log_exporter import ( + OTLPLogExporter, +) +from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import ( + OTLPMetricExporter, +) +from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( + OTLPSpanExporter, +) + +# from opentelemetry.genai.sdk.decorators import llm +from opentelemetry.sdk._events import EventLoggerProvider +from opentelemetry.sdk._logs import LoggerProvider +from opentelemetry.sdk._logs.export import BatchLogRecordProcessor +from opentelemetry.sdk.metrics import MeterProvider +from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor +from opentelemetry.util.genai.decorators import llm + +# configure tracing +trace.set_tracer_provider(TracerProvider()) +trace.get_tracer_provider().add_span_processor( + BatchSpanProcessor(OTLPSpanExporter()) +) + +metric_reader = PeriodicExportingMetricReader(OTLPMetricExporter()) +metrics.set_meter_provider(MeterProvider(metric_readers=[metric_reader])) + +# configure logging and events +_logs.set_logger_provider(LoggerProvider()) +_logs.get_logger_provider().add_log_record_processor( + BatchLogRecordProcessor(OTLPLogExporter()) +) +_events.set_event_logger_provider(EventLoggerProvider()) + +# Load environment variables from .env file +load_dotenv() + + +@llm(name="invoke_langchain_model") +def invoke_model(messages): + # Get API key from environment variable or set a placeholder + api_key = os.getenv("OPENAI_API_KEY") + if not api_key: + raise ValueError("OPENAI_API_KEY environment variable must be set") + + llm = ChatOpenAI(model="gpt-3.5-turbo", api_key=api_key) + result = llm.invoke(messages) + return result + + +def main(): + messages = [ + SystemMessage(content="You are a helpful assistant!"), + HumanMessage(content="What is the capital of France?"), + ] + + result = invoke_model(messages) + print("LLM output:\n", result) + + +if __name__ == "__main__": + main() diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/.env 
b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/.env index e34c860897..df928327da 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/.env +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/.env @@ -5,4 +5,7 @@ OPENAI_API_KEY=sk-YOUR_API_KEY # OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 # OTEL_EXPORTER_OTLP_PROTOCOL=grpc +# Change to 'false' to hide prompt and completion content +OTEL_INSTRUMENTATION_LANGCHAIN_CAPTURE_MESSAGE_CONTENT=true + OTEL_SERVICE_NAME=opentelemetry-python-langchain-manual
diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/README.rst b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/README.rst index 2c829bc801..98e8d1f2b6 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/README.rst +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/README.rst @@ -1,3 +1,4 @@ + OpenTelemetry Langchain Instrumentation Example ===============================================
diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py index e18f53ff31..17e66c0a11 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py @@ -6,6 +6,7 @@ OTLPSpanExporter, ) from opentelemetry.instrumentation.langchain import LangChainInstrumentor + from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor
diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt index 0f986b1be0..9cd9c0bace 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt @@ -1,7 +1,8 @@ + langchain==0.3.21 langchain_openai opentelemetry-sdk>=1.31.0 opentelemetry-exporter-otlp-proto-grpc>=1.31.0 # Uncomment after langchain instrumentation is released -# opentelemetry-instrumentation-langchain~=2.0b0.dev \ No newline at end of file +# opentelemetry-instrumentation-langchain~=2.0b0.dev
diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/README.rst b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/README.rst index 3d141ed033..263f45458b 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/README.rst +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/README.rst @@ -1,3 +1,4 @@ + OpenTelemetry Langchain Zero-Code Instrumentation Example =========================================================
diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt index 63b3f56cf1..237e44c57c 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt @@
-1,3 +1,4 @@ + langchain==0.3.21 langchain_openai opentelemetry-sdk>=1.31.0 diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py index 1b135d883f..3c76f14c78 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py @@ -29,6 +29,7 @@ SystemMessage(content="You are a helpful assistant!"), HumanMessage(content="What is the capital of France?"), ] + result = llm.invoke(messages) LangChainInstrumentor().uninstrument() @@ -66,7 +67,6 @@ def __init__( def instrumentation_dependencies(self) -> Collection[str]: return _instruments - def _instrument(self, **kwargs: Any): """ Enable Langchain instrumentation. diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/config.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/config.py new file mode 100644 index 0000000000..3c2e0c9a75 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/config.py @@ -0,0 +1,33 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class Config: + """ + Shared static config for LangChain OTel instrumentation. + """ + + # Logger to handle exceptions during instrumentation + exception_logger = None + + # Globally suppress instrumentation + _suppress_instrumentation = False + + @classmethod + def suppress_instrumentation(cls, suppress: bool = True): + cls._suppress_instrumentation = suppress + + @classmethod + def is_instrumentation_suppressed(cls) -> bool: + return cls._suppress_instrumentation diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/utils.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/utils.py new file mode 100644 index 0000000000..e8626672f2 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/utils.py @@ -0,0 +1,97 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +import traceback + +logger = logging.getLogger(__name__) + +# By default, we do not record prompt or completion content. Set this +# environment variable to "true" to enable collection of message text. +OTEL_INSTRUMENTATION_LANGCHAIN_CAPTURE_MESSAGE_CONTENT = ( + "OTEL_INSTRUMENTATION_LANGCHAIN_CAPTURE_MESSAGE_CONTENT" +) + +OTEL_INSTRUMENTATION_GENAI_EXPORTER = "OTEL_INSTRUMENTATION_GENAI_EXPORTER" + +OTEL_INSTRUMENTATION_GENAI_EVALUATION_FRAMEWORK = ( + "OTEL_INSTRUMENTATION_GENAI_EVALUATION_FRAMEWORK" +) + +OTEL_INSTRUMENTATION_GENAI_EVALUATION_ENABLE = ( + "OTEL_INSTRUMENTATION_GENAI_EVALUATION_ENABLE" +) + + +def should_collect_content() -> bool: + val = os.getenv( + OTEL_INSTRUMENTATION_LANGCHAIN_CAPTURE_MESSAGE_CONTENT, "false" + ) + return val.strip().lower() == "true" + + +def should_emit_events() -> bool: + val = os.getenv( + OTEL_INSTRUMENTATION_GENAI_EXPORTER, "SpanMetricEventExporter" + ) + if val.strip().lower() == "spanmetriceventexporter": + return True + elif val.strip().lower() == "spanmetricexporter": + return False + else: + raise ValueError(f"Unknown exporter_type: {val}") + + +def should_enable_evaluation() -> bool: + val = os.getenv(OTEL_INSTRUMENTATION_GENAI_EVALUATION_ENABLE, "True") + return val.strip().lower() == "true" + + +def get_evaluation_framework_name() -> str: + val = os.getenv( + OTEL_INSTRUMENTATION_GENAI_EVALUATION_FRAMEWORK, "Deepeval" + ) + return val.strip().lower() + + +def get_property_value(obj, property_name): + if isinstance(obj, dict): + return obj.get(property_name, None) + + return getattr(obj, property_name, None) + + +def dont_throw(func): + """ + Decorator that catches and logs exceptions, rather than re-raising them, + to avoid interfering with user code if instrumentation fails. + """ + + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + logger.debug( + "OpenTelemetry instrumentation for LangChain encountered an error in %s: %s", + func.__name__, + traceback.format_exc(), + ) + from opentelemetry.instrumentation.langchain.config import Config + + if Config.exception_logger: + Config.exception_logger(e) + return None + + return wrapper
diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/.env.example b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/.env.example new file mode 100644 index 0000000000..c60337cb73 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/.env.example @@ -0,0 +1,11 @@ +# Update this with your real OpenAI API key +OPENAI_API_KEY= +APPKEY= +# Change to your OTLP endpoint if needed +OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 +OTEL_EXPORTER_OTLP_PROTOCOL=grpc + +# Change to 'false' to hide prompt and completion content +OTEL_INSTRUMENTATION_LANGCHAIN_CAPTURE_MESSAGE_CONTENT=true + +OTEL_SERVICE_NAME=opentelemetry-python-langchain-manual \ No newline at end of file
diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/README.rst b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/README.rst new file mode 100644 index 0000000000..325c3d57b2 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/README.rst @@ -0,0 +1,3 @@ +Add a .env file to set up the environment variables required to run the tests. +The tests run by calling LLM APIs provided by Circuit.
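+With the environment in place, the tests can be run from the package root
+with, for example, ``pytest tests/`` (assuming pytest and the package's
+test dependencies are installed).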
+There is a sample .env file in this directory.
diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_langchain_call.yaml b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_langchain_call.yaml new file mode 100644 index 0000000000..381385a5f3 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_langchain_call.yaml @@ -0,0 +1,144 @@ +interactions: +- request: + body: |- + { + "messages": [ + { + "content": "You are a helpful assistant!", + "role": "system" + }, + { + "content": "What is the capital of France?", + "role": "user" + } + ], + "model": "gpt-3.5-turbo", + "stream": false + } + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '171' + content-type: + - application/json + host: + - api.openai.com + traceparent: + - 00-67db16c8ff85be2c50d4dbfb5553858b-372b2c3c4b99c6d0-01 + user-agent: + - OpenAI/Python 1.86.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.86.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.1 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |- + { + "id": "chatcmpl-Bj8hyoKSOooftbZZk24bce8lAT7PE", + "object": "chat.completion", + "created": 1750097934, + "model": "gpt-3.5-turbo-0125", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "The capital of France is Paris.", + "refusal": null, + "annotations": [] + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 24, + "completion_tokens": 7, + "total_tokens": 31, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": null + } + headers: + CF-RAY: + - 950c4ff829573a6b-LAX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Mon, 16 Jun 2025 18:18:54 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '822' + openai-organization: test_openai_org_id + openai-processing-ms: + - '381' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - '387' + x-ratelimit-limit-requests: + - '5000' + x-ratelimit-limit-tokens: + - '2000000' + x-ratelimit-remaining-requests: + - '4999' + x-ratelimit-remaining-tokens: + - '1999981' + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_1eabd7c9c42ed2796829cbda19312189 + status: + code: 200 + message: OK +version: 1
diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py index df97ad0492..e75fb25ae7 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py +++
b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py @@ -71,7 +71,6 @@ def fixture_tracer_provider(span_exporter): provider.add_span_processor(SimpleSpanProcessor(span_exporter)) return provider - @pytest.fixture(scope="function") def start_instrumentation( tracer_provider, @@ -91,6 +90,7 @@ def environment(): os.environ["OPENAI_API_KEY"] = "test_openai_api_key" + @pytest.fixture(scope="module") def vcr_config(): return { @@ -104,7 +104,6 @@ def vcr_config(): "before_record_response": scrub_response_headers, } - class LiteralBlockScalar(str): """Formats the string as a literal block scalar, preserving whitespace and without interpreting escape characters""" diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_langchain_llm.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_langchain_llm.py new file mode 100644 index 0000000000..3f5fca4443 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_langchain_llm.py @@ -0,0 +1,635 @@ +"""Test suite for LangChain LLM instrumentation with OpenTelemetry. + +This module contains tests that verify the integration between LangChain LLM calls +and OpenTelemetry for observability, including spans, logs, and metrics. +""" + +# Standard library imports +import json +import os +from typing import Any, Dict, List, Optional + +# Third-party imports +import pytest +from langchain_core.messages import ( + HumanMessage, + SystemMessage, + ToolMessage, +) +from langchain_core.tools import tool +from langchain_openai import ChatOpenAI + +from opentelemetry.sdk.metrics.export import Metric +from opentelemetry.sdk.trace import ReadableSpan, Span +from opentelemetry.semconv._incubating.attributes import ( + event_attributes as EventAttributes, +) +from opentelemetry.semconv._incubating.attributes import gen_ai_attributes +from opentelemetry.semconv._incubating.metrics import gen_ai_metrics + +# Constants +CHAT = gen_ai_attributes.GenAiOperationNameValues.CHAT.value +TOOL_OPERATION = "execute_tool" + +########################################### +# Assertion Helpers +########################################### + +# OpenAI Attributes Helpers + + +def assert_openai_completion_attributes( + span: ReadableSpan, + request_model: str, + response: Any, + operation_name: str = "chat", +) -> None: + """Verify OpenAI completion attributes in a span. 
+ + Args: + span: The span to check + request_model: Expected request model name + response: The LLM response object + operation_name: Expected operation name (default: "chat") + """ + return assert_all_openai_attributes( + span, + request_model, + response.response_metadata.get("model_name"), + response.response_metadata.get("token_usage").get("prompt_tokens"), + response.response_metadata.get("token_usage").get("completion_tokens"), + operation_name, + ) + + +def assert_all_openai_attributes( + span: ReadableSpan, + request_model: str, + response_model: str = "gpt-4o-mini-2024-07-18", + input_tokens: Optional[int] = None, + output_tokens: Optional[int] = None, + operation_name: str = "chat", + span_name: str = "chat gpt-4o-mini", + system: str = "LangChain:ChatOpenAI", +): + assert span.name == span_name + + assert ( + operation_name + == span.attributes[gen_ai_attributes.GEN_AI_OPERATION_NAME] + ) + + assert ( + request_model + == span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MODEL] + ) + + assert ( + response_model + == span.attributes[gen_ai_attributes.GEN_AI_RESPONSE_MODEL] + ) + + assert gen_ai_attributes.GEN_AI_RESPONSE_ID in span.attributes + + if input_tokens: + assert ( + input_tokens + == span.attributes[gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS] + ) + else: + assert ( + gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS not in span.attributes + ) + + if output_tokens: + assert ( + output_tokens + == span.attributes[gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS] + ) + else: + assert ( + gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS not in span.attributes + ) + + +def _assert_tool_request_functions_on_span( + span: Span, expected_tool_names: List[str] +) -> None: + """Verify tool request functions in span attributes. + + Args: + span: The span to check + expected_tool_names: List of expected tool names + """ + for i, name in enumerate(expected_tool_names): + assert span.attributes.get(f"gen_ai.request.function.{i}.name") == name + assert f"gen_ai.request.function.{i}.description" in span.attributes + assert f"gen_ai.request.function.{i}.parameters" in span.attributes + + +# Log Assertion Helpers + + +def assert_message_in_logs( + log: Any, + event_name: str, + expected_content: Dict[str, Any], + parent_span: Span, +) -> None: + """Verify a log message has the expected content and parent span.
+ + Args: + log: The log record to check + event_name: Expected event name + expected_content: Expected content in the log body + parent_span: Parent span for context verification + """ + assert log.log_record.attributes[EventAttributes.EVENT_NAME] == event_name + # assert ( + # TODO: use constant from GenAIAttributes.GenAiSystemValues after it is added there + # log.log_record.attributes[gen_ai_attributes.GEN_AI_SYSTEM] + # == "langchain" + # ) + + if not expected_content: + assert not log.log_record.body + else: + assert log.log_record.body + assert dict(log.log_record.body) == remove_none_values( + expected_content + ) + assert_log_parent(log, parent_span) + + +def assert_log_parent(log, span): + if span: + assert log.log_record.trace_id == span.get_span_context().trace_id + assert log.log_record.span_id == span.get_span_context().span_id + assert ( + log.log_record.trace_flags == span.get_span_context().trace_flags + ) + + +# Metric Assertion Helpers + + +def remove_none_values(body): + result = {} + for key, value in body.items(): + if value is None: + continue + if isinstance(value, dict): + result[key] = remove_none_values(value) + elif isinstance(value, list): + result[key] = [remove_none_values(i) for i in value] + else: + result[key] = value + return result + + +def assert_duration_metric(metric: Metric, parent_span: Span) -> None: + """Verify duration metric has expected structure and values. + + Args: + metric: The metric to verify + parent_span: Parent span for context verification + """ + assert metric is not None + assert len(metric.data.data_points) >= 1 + assert metric.data.data_points[0].sum > 0 + + assert_duration_metric_attributes( + metric.data.data_points[0].attributes, parent_span + ) + assert_exemplars( + metric.data.data_points[0].exemplars, + metric.data.data_points[0].sum, + parent_span, + ) + + +def assert_exemplars(exemplars, sum, parent_span): + assert len(exemplars) >= 1 + assert exemplars[0].value >= sum + assert exemplars[0].span_id == parent_span.get_span_context().span_id + assert exemplars[0].trace_id == parent_span.get_span_context().trace_id + + +def assert_token_usage_metric(metric: Metric, parent_span: Span) -> None: + """Verify token usage metric has expected structure and values. + + Args: + metric: The metric to verify + parent_span: Parent span for context verification + """ + assert metric is not None + assert len(metric.data.data_points) == 2 + + assert metric.data.data_points[0].sum > 0 + assert_token_usage_metric_attributes( + metric.data.data_points[0].attributes, parent_span + ) + assert_exemplars( + metric.data.data_points[0].exemplars, + metric.data.data_points[0].sum, + parent_span, + ) + + assert metric.data.data_points[1].sum > 0 + assert_token_usage_metric_attributes( + metric.data.data_points[1].attributes, parent_span + ) + assert_exemplars( + metric.data.data_points[1].exemplars, + metric.data.data_points[1].sum, + parent_span, + ) + + +def assert_duration_metric_attributes( + attributes: Dict[str, Any], parent_span: Span +) -> None: + """Verify duration metric attributes. 
+ + Args: + attributes: Metric attributes to verify + parent_span: Parent span for context verification + """ + assert len(attributes) == 5 + # assert attributes.get(gen_ai_attributes.GEN_AI_SYSTEM) == "langchain" + assert ( + attributes.get(gen_ai_attributes.GEN_AI_OPERATION_NAME) + == gen_ai_attributes.GenAiOperationNameValues.CHAT.value + ) + assert ( + attributes.get(gen_ai_attributes.GEN_AI_REQUEST_MODEL) + == parent_span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MODEL] + ) + assert ( + attributes.get(gen_ai_attributes.GEN_AI_RESPONSE_MODEL) + == parent_span.attributes[gen_ai_attributes.GEN_AI_RESPONSE_MODEL] + ) + + +def assert_token_usage_metric_attributes( + attributes: Dict[str, Any], parent_span: Span +) -> None: + """Verify token usage metric attributes. + + Args: + attributes: Metric attributes to verify + parent_span: Parent span for context verification + """ + assert len(attributes) == 6 + # assert attributes.get(gen_ai_attributes.GEN_AI_SYSTEM) == "langchain" + assert ( + attributes.get(gen_ai_attributes.GEN_AI_OPERATION_NAME) + == gen_ai_attributes.GenAiOperationNameValues.CHAT.value + ) + assert ( + attributes.get(gen_ai_attributes.GEN_AI_REQUEST_MODEL) + == parent_span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MODEL] + ) + assert ( + attributes.get(gen_ai_attributes.GEN_AI_RESPONSE_MODEL) + == parent_span.attributes[gen_ai_attributes.GEN_AI_RESPONSE_MODEL] + ) + + +def assert_duration_metric_with_tool( + metric: Metric, spans: List[Span] +) -> None: + """Verify duration metric when tools are involved. + + Args: + metric: The metric to verify + spans: List of spans for context verification + """ + assert spans, "No LLM CHAT spans found" + llm_points = [ + dp + for dp in metric.data.data_points + if dp.attributes.get(gen_ai_attributes.GEN_AI_OPERATION_NAME) == CHAT + ] + assert len(llm_points) >= 1 + for dp in llm_points: + assert dp.sum > 0 + assert_duration_metric_attributes(dp.attributes, spans[0]) + + +def assert_token_usage_metric_with_tool( + metric: Metric, spans: List[Span] +) -> None: + """Verify token usage metric when tools are involved. + + Args: + metric: The metric to verify + spans: List of spans for context verification + """ + assert spans, "No LLM CHAT spans found" + llm_points = [ + dp + for dp in metric.data.data_points + if dp.attributes.get(gen_ai_attributes.GEN_AI_OPERATION_NAME) == CHAT + ] + assert ( + len(llm_points) >= 2 + ) # Should have both input and output token metrics + for dp in llm_points: + assert dp.sum > 0 + assert_token_usage_metric_attributes(dp.attributes, spans[0]) + + +########################################### +# Test Fixtures (from conftest.py) +# - span_exporter +# - log_exporter +# - metric_reader +# - chatOpenAI_client +# - instrument_with_content +########################################### + +########################################### +# Test Functions +########################################### + + +def _get_llm_spans(spans: List[Span]) -> List[Span]: + """Filter spans to get only LLM chat spans. + + Args: + spans: List of spans to filter + + Returns: + List of spans that are LLM chat operations + """ + return [ + s + for s in spans + if s.attributes.get(gen_ai_attributes.GEN_AI_OPERATION_NAME) == CHAT + ] + + +########################################### +# Test Functions +########################################### + +# Note: The following test functions use VCR to record and replay HTTP interactions +# for reliable and deterministic testing. 
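+# For example, the cassette tests/cassettes/test_langchain_call.yaml added in
+# this change is what @pytest.mark.vcr() replays for test_langchain_call, so
+# that test runs without contacting the live API.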
Each test verifies both the functional +# behavior of the LLM calls and the associated OpenTelemetry instrumentation. + +# Basic LLM Call Tests + + +@pytest.mark.vcr() +def test_langchain_call( + span_exporter, + log_exporter, + metric_reader, + chatOpenAI_client, # noqa: N803 + instrument_with_content: None, + monkeypatch, +) -> None: + """Test basic LLM call with telemetry verification. + + This test verifies that: + 1. The LLM call completes successfully + 2. Spans are generated with correct attributes + 3. Logs contain expected messages + 4. Metrics are recorded for the operation + """ + # Setup test LLM with dummy values + monkeypatch.setenv("OPENAI_API_KEY", "test-api-key") + monkeypatch.setenv("APPKEY", "test-app-key") + llm_model_value = "gpt-4o-mini" + llm = ChatOpenAI( + temperature=0.1, + api_key=os.getenv("OPENAI_API_KEY"), + base_url="https://chat-ai.cisco.com/openai/deployments/gpt-4o-mini", + model=llm_model_value, + default_headers={"api-key": os.getenv("OPENAI_API_KEY")}, + model_kwargs={"user": json.dumps({"appkey": os.getenv("APPKEY")})}, + ) + + # Prepare test messages + system_message = SystemMessage(content="You are a helpful assistant!") + user_message = HumanMessage(content="What is the capital of France?") + messages = [system_message, user_message] + + # Execute LLM call + response = llm.invoke(messages) + assert response.content == "The capital of France is Paris." + + # --- Verify Telemetry --- + + # 1. Check spans + spans = span_exporter.get_finished_spans() + assert spans, "No spans were exported" + assert_openai_completion_attributes(spans[0], llm_model_value, response) + + # 2. Check logs + logs = log_exporter.get_finished_logs() + print(f"logs: {logs}") + for log in logs: + print(f"log: {log}") + print(f"log attributes: {log.log_record.attributes}") + print(f"log body: {log.log_record.body}") + system_message = {"content": messages[0].content} + human_message = {"content": messages[1].content} + # will add the logs back once the logs are fixed + # assert_message_in_logs( + # logs[0], "gen_ai.system.message", system_message, spans[0] + # ) + # assert_message_in_logs( + # logs[1], "gen_ai.human.message", human_message, spans[0] + # ) + + chat_generation_event = { + "index": 0, + "finish_reason": "stop", + "message": {"content": response.content, "type": "ChatGeneration"}, + } + # assert_message_in_logs(logs[2], "gen_ai.choice", chat_generation_event, spans[0]) + + # 3. Check metrics + metrics = metric_reader.get_metrics_data().resource_metrics + + print(f"metrics: {metrics}") + assert len(metrics) == 1 + + metric_data = metrics[0].scope_metrics[0].metrics + for m in metric_data: + if m.name == gen_ai_metrics.GEN_AI_CLIENT_OPERATION_DURATION: + assert_duration_metric(m, spans[0]) + if m.name == gen_ai_metrics.GEN_AI_CLIENT_TOKEN_USAGE: + assert_token_usage_metric(m, spans[0]) + + +@pytest.mark.vcr() +def test_langchain_call_with_tools( + span_exporter, + log_exporter, + metric_reader, + instrument_with_content: None, + monkeypatch, +) -> None: + """Test LLM call with tool usage and verify telemetry. + + This test verifies: + 1. Tool definitions and bindings work correctly + 2. Tool execution and response handling + 3. 
Telemetry includes tool-related spans and metrics + """ + + # Define test tools + @tool + def add(a: int, b: int) -> int: + """Add two integers together.""" + return a + b + + @tool + def multiply(a: int, b: int) -> int: + """Multiply two integers together.""" + return a * b + + monkeypatch.setenv("OPENAI_API_KEY", "test-api-key") + monkeypatch.setenv("APPKEY", "test-app-key") + # Setup LLM with tools + llm = ChatOpenAI( + temperature=0.1, + api_key=os.getenv("OPENAI_API_KEY"), + base_url="https://chat-ai.cisco.com/openai/deployments/gpt-4o-mini", + model="gpt-4o-mini", + default_headers={"api-key": os.getenv("OPENAI_API_KEY")}, + model_kwargs={"user": json.dumps({"appkey": os.getenv("APPKEY")})}, + ) + + tools = [add, multiply] + llm_with_tools = llm.bind_tools(tools) + + # Test conversation flow + messages = [HumanMessage("Please add 2 and 3, then multiply 2 and 3.")] + + # First LLM call - should return tool calls + ai_msg = llm_with_tools.invoke(messages) + messages.append(ai_msg) + + # Process tool calls + tool_calls = getattr( + ai_msg, "tool_calls", None + ) or ai_msg.additional_kwargs.get("tool_calls", []) + + # Execute tools and collect results + name_map = {"add": add, "multiply": multiply} + for tc in tool_calls: + fn = tc.get("function", {}) + tool_name = (fn.get("name") or tc.get("name") or "").lower() + arg_str = fn.get("arguments") + args = ( + json.loads(arg_str) + if isinstance(arg_str, str) + else (tc.get("args") or {}) + ) + + selected_tool = name_map[tool_name] + tool_output = selected_tool.invoke(args) + + messages.append( + ToolMessage( + content=str(tool_output), + name=tool_name, + tool_call_id=tc.get("id", ""), + ) + ) + + # Final LLM call with tool results + final = llm_with_tools.invoke(messages) + assert isinstance(final.content, str) and len(final.content) > 0 + assert "5" in final.content and "6" in final.content + + # --- Verify Telemetry --- + spans = span_exporter.get_finished_spans() + assert len(spans) >= 1 + _assert_tool_request_functions_on_span(spans[0], ["add", "multiply"]) + + # Verify logs + logs = log_exporter.get_finished_logs() + assert len(logs) >= 3 # system/user + gen_ai.choice + + choice_logs = [ + l + for l in logs + if l.log_record.attributes.get("event.name") == "gen_ai.choice" + ] + assert len(choice_logs) >= 1 + body = dict(choice_logs[0].log_record.body or {}) + assert "message" in body and isinstance(body["message"], dict) + assert body["message"].get("type") == "ChatGeneration" + assert isinstance(body["message"].get("content"), str) + + # Verify metrics with tool usage + llm_spans = _get_llm_spans(spans) + for rm in metric_reader.get_metrics_data().resource_metrics: + for scope in rm.scope_metrics: + for metric in scope.metrics: + if metric.name == "gen_ai.client.operation.duration": + assert_duration_metric_with_tool(metric, llm_spans) + elif metric.name == "gen_ai.client.token.usage": + assert_token_usage_metric_with_tool(metric, llm_spans) + + +# Tool-related Assertion Helpers +def assert_duration_metric_with_tool( + metric: Metric, spans: List[Span] +) -> None: + """Verify duration metric attributes when tools are involved. 
+ + Args: + metric: The metric data points to verify + spans: List of spans for context verification + """ + llm_points = [ + dp + for dp in metric.data.data_points + if dp.attributes.get(gen_ai_attributes.GEN_AI_OPERATION_NAME) == CHAT + ] + assert len(llm_points) >= 1 + for dp in llm_points: + assert_duration_metric_attributes(dp.attributes, spans[0]) + if getattr(dp, "exemplars", None): + assert_exemplar_matches_any_llm_span(dp.exemplars, spans) + + +def assert_token_usage_metric_with_tool( + metric: Metric, spans: List[Span] +) -> None: + """Verify token usage metric when tools are involved. + + Args: + metric: The metric to verify + spans: List of spans for context verification + """ + assert spans, "No LLM CHAT spans found" + + # Only consider CHAT datapoints (ignore tool) + llm_points = [ + dp + for dp in metric.data.data_points + if dp.attributes.get(gen_ai_attributes.GEN_AI_OPERATION_NAME) == CHAT + ] + assert len(llm_points) >= 2 + + for dp in llm_points: + assert dp.sum > 0 + assert_token_usage_metric_attributes( + dp.attributes, spans[0] + ) # use attrs from any LLM span + if getattr(dp, "exemplars", None): + assert_exemplar_matches_any_llm_span(dp.exemplars, spans) + + +def assert_exemplar_matches_any_llm_span(exemplars, spans): + assert exemplars and len(exemplars) >= 1 + # Build a lookup of span_id -> (trace_id, span_obj) + by_id = {s.get_span_context().span_id: s for s in spans} + for ex in exemplars: + s = by_id.get(ex.span_id) + assert ( + s is not None + ), f"exemplar.span_id not found among LLM spans: {ex.span_id}" + # Optional: also ensure consistent trace + assert ex.trace_id == s.get_span_context().trace_id diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py index fae84a772a..600faa428c 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -214,6 +214,7 @@ }, ] default_instrumentations = [ + "opentelemetry-genai-sdk==0.0.1", "opentelemetry-instrumentation-asyncio==0.59b0.dev", "opentelemetry-instrumentation-dbapi==0.59b0.dev", "opentelemetry-instrumentation-logging==0.59b0.dev", diff --git a/util/opentelemetry-util-genai-dev/examples/traceloop_example.py b/util/opentelemetry-util-genai-dev/examples/traceloop_example.py new file mode 100644 index 0000000000..7944f244a7 --- /dev/null +++ b/util/opentelemetry-util-genai-dev/examples/traceloop_example.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +""" +Traceloop Span Transformation Examples +""" + +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import ( + ConsoleSpanExporter, + SimpleSpanProcessor, +) +from opentelemetry.util.genai.processors import TraceloopSpanProcessor + + +def example_automatic_processing(): + """Example: Automatic span processing with TraceloopSpanProcessor.""" + + # Set up tracer provider + provider = TracerProvider() + + # Add TraceloopSpanProcessor - transforms ALL matching spans automatically + traceloop_processor = TraceloopSpanProcessor( + attribute_transformations={ + "remove": ["debug_info", "internal_id"], + "rename": { + "model_ver": "ai.model.version", + "llm.provider": "ai.system.vendor", + }, + "add": {"service.name": "my-llm-service"}, + } + ) + provider.add_span_processor(traceloop_processor) + + # Add console exporter to see results + 
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) + trace.set_tracer_provider(provider) + + # Create spans - they get automatically transformed! + tracer = trace.get_tracer(__name__) + + with tracer.start_as_current_span("chat gpt-4") as span: + span.set_attribute( + "model_ver", "1.0" + ) # Will be renamed to ai.model.version + span.set_attribute( + "llm.provider", "openai" + ) # Will be renamed to ai.system.vendor + span.set_attribute("debug_info", "remove_me") # Will be removed + print("Span automatically transformed when it ends!") + + print("Automatic processing complete\n") + + +def example_simple_setup(): + """Example: Minimal setup for common use case.""" + print("=== Simple Setup ===") + + # Minimal setup - just add the processor with basic rules + provider = TracerProvider() + + processor = TraceloopSpanProcessor( + attribute_transformations={"add": {"service.name": "my-ai-service"}}, + traceloop_attributes={"traceloop.entity.name": "AI-Service"}, + ) + provider.add_span_processor(processor) + trace.set_tracer_provider(provider) + + print("TraceloopSpanProcessor added - all AI spans will be transformed!") + print("Simple setup complete\n") + + +if __name__ == "__main__": + print("Traceloop Span Transformation Examples\n") + + # Show automatic processing (recommended approach) + example_automatic_processing() + + # Show minimal setup + example_simple_setup() + + print("All examples complete!")
diff --git a/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/api.py b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/api.py new file mode 100644 index 0000000000..52b0710e26 --- /dev/null +++ b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/api.py @@ -0,0 +1,240 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +from threading import Lock +from typing import Any, List, Optional, Union +from uuid import UUID, uuid4 + +from .generators import SpanMetricEventGenerator, SpanMetricGenerator + +from opentelemetry._events import get_event_logger +from opentelemetry._logs import get_logger +from opentelemetry.metrics import get_meter +from opentelemetry.semconv.schemas import Schemas +from opentelemetry.trace import get_tracer + +from .data import ChatGeneration, Error, Message, ToolFunction, ToolOutput +from .types import LLMInvocation, ToolInvocation, TraceloopInvocation +from .version import __version__ + + +class TelemetryClient: + """ + High-level client managing GenAI invocation lifecycles and exporting + them as spans, metrics, and events.
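+
+    A minimal usage sketch (names as defined in this module; the message and
+    generation values are illustrative)::
+
+        client = get_telemetry_client()
+        invocation = client.start_llm(
+            prompts=[Message(content="hi", type="human", name="user")],
+            run_id=uuid4(),
+        )
+        client.stop_llm(
+            invocation.run_id,
+            chat_generations=[
+                ChatGeneration(content="hello", type="ChatGeneration")
+            ],
+        )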
+ """ + + def __init__(self, exporter_type_full: bool = True, **kwargs): + tracer_provider = kwargs.get("tracer_provider") + self._tracer = get_tracer( + __name__, + __version__, + tracer_provider, + schema_url=Schemas.V1_28_0.value, + ) + + meter_provider = kwargs.get("meter_provider") + self._meter = get_meter( + __name__, + __version__, + meter_provider, + schema_url=Schemas.V1_28_0.value, + ) + + event_logger_provider = kwargs.get("event_logger_provider") + self._event_logger = get_event_logger( + __name__, + __version__, + event_logger_provider=event_logger_provider, + schema_url=Schemas.V1_28_0.value, + ) + + logger_provider = kwargs.get("logger_provider") + self._logger = get_logger( + __name__, + __version__, + logger_provider=logger_provider, + schema_url=Schemas.V1_28_0.value, + ) + + self._generator = ( + SpanMetricEventGenerator( + tracer=self._tracer, + meter=self._meter, + logger=self._logger, + ) + if exporter_type_full + else SpanMetricGenerator(tracer=self._tracer, meter=self._meter) + ) + + self._llm_registry: dict[ + UUID, Union[LLMInvocation, TraceloopInvocation] + ] = {} + self._tool_registry: dict[UUID, ToolInvocation] = {} + self._lock = Lock() + + def start_llm( + self, + prompts: Optional[List[Message]] = None, + tool_functions: Optional[List[ToolFunction]] = None, + run_id: Optional[UUID] = None, + parent_run_id: Optional[UUID] = None, + invocation: Optional[Union[LLMInvocation, TraceloopInvocation]] = None, + **attributes: Any, + ): + """ + Start an LLM invocation. + + Can accept either: + 1. Traditional parameters (prompts, tool_functions, etc.) to create LLMInvocation + 2. Pre-built invocation object (LLMInvocation or TraceloopInvocation) + """ + if invocation is not None: + # Use the provided invocation (could be TraceloopInvocation) + actual_invocation = invocation + # Set run_id if not already set + if run_id is not None: + actual_invocation.run_id = run_id + if parent_run_id is not None: + actual_invocation.parent_run_id = parent_run_id + # Merge any additional attributes + actual_invocation.attributes.update(attributes) + else: + # Create traditional LLMInvocation + actual_run_id = run_id or uuid4() + actual_invocation = LLMInvocation( + request_model=attributes.get("request_model", "unknown"), + messages=prompts or [], + run_id=actual_run_id, + parent_run_id=parent_run_id, + attributes=attributes, + ) + # Handle tool_functions if provided + if tool_functions: + # Store tool functions in attributes for now + actual_invocation.attributes["tool_functions"] = tool_functions + + with self._lock: + self._llm_registry[actual_invocation.run_id] = actual_invocation + self._generator.start(actual_invocation) + return actual_invocation + + def stop_llm( + self, + run_id: UUID, + chat_generations: Optional[List[ChatGeneration]] = None, + **attributes: Any, + ) -> Union[LLMInvocation, TraceloopInvocation]: + with self._lock: + invocation = self._llm_registry.pop(run_id) + invocation.end_time = time.time() + # Convert ChatGeneration to OutputMessage if needed (for now, store as-is) + if chat_generations: + # Store in attributes for compatibility + invocation.attributes["chat_generations"] = chat_generations + invocation.attributes.update(attributes) + self._generator.finish(invocation) + return invocation + + def fail_llm( + self, run_id: UUID, error: Error, **attributes: Any + ) -> Union[LLMInvocation, TraceloopInvocation]: + with self._lock: + invocation = self._llm_registry.pop(run_id) + invocation.end_time = time.time() + invocation.attributes.update(**attributes) 
+ self._generator.error(error, invocation) + return invocation + + def start_tool( + self, + input_str: str, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **attributes, + ): + invocation = ToolInvocation( + input_str=input_str, + run_id=run_id, + parent_run_id=parent_run_id, + attributes=attributes, + ) + with self._lock: + self._tool_registry[invocation.run_id] = invocation + self._generator.init_tool(invocation) + + def stop_tool( + self, run_id: UUID, output: ToolOutput, **attributes + ) -> ToolInvocation: + with self._lock: + invocation = self._tool_registry.pop(run_id) + invocation.end_time = time.time() + invocation.output = output + self._generator.export_tool(invocation) + return invocation + + def fail_tool( + self, run_id: UUID, error: Error, **attributes + ) -> ToolInvocation: + with self._lock: + invocation = self._tool_registry.pop(run_id) + invocation.end_time = time.time() + invocation.attributes.update(**attributes) + self._generator.error_tool(error, invocation) + return invocation + + +# Singleton accessor +_default_client: TelemetryClient | None = None + + +def get_telemetry_client( + exporter_type_full: bool = True, **kwargs +) -> TelemetryClient: + global _default_client + if _default_client is None: + _default_client = TelemetryClient( + exporter_type_full=exporter_type_full, **kwargs + ) + return _default_client + + +# Module‐level convenience functions +def llm_start( + prompts: List[Message], + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **attributes, +): + return get_telemetry_client().start_llm( + prompts=prompts, + run_id=run_id, + parent_run_id=parent_run_id, + **attributes, + ) + + +def llm_stop( + run_id: UUID, chat_generations: List[ChatGeneration], **attributes +) -> LLMInvocation: + return get_telemetry_client().stop_llm( + run_id=run_id, chat_generations=chat_generations, **attributes + ) + + +def llm_fail(run_id: UUID, error: Error, **attributes) -> LLMInvocation: + return get_telemetry_client().fail_llm( + run_id=run_id, error=error, **attributes + ) diff --git a/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/data.py b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/data.py new file mode 100644 index 0000000000..2aec3394e4 --- /dev/null +++ b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/data.py @@ -0,0 +1,82 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
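+
+# A quick sketch of how these types are constructed in practice (the field
+# values are illustrative):
+#
+#     msg = Message(content="What is the capital of France?", type="human", name="user")
+#     gen = ChatGeneration(content="Paris.", type="ChatGeneration", finish_reason="stop")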
+ +from dataclasses import dataclass +from typing import List, Literal, Optional, Type, TypedDict + + +class TextPart(TypedDict): + type: Literal["text"] + content: str + + +# Keep room for future parts without changing the return type +# addition of tools can use Part = Union[TextPart, ToolPart] +Part = TextPart + + +class OtelMessage(TypedDict): + role: str + # role: Literal["user", "assistant", "system", "tool", "tool_message"] # TODO: check semconvs for allowed roles + parts: List[Part] + + +@dataclass +class Message: + content: str + type: str + name: str + + def _to_semconv_dict(self) -> OtelMessage: + """Convert the message to a dictionary suitable for OpenTelemetry semconvs. + + Ref: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/gen-ai.md#gen-ai-input-messages + """ + + # TODO: Support tool_call and tool_call response + return { + "role": self.type, + "parts": [ + { + "content": self.content, + "type": "text", + } + ], + } + + +@dataclass +class ChatGeneration: + content: str + type: str + finish_reason: Optional[str] = None + + +@dataclass +class Error: + message: str + type: Type[BaseException] + + +@dataclass +class ToolOutput: + tool_call_id: str + content: str + + +@dataclass +class ToolFunction: + name: str + description: str + parameters: str diff --git a/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/generators/__init__.py b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/generators/__init__.py index bc6f1cf319..6d7ea8da0e 100644 --- a/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/generators/__init__.py +++ b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/generators/__init__.py @@ -2,10 +2,18 @@ from .span_generator import SpanGenerator from .span_metric_event_generator import SpanMetricEventGenerator from .span_metric_generator import SpanMetricGenerator +from .span_transformer import ( + create_traceloop_invocation_from_span, + transform_existing_span_to_telemetry, +) +from .traceloop_span_generator import TraceloopSpanGenerator __all__ = [ "BaseTelemetryGenerator", "SpanGenerator", "SpanMetricEventGenerator", "SpanMetricGenerator", + "TraceloopSpanGenerator", + "transform_existing_span_to_telemetry", + "create_traceloop_invocation_from_span", ] diff --git a/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/generators/span_transformer.py b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/generators/span_transformer.py new file mode 100644 index 0000000000..59a4d5eedd --- /dev/null +++ b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/generators/span_transformer.py @@ -0,0 +1,123 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Utilities for transforming existing spans into TraceloopInvocations +based on transformation rules. 
+""" + +from typing import Any, Dict, Optional + +from opentelemetry.sdk.trace import ReadableSpan + +from ..types import TraceloopInvocation +from .traceloop_span_generator import TraceloopSpanGenerator + + +def create_traceloop_invocation_from_span( + existing_span: ReadableSpan, + attribute_transformations: Optional[Dict[str, Any]] = None, + name_transformations: Optional[Dict[str, str]] = None, + traceloop_attributes: Optional[Dict[str, Any]] = None, + request_model: Optional[str] = None, +) -> TraceloopInvocation: + """ + Create a TraceloopInvocation from an existing span, applying transformation rules. + + Args: + existing_span: The original span to extract data from + attribute_transformations: Rules for transforming attributes + name_transformations: Rules for transforming span name + traceloop_attributes: Additional Traceloop-specific attributes + request_model: Override model name (extracted from span if not provided) + + Returns: + TraceloopInvocation with transformed data + """ + + # Extract data from existing span + span_attributes = ( + dict(existing_span.attributes) if existing_span.attributes else {} + ) + span_name = existing_span.name + + # Determine request_model + if request_model is None: + # Try to extract from span attributes + request_model = ( + span_attributes.get("gen_ai.request.model") + or span_attributes.get("llm.request.model") + or span_attributes.get("ai.model.name") + or "unknown" + ) + + # Create TraceloopInvocation with extracted data + invocation = TraceloopInvocation( + request_model=request_model, + attribute_transformations=attribute_transformations or {}, + name_transformations=name_transformations or {}, + traceloop_attributes=traceloop_attributes or {}, + attributes=span_attributes.copy(), # Start with original attributes + # Copy timing information if available + start_time=existing_span.start_time / 1_000_000_000 + if existing_span.start_time + else 0, # Convert from nanoseconds + end_time=existing_span.end_time / 1_000_000_000 + if existing_span.end_time + else None, + ) + + return invocation + + +def transform_existing_span_to_telemetry( + existing_span: ReadableSpan, + attribute_transformations: Optional[Dict[str, Any]] = None, + name_transformations: Optional[Dict[str, str]] = None, + traceloop_attributes: Optional[Dict[str, Any]] = None, + generator: Optional[TraceloopSpanGenerator] = None, +) -> TraceloopInvocation: + """ + Transform an existing span into new telemetry using Traceloop transformation rules. 
+ + Args: + existing_span: The span to transform + attribute_transformations: Transformation rules for attributes + name_transformations: Transformation rules for span names + traceloop_attributes: Additional Traceloop-specific attributes + generator: Optional custom generator (creates default if not provided) + + Returns: + TraceloopInvocation with new span created based on transformation rules + """ + + # Create TraceloopInvocation from existing span data + invocation = create_traceloop_invocation_from_span( + existing_span=existing_span, + attribute_transformations=attribute_transformations, + name_transformations=name_transformations, + traceloop_attributes=traceloop_attributes, + ) + + # Create generator if not provided + if generator is None: + generator = TraceloopSpanGenerator(capture_content=True) + + # Generate new telemetry with transformations applied + generator.start(invocation) + + if existing_span.end_time is not None: + generator.finish(invocation) + + return invocation diff --git a/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/generators/traceloop_span_generator.py b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/generators/traceloop_span_generator.py new file mode 100644 index 0000000000..53a4053d15 --- /dev/null +++ b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/generators/traceloop_span_generator.py @@ -0,0 +1,224 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +import time +from typing import Any, Dict, Optional, Union + +from opentelemetry import trace +from opentelemetry.trace import Tracer +from opentelemetry.trace.status import Status, StatusCode + +from ..types import Error, LLMInvocation, TraceloopInvocation +from .base_span_generator import BaseSpanGenerator + + +class TraceloopSpanGenerator(BaseSpanGenerator): + """ + Generator for Traceloop-compatible spans using util-genai infrastructure. + + Instead of modifying existing spans, this creates new telemetry from + TraceloopInvocation data types that contain all proprietary attributes + and transformation rules. 
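+
+    A usage sketch, mirroring how span_transformer.py drives this generator
+    (the TraceloopInvocation is assumed to be built elsewhere)::
+
+        generator = TraceloopSpanGenerator(capture_content=True)
+        generator.start(invocation)
+        generator.finish(invocation)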
+ """ + + def __init__( + self, + tracer: Optional[Tracer] = None, + capture_content: bool = False, + default_attribute_rules: Optional[Dict[str, Any]] = None, + default_name_mappings: Optional[Dict[str, str]] = None, + ): + super().__init__(tracer, capture_content) + self.default_attribute_rules = default_attribute_rules or {} + self.default_name_mappings = default_name_mappings or {} + + def _apply_name_transformations( + self, invocation: TraceloopInvocation, span_name: str + ) -> str: + """Apply name transformations based on the invocation's rules.""" + # Use invocation-specific rules, fall back to defaults + name_mappings = { + **self.default_name_mappings, + **invocation.name_transformations, + } + + # Apply direct mappings first + if span_name in name_mappings: + return name_mappings[span_name] + + # Apply pattern-based transformations + for pattern, replacement in name_mappings.items(): + if "*" in pattern: + regex_pattern = pattern.replace("*", ".*") + if re.match(regex_pattern, span_name): + return replacement + + return span_name + + def _apply_attribute_transformations( + self, invocation: TraceloopInvocation + ): + """Apply attribute transformations to the invocation's attributes.""" + if invocation.span is None: + return + + # Use invocation-specific rules, merged with defaults + attribute_rules = { + **self.default_attribute_rules, + **invocation.attribute_transformations, + } + + # Start with the base attributes from invocation + attributes = dict(invocation.attributes) + + # Apply transformation rules + for rule_key, rule_value in attribute_rules.items(): + if rule_key == "remove": + # Remove specified attributes + for attr_to_remove in rule_value: + attributes.pop(attr_to_remove, None) + elif rule_key == "rename": + # Rename attributes + for old_name, new_name in rule_value.items(): + if old_name in attributes: + attributes[new_name] = attributes.pop(old_name) + elif rule_key == "add": + # Add new attributes (traceloop-specific ones) + attributes.update(rule_value) + + # Add traceloop-specific attributes + attributes.update(invocation.traceloop_attributes) + + # Update the invocation's attributes + invocation.attributes = attributes + + def _on_after_start(self, invocation: LLMInvocation): + """Hook called after span start - apply traceloop transformations.""" + if not isinstance(invocation, TraceloopInvocation): + # If not a TraceloopInvocation, just call the parent implementation + super()._on_after_start(invocation) + return + + if invocation.span is None: + return + + # Apply attribute transformations + self._apply_attribute_transformations(invocation) + + # Re-apply attributes after transformation + for k, v in invocation.attributes.items(): + invocation.span.set_attribute(k, v) + + def start( + self, invocation: Union[LLMInvocation, TraceloopInvocation] + ) -> None: + """Start a new span with Traceloop-specific handling.""" + if isinstance(invocation, TraceloopInvocation): + # Generate the base span name + base_span_name = f"chat {invocation.request_model}" + + # Apply name transformations + span_name = self._apply_name_transformations( + invocation, base_span_name + ) + + # Create span with transformed name + span = self._tracer.start_span( + name=span_name, kind=trace.SpanKind.CLIENT + ) + invocation.span = span + + # Set up context management + from opentelemetry.trace import use_span + + cm = use_span(span, end_on_exit=False) + cm.__enter__() + invocation.context_token = cm + + # Apply base attributes first + self._apply_start_attrs(invocation) + + # Apply 
traceloop-specific transformations + self._on_after_start(invocation) + else: + # Handle regular LLMInvocation + super().start(invocation) + + def finish( + self, invocation: Union[LLMInvocation, TraceloopInvocation] + ) -> None: + """Finish the span with any final transformations.""" + if isinstance(invocation, TraceloopInvocation): + if invocation.span is None: + return + + invocation.end_time = time.time() + + # Apply any final attribute transformations + self._apply_attribute_transformations(invocation) + + # Apply finish attributes + self._apply_finish_attrs(invocation) + + # End the span + token = invocation.context_token + if token is not None and hasattr(token, "__exit__"): + try: + token.__exit__(None, None, None) + except Exception: + pass + invocation.span.end() + else: + # Handle regular LLMInvocation + super().finish(invocation) + + def error( + self, + error: Error, + invocation: Union[LLMInvocation, TraceloopInvocation], + ) -> None: + """Handle error cases with Traceloop-specific handling.""" + if isinstance(invocation, TraceloopInvocation): + if invocation.span is None: + return + + invocation.end_time = time.time() + + # Set error status + invocation.span.set_status(Status(StatusCode.ERROR, error.message)) + if invocation.span.is_recording(): + from opentelemetry.semconv.attributes import ( + error_attributes as ErrorAttributes, + ) + + invocation.span.set_attribute( + ErrorAttributes.ERROR_TYPE, error.type.__qualname__ + ) + + # Apply transformations even on error + self._apply_attribute_transformations(invocation) + self._apply_finish_attrs(invocation) + + # End the span + token = invocation.context_token + if token is not None and hasattr(token, "__exit__"): + try: + token.__exit__(None, None, None) + except Exception: + pass + invocation.span.end() + else: + # Handle regular LLMInvocation + super().error(error, invocation) diff --git a/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/processors/__init__.py b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/processors/__init__.py new file mode 100644 index 0000000000..e26289726a --- /dev/null +++ b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/processors/__init__.py @@ -0,0 +1,5 @@ +from .traceloop_span_processor import TraceloopSpanProcessor + +__all__ = [ + "TraceloopSpanProcessor", +] diff --git a/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/processors/traceloop_span_processor.py b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/processors/traceloop_span_processor.py new file mode 100644 index 0000000000..258c6a9d86 --- /dev/null +++ b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/processors/traceloop_span_processor.py @@ -0,0 +1,138 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +TraceloopSpanProcessor - A span processor that automatically transforms spans +using Traceloop transformation rules. 
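+
+Usage sketch (illustrative; the transformation-rule values are assumptions,
+not fixed semantics beyond the "remove"/"rename"/"add" keys handled by
+TraceloopSpanGenerator):
+
+    from opentelemetry.sdk.trace import TracerProvider
+    from opentelemetry.util.genai.processors import TraceloopSpanProcessor
+
+    provider = TracerProvider()
+    provider.add_span_processor(
+        TraceloopSpanProcessor(
+            attribute_transformations={
+                "rename": {"llm.request.model": "gen_ai.request.model"}
+            },
+            name_transformations={"openai.chat.*": "chat openai"},
+        )
+    )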
+""" + +from typing import Any, Callable, Dict, Optional + +from opentelemetry.context import Context +from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor +from opentelemetry.trace import Span + +from .span_transformer import transform_existing_span_to_telemetry +from .traceloop_span_generator import TraceloopSpanGenerator + + +class TraceloopSpanProcessor(SpanProcessor): + """ + A span processor that automatically applies Traceloop transformations to spans. + + This processor can be added to your TracerProvider to automatically transform + all spans according to your transformation rules. + """ + + def __init__( + self, + attribute_transformations: Optional[Dict[str, Any]] = None, + name_transformations: Optional[Dict[str, str]] = None, + traceloop_attributes: Optional[Dict[str, Any]] = None, + span_filter: Optional[Callable[[ReadableSpan], bool]] = None, + generator: Optional[TraceloopSpanGenerator] = None, + ): + """ + Initialize the Traceloop span processor. + + Args: + attribute_transformations: Rules for transforming span attributes + name_transformations: Rules for transforming span names + traceloop_attributes: Additional Traceloop-specific attributes to add + span_filter: Optional filter function to determine which spans to transform + generator: Optional custom TraceloopSpanGenerator + """ + self.attribute_transformations = attribute_transformations or {} + self.name_transformations = name_transformations or {} + self.traceloop_attributes = traceloop_attributes or {} + self.span_filter = span_filter or self._default_span_filter + self.generator = generator or TraceloopSpanGenerator( + capture_content=True + ) + + def _default_span_filter(self, span: ReadableSpan) -> bool: + """Default filter: Transform spans that look like LLM/AI calls.""" + if not span.name or not span.attributes: + return False + + # Check for common LLM/AI span indicators + llm_indicators = [ + "chat", + "completion", + "llm", + "ai", + "gpt", + "claude", + "gemini", + "openai", + "anthropic", + "cohere", + "huggingface", + ] + + span_name_lower = span.name.lower() + for indicator in llm_indicators: + if indicator in span_name_lower: + return True + + # Check attributes for AI/LLM markers + for attr_key in span.attributes.keys(): + attr_key_lower = str(attr_key).lower() + if any( + marker in attr_key_lower + for marker in ["llm", "ai", "gen_ai", "model"] + ): + return True + + return False + + def on_start( + self, span: Span, parent_context: Optional[Context] = None + ) -> None: + """Called when a span is started.""" + pass + + def on_end(self, span: ReadableSpan) -> None: + """ + Called when a span is ended. 
+ """ + try: + # Check if this span should be transformed + if not self.span_filter(span): + return + + # Apply transformations and generate new telemetry + transform_existing_span_to_telemetry( + existing_span=span, + attribute_transformations=self.attribute_transformations, + name_transformations=self.name_transformations, + traceloop_attributes=self.traceloop_attributes, + generator=self.generator, + ) + + except Exception as e: + # Don't let transformation errors break the original span processing + import logging + + logging.warning( + f"TraceloopSpanProcessor failed to transform span: {e}" + ) + + def shutdown(self) -> None: + """Called when the tracer provider is shutdown.""" + pass + + def force_flush(self, timeout_millis: int = 30000) -> bool: + """Force flush any buffered spans.""" + return True diff --git a/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/types.py b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/types.py index 6ce2beb3b5..34e149b682 100644 --- a/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/types.py +++ b/util/opentelemetry-util-genai-dev/src/opentelemetry/util/genai/types.py @@ -128,6 +128,23 @@ class EvaluationResult: attributes: Dict[str, Any] = field(default_factory=dict) +@dataclass +class TraceloopInvocation(LLMInvocation): + """ + Represents a Traceloop-compatible LLM invocation. + This data type extends LLMInvocation with additional fields for Traceloop-specific + transformations and proprietary attributes, while maintaining compatibility + with the existing generator infrastructure. + """ + + # Transformation rules for attributes + attribute_transformations: Dict[str, Any] = field(default_factory=dict) + # Name transformation rules + name_transformations: Dict[str, str] = field(default_factory=dict) + # Custom/proprietary attributes specific to Traceloop + traceloop_attributes: Dict[str, Any] = field(default_factory=dict) + + __all__ = [ # existing exports intentionally implicit before; making explicit for new additions "ContentCapturingMode", @@ -139,4 +156,5 @@ class EvaluationResult: "LLMInvocation", "Error", "EvaluationResult", + "TraceloopInvocation", ] diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/api.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/api.py new file mode 100644 index 0000000000..1751894a98 --- /dev/null +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/api.py @@ -0,0 +1,214 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
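+
+# Usage sketch (illustrative, not executed): TelemetryClient below tracks
+# in-flight invocations by run_id, and callers pair start_llm with exactly
+# one stop_llm or fail_llm per run_id:
+#
+#     from uuid import uuid4
+#
+#     client = get_telemetry_client()
+#     run_id = uuid4()
+#     client.start_llm(prompts=[...], tool_functions=[], run_id=run_id)
+#     ...  # perform the model call
+#     client.stop_llm(run_id, chat_generations=[...])
+#     # or, on failure: client.fail_llm(run_id, error)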
+ +import time +from threading import Lock +from typing import List, Optional +from uuid import UUID + +from opentelemetry._events import get_event_logger +from opentelemetry._logs import get_logger + +# from opentelemetry.instrumentation.langchain.version import __version__ +try: + from importlib.metadata import version as _pkg_version + + __version__ = _pkg_version("opentelemetry-instrumentation-langchain") +except Exception: # Fallback to a default if package metadata not present (editable dev mode etc.) + __version__ = "0.0.1" +from opentelemetry.metrics import get_meter +from opentelemetry.semconv.schemas import Schemas +from opentelemetry.trace import get_tracer + +from .data import ChatGeneration, Error, Message, ToolFunction, ToolOutput +from .generators import SpanMetricEventGenerator, SpanMetricGenerator +from .types import LLMInvocation, ToolInvocation + + +class TelemetryClient: + """ + High-level client managing GenAI invocation lifecycles and exporting + them as spans, metrics, and events. + """ + + def __init__(self, exporter_type_full: bool = True, **kwargs): + tracer_provider = kwargs.get("tracer_provider") + self._tracer = get_tracer( + __name__, + __version__, + tracer_provider, + schema_url=Schemas.V1_28_0.value, + ) + + meter_provider = kwargs.get("meter_provider") + self._meter = get_meter( + __name__, + __version__, + meter_provider, + schema_url=Schemas.V1_28_0.value, + ) + + event_logger_provider = kwargs.get("event_logger_provider") + self._event_logger = get_event_logger( + __name__, + __version__, + event_logger_provider=event_logger_provider, + schema_url=Schemas.V1_28_0.value, + ) + + logger_provider = kwargs.get("logger_provider") + self._logger = get_logger( + __name__, + __version__, + logger_provider=logger_provider, + schema_url=Schemas.V1_28_0.value, + ) + + self._generator = ( + SpanMetricEventGenerator( + tracer=self._tracer, + meter=self._meter, + logger=self._logger, + ) + if exporter_type_full + else SpanMetricGenerator(tracer=self._tracer, meter=self._meter) + ) + + self._llm_registry: dict[UUID, LLMInvocation] = {} + self._tool_registry: dict[UUID, ToolInvocation] = {} + self._lock = Lock() + + def start_llm( + self, + prompts: List[Message], + tool_functions: List[ToolFunction], + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **attributes, + ): + invocation = LLMInvocation( + messages=prompts, + tool_functions=tool_functions, + run_id=run_id, + parent_run_id=parent_run_id, + attributes=attributes, + ) + with self._lock: + self._llm_registry[invocation.run_id] = invocation + self._generator.start(invocation) + + def stop_llm( + self, + run_id: UUID, + chat_generations: List[ChatGeneration], + **attributes, + ) -> LLMInvocation: + with self._lock: + invocation = self._llm_registry.pop(run_id) + invocation.end_time = time.time() + invocation.chat_generations = chat_generations + invocation.attributes.update(attributes) + self._generator.finish(invocation) + return invocation + + def fail_llm( + self, run_id: UUID, error: Error, **attributes + ) -> LLMInvocation: + with self._lock: + invocation = self._llm_registry.pop(run_id) + invocation.end_time = time.time() + invocation.attributes.update(**attributes) + self._generator.error(error, invocation) + return invocation + + def start_tool( + self, + input_str: str, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **attributes, + ): + invocation = ToolInvocation( + input_str=input_str, + run_id=run_id, + parent_run_id=parent_run_id, + attributes=attributes, + ) + with self._lock: + 
self._tool_registry[invocation.run_id] = invocation
+        self._generator.init_tool(invocation)
+
+    def stop_tool(
+        self, run_id: UUID, output: ToolOutput, **attributes
+    ) -> ToolInvocation:
+        with self._lock:
+            invocation = self._tool_registry.pop(run_id)
+        invocation.end_time = time.time()
+        invocation.output = output
+        self._generator.export_tool(invocation)
+        return invocation
+
+    def fail_tool(
+        self, run_id: UUID, error: Error, **attributes
+    ) -> ToolInvocation:
+        with self._lock:
+            invocation = self._tool_registry.pop(run_id)
+        invocation.end_time = time.time()
+        invocation.attributes.update(**attributes)
+        self._generator.error_tool(error, invocation)
+        return invocation
+
+
+# Singleton accessor
+_default_client: TelemetryClient | None = None
+
+
+def get_telemetry_client(
+    exporter_type_full: bool = True, **kwargs
+) -> TelemetryClient:
+    global _default_client
+    if _default_client is None:
+        _default_client = TelemetryClient(
+            exporter_type_full=exporter_type_full, **kwargs
+        )
+    return _default_client
+
+
+# Module-level convenience functions
+def llm_start(
+    prompts: List[Message],
+    run_id: UUID,
+    parent_run_id: Optional[UUID] = None,
+    tool_functions: Optional[List[ToolFunction]] = None,
+    **attributes,
+):
+    # TelemetryClient.start_llm requires tool_functions; default to an
+    # empty list so callers without tools do not hit a TypeError.
+    return get_telemetry_client().start_llm(
+        prompts=prompts,
+        tool_functions=tool_functions or [],
+        run_id=run_id,
+        parent_run_id=parent_run_id,
+        **attributes,
+    )
+
+
+def llm_stop(
+    run_id: UUID, chat_generations: List[ChatGeneration], **attributes
+) -> LLMInvocation:
+    return get_telemetry_client().stop_llm(
+        run_id=run_id, chat_generations=chat_generations, **attributes
+    )
+
+
+def llm_fail(run_id: UUID, error: Error, **attributes) -> LLMInvocation:
+    return get_telemetry_client().fail_llm(
+        run_id=run_id, error=error, **attributes
+    )
diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/data.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/data.py
new file mode 100644
index 0000000000..2aec3394e4
--- /dev/null
+++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/data.py
@@ -0,0 +1,82 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+from typing import List, Literal, Optional, Type, TypedDict
+
+
+class TextPart(TypedDict):
+    type: Literal["text"]
+    content: str
+
+
+# Keep room for future part types without changing the return type;
+# tool support can later use Part = Union[TextPart, ToolPart]
+Part = TextPart
+
+
+class OtelMessage(TypedDict):
+    role: str
+    # role: Literal["user", "assistant", "system", "tool", "tool_message"]  # TODO: check semconvs for allowed roles
+    parts: List[Part]
+
+
+@dataclass
+class Message:
+    content: str
+    type: str
+    name: str
+
+    def _to_semconv_dict(self) -> OtelMessage:
+        """Convert the message to a dictionary suitable for OpenTelemetry semconvs.
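+
+        Example (illustrative):
+
+            Message(content="Hi", type="human", name="")._to_semconv_dict()
+            # -> {"role": "human", "parts": [{"content": "Hi", "type": "text"}]}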
+ + Ref: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/registry/attributes/gen-ai.md#gen-ai-input-messages + """ + + # TODO: Support tool_call and tool_call response + return { + "role": self.type, + "parts": [ + { + "content": self.content, + "type": "text", + } + ], + } + + +@dataclass +class ChatGeneration: + content: str + type: str + finish_reason: Optional[str] = None + + +@dataclass +class Error: + message: str + type: Type[BaseException] + + +@dataclass +class ToolOutput: + tool_call_id: str + content: str + + +@dataclass +class ToolFunction: + name: str + description: str + parameters: str diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/__init__.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/__init__.py new file mode 100644 index 0000000000..e7169e2a06 --- /dev/null +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/__init__.py @@ -0,0 +1,78 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Awaitable, Callable, Optional, TypeVar, Union + +from typing_extensions import ParamSpec + +from opentelemetry.util.genai.decorators.base import ( + entity_class, + entity_method, +) +from opentelemetry.util.genai.types import ( + ObserveSpanKindValues, +) + +P = ParamSpec("P") +R = TypeVar("R") +F = TypeVar("F", bound=Callable[P, Union[R, Awaitable[R]]]) + + +def tool( + name: Optional[str] = None, + method_name: Optional[str] = None, + tlp_span_kind: Optional[ + ObserveSpanKindValues + ] = ObserveSpanKindValues.TOOL, +) -> Callable[[F], F]: + def decorator(target): + # Check if target is a class + if inspect.isclass(target): + return entity_class( + name=name, + method_name=method_name, + tlp_span_kind=tlp_span_kind, + )(target) + # Target is a function/method + return entity_method( + name=name, + tlp_span_kind=tlp_span_kind, + )(target) + + return decorator + + +def llm( + name: Optional[str] = None, + model_name: Optional[str] = None, + method_name: Optional[str] = None, +) -> Callable[[F], F]: + def decorator(target): + # Check if target is a class + if inspect.isclass(target): + return entity_class( + name=name, + model_name=model_name, + method_name=method_name, + tlp_span_kind=ObserveSpanKindValues.LLM, + )(target) + # Target is a function/method + return entity_method( + name=name, + model_name=model_name, + tlp_span_kind=ObserveSpanKindValues.LLM, + )(target) + + return decorator diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/base.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/base.py new file mode 100644 index 0000000000..3fae61d844 --- /dev/null +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/base.py @@ -0,0 +1,565 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import json +import logging +import os +import traceback +from functools import wraps +from typing import ( + Any, + Awaitable, + Callable, + Dict, + List, + Optional, + TypeVar, + Union, +) + +from typing_extensions import ParamSpec + +from opentelemetry import context as context_api +from opentelemetry.util.genai.api import get_telemetry_client +from opentelemetry.util.genai.data import ChatGeneration, Message, ToolFunction +from opentelemetry.util.genai.decorators.helpers import ( + _get_original_function_name, + _is_async_generator, + _is_async_method, +) +from opentelemetry.util.genai.decorators.util import camel_to_snake +from opentelemetry.util.genai.generators import _get_property_value +from opentelemetry.util.genai.types import ( + ObserveSpanKindValues, +) + +P = ParamSpec("P") + +R = TypeVar("R") +F = TypeVar("F", bound=Callable[P, Union[R, Awaitable[R]]]) + +OTEL_INSTRUMENTATION_GENAI_EXPORTER = "OTEL_INSTRUMENTATION_GENAI_EXPORTER" + + +def should_emit_events() -> bool: + val = os.getenv( + OTEL_INSTRUMENTATION_GENAI_EXPORTER, "SpanMetricEventExporter" + ) + if val.strip().lower() == "spanmetriceventexporter": + return True + elif val.strip().lower() == "spanmetricexporter": + return False + else: + raise ValueError(f"Unknown exporter_type: {val}") + + +exporter_type_full = should_emit_events() + +# Instantiate a singleton TelemetryClient bound to our tracer & meter +telemetry = get_telemetry_client(exporter_type_full) + + +def _should_send_prompts(): + return ( + os.getenv("OBSERVE_TRACE_CONTENT") or "true" + ).lower() == "true" or context_api.get_value( + "override_enable_content_tracing" + ) + + +def _handle_llm_span_attributes(tlp_span_kind, args, kwargs, res=None): + """ + Add GenAI-specific attributes to span for LLM operations by delegating to TelemetryClient logic. + + Returns: + run_id (UUID): The run_id if tlp_span_kind is ObserveSpanKindValues.LLM, otherwise None. + + Note: + If tlp_span_kind is not ObserveSpanKindValues.LLM, this function returns None. + Downstream code should check for None before using run_id. 
+    """
+    # Import here to avoid circular import issues
+    from uuid import uuid4
+
+    # Extract messages and attributes as before
+    messages = _extract_messages_from_args_kwargs(args, kwargs)
+    tool_functions = _extract_tool_functions_from_args_kwargs(args, kwargs)
+    try:
+        run_id = uuid4()
+        telemetry.start_llm(
+            prompts=messages,
+            tool_functions=tool_functions,
+            run_id=run_id,
+            **_extract_llm_attributes_from_args_kwargs(args, kwargs, res),
+        )
+        return run_id  # Return run_id so it can be used later
+    except Exception as e:
+        logging.error(f"TelemetryClient.start_llm failed: {e}")
+        raise
+
+
+def _finish_llm_span(run_id, res, **attributes):
+    """Finish the LLM span with response data"""
+    if not run_id:
+        return
+    if res:
+        _extract_response_attributes(res, attributes)
+    chat_generations = _extract_chat_generations_from_response(res)
+    try:
+        telemetry.stop_llm(run_id, chat_generations, **attributes)
+    except Exception as e:
+        logging.warning(f"TelemetryClient.stop_llm failed: {e}")
+
+
+def _extract_messages_from_args_kwargs(args, kwargs):
+    """Extract messages from function arguments using patterns similar to exporters"""
+    messages = []
+
+    # Try different patterns to find messages
+    raw_messages = None
+    if kwargs.get("messages"):
+        raw_messages = kwargs["messages"]
+    elif kwargs.get("inputs"):  # Sometimes messages are in inputs
+        inputs = kwargs["inputs"]
+        if isinstance(inputs, dict) and "messages" in inputs:
+            raw_messages = inputs["messages"]
+    elif len(args) > 0:
+        # Try to find messages in args
+        for arg in args:
+            if hasattr(arg, "messages"):
+                raw_messages = arg.messages
+                break
+            elif isinstance(arg, list) and arg and hasattr(arg[0], "content"):
+                raw_messages = arg
+                break
+
+    # Convert to Message objects using similar logic as exporters
+    if raw_messages:
+        for msg in raw_messages:
+            content = _get_property_value(msg, "content")
+            msg_type = _get_property_value(msg, "type") or _get_property_value(
+                msg, "role"
+            )
+            # Convert 'human' to 'user' like in exporters
+            msg_type = "user" if msg_type == "human" else msg_type
+
+            if content and msg_type:
+                # Provide default values for required arguments
+                messages.append(
+                    Message(
+                        content=str(content),
+                        name="",  # Default empty name
+                        type=str(msg_type),
+                    )
+                )
+
+    return messages
+
+
+def _extract_tool_functions_from_args_kwargs(
+    args: Any, kwargs: Dict[str, Any]
+) -> List["ToolFunction"]:
+    """Collect tools from kwargs (tools/functions) or first arg attributes,
+    normalize each object/dict/callable to a ToolFunction (name, description, parameters={}),
+    skipping anything malformed.
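+
+    Example (illustrative):
+
+        _extract_tool_functions_from_args_kwargs(
+            (), {"tools": [{"name": "search", "description": "web search"}]}
+        )
+        # -> [ToolFunction(name="search", description="web search", parameters={})]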
+    """
+
+    tool_functions: List[ToolFunction] = []
+
+    # Try to find tools in various places
+    tools = None
+
+    # Check kwargs for tools
+    if kwargs.get("tools"):
+        tools = kwargs["tools"]
+    elif kwargs.get("functions"):
+        tools = kwargs["functions"]
+
+    # Check args for objects that might have tools
+    if not tools and len(args) > 0:
+        for arg in args:
+            if hasattr(arg, "tools"):
+                tools = getattr(arg, "tools", [])
+                break
+            elif hasattr(arg, "functions"):
+                tools = getattr(arg, "functions", [])
+                break
+
+    # Ensure tools is always a list for consistent processing
+    if tools and not isinstance(tools, list):
+        tools = [tools]
+
+    # Convert tools to ToolFunction objects
+    if tools:
+        for tool in tools:
+            try:
+                # Handle different tool formats
+                if hasattr(tool, "name"):
+                    # LangChain-style tool
+                    tool_name = tool.name
+                    tool_description = getattr(tool, "description", "")
+                elif isinstance(tool, dict) and "name" in tool:
+                    # Dict-style tool
+                    tool_name = tool["name"]
+                    tool_description = tool.get("description", "")
+                elif hasattr(tool, "__name__"):
+                    # Function-style tool
+                    tool_name = tool.__name__
+                    tool_description = getattr(tool, "__doc__", "") or ""
+                else:
+                    continue
+
+                tool_functions.append(
+                    ToolFunction(
+                        name=tool_name,
+                        description=tool_description,
+                        parameters={},
+                    )
+                )
+            except Exception:
+                # Skip tools that can't be processed
+                continue
+
+    return tool_functions
+
+
+def _extract_llm_attributes_from_args_kwargs(args, kwargs, res=None):
+    """Extract LLM attributes from function arguments"""
+    attributes = {}
+
+    # Extract model information
+    model = None
+    if kwargs.get("model"):
+        model = kwargs["model"]
+    elif kwargs.get("model_name"):
+        model = kwargs["model_name"]
+    elif len(args) > 0 and hasattr(args[0], "model"):
+        model = getattr(args[0], "model", None)
+    elif len(args) > 0 and isinstance(args[0], str):
+        model = args[0]  # Sometimes model is the first string argument
+
+    if model:
+        attributes["request_model"] = str(model)
+
+    # Extract system/framework information
+    system = None
+    framework = None
+
+    if kwargs.get("system"):
+        system = kwargs["system"]
+    elif args:
+        # Try to infer system from the class name; guard on args being
+        # non-empty so args[0] cannot raise IndexError
+        class_name = args[0].__class__.__name__.lower()
+        if "openai" in class_name or "gpt" in class_name:
+            system = "openai"
+        elif "anthropic" in class_name or "claude" in class_name:
+            system = "anthropic"
+        elif "google" in class_name or "gemini" in class_name:
+            system = "google"
+        elif "langchain" in class_name:
+            system = "langchain"
+            framework = "langchain"
+
+    if system is not None:
+        attributes["system"] = system
+
+    # An explicit framework kwarg wins; otherwise keep any inferred value
+    # (e.g. "langchain" above) and only then fall back to "unknown"
+    if "framework" in kwargs and kwargs["framework"] is not None:
+        framework = kwargs["framework"]
+    elif framework is None:
+        framework = "unknown"
+
+    if framework:
+        attributes["framework"] = framework
+
+    # Extract response attributes if available
+    if res:
+        _extract_response_attributes(res, attributes)
+
+    return attributes
+
+
+def _extract_response_attributes(res, attributes):
+    """Extract attributes from response similar to exporter logic"""
+    try:
+        # Check if res has response_metadata attribute directly
+        metadata = None
+        if hasattr(res, "response_metadata"):
+            metadata = res.response_metadata
+        elif isinstance(res, str):
+            # If res is a string, try to parse it as JSON
+            try:
+                parsed_res = json.loads(res)
+                metadata = parsed_res.get("response_metadata")
+            except Exception:
+                pass
+
+        # Extract token usage if available
+        if metadata and "token_usage" in metadata:
+            usage = metadata["token_usage"]
+            if
"prompt_tokens" in usage: + attributes["input_tokens"] = usage["prompt_tokens"] + if "completion_tokens" in usage: + attributes["output_tokens"] = usage["completion_tokens"] + + # Extract response model + if metadata and "model_name" in metadata: + attributes["response_model_name"] = metadata["model_name"] + + # Extract response ID + if hasattr(res, "id"): + attributes["response_id"] = res.id + except Exception: + # Silently ignore errors in extracting response attributes + pass + + +def _extract_chat_generations_from_response(res): + """ + Normalize various response shapes into a list of ChatGeneration objects. + Supported: + - OpenAI style: res.choices[*].message.content (+ role, finish_reason) + - Fallback: res.content (+ optional res.type, finish_reason defaults to "stop") + Returns an empty list on unrecognized structures or errors. Never raises. + All content/type values are coerced to str; finish_reason may be None. + """ + chat_generations = [] + + try: + # Handle OpenAI-style responses with choices + if hasattr(res, "choices") and res.choices: + for choice in res.choices: + content = None + finish_reason = None + msg_type = "assistant" + + if hasattr(choice, "message") and hasattr( + choice.message, "content" + ): + content = choice.message.content + if hasattr(choice.message, "role"): + msg_type = choice.message.role + + if hasattr(choice, "finish_reason"): + finish_reason = choice.finish_reason + + if content: + chat_generations.append( + ChatGeneration( + content=str(content), + finish_reason=finish_reason, + type=str(msg_type), + ) + ) + + # Handle responses with direct content attribute (e.g., some LangChain responses) + elif hasattr(res, "content"): + msg_type = "assistant" + if hasattr(res, "type"): + msg_type = res.type + + chat_generations.append( + ChatGeneration( + content=str(res.content), + finish_reason="stop", # May not be available + type=str(msg_type), + ) + ) + + except Exception: + # Silently ignore errors in extracting chat generations + pass + + return chat_generations + + +def _unwrap_structured_tool(fn): + # Unwraps StructuredTool or similar wrappers to get the underlying function + if hasattr(fn, "func") and callable(fn.func): + return fn.func + return fn + + +def entity_method( + name: Optional[str] = None, + model_name: Optional[str] = None, + tlp_span_kind: Optional[ObserveSpanKindValues] = None, +) -> Callable[[F], F]: + def decorate(fn: F) -> F: + fn = _unwrap_structured_tool(fn) + is_async = _is_async_method(fn) + entity_name = name or _get_original_function_name(fn) + if is_async: + if _is_async_generator(fn): + + @wraps(fn) + async def async_gen_wrap(*args: Any, **kwargs: Any) -> Any: + # add entity_name to kwargs + kwargs["system"] = entity_name + _handle_llm_span_attributes(tlp_span_kind, args, kwargs) + async for item in fn(*args, **kwargs): + yield item + + return async_gen_wrap + else: + + @wraps(fn) + async def async_wrap(*args, **kwargs): + try: + # Start LLM span before the call + run_id = None + if tlp_span_kind == ObserveSpanKindValues.LLM: + run_id = _handle_llm_span_attributes( + tlp_span_kind, args, kwargs + ) + + res = await fn(*args, **kwargs) + if ( + tlp_span_kind == ObserveSpanKindValues.LLM + and run_id + ): + kwargs["system"] = entity_name + # Extract attributes from args and kwargs + attributes = ( + _extract_llm_attributes_from_args_kwargs( + args, kwargs, res + ) + ) + + _finish_llm_span(run_id, res, **attributes) + + except Exception as e: + logging.error(traceback.format_exc()) + raise e + return res + + decorated = 
async_wrap
+        else:
+
+            @wraps(fn)
+            def sync_wrap(*args: Any, **kwargs: Any) -> Any:
+                try:
+                    # Start LLM span before the call
+                    run_id = None
+                    if tlp_span_kind == ObserveSpanKindValues.LLM:
+                        # Handle LLM span attributes
+                        run_id = _handle_llm_span_attributes(
+                            tlp_span_kind, args, kwargs
+                        )
+
+                    res = fn(*args, **kwargs)
+
+                    # Finish LLM span after the call
+                    if tlp_span_kind == ObserveSpanKindValues.LLM and run_id:
+                        kwargs["system"] = entity_name
+                        # Extract attributes from args and kwargs
+                        attributes = _extract_llm_attributes_from_args_kwargs(
+                            args, kwargs, res
+                        )
+
+                        _finish_llm_span(run_id, res, **attributes)
+
+                except Exception as e:
+                    logging.error(traceback.format_exc())
+                    raise e
+                return res
+
+            decorated = sync_wrap
+        # If the original fn was a StructuredTool, re-wrap
+        if hasattr(fn, "func") and callable(fn.func):
+            fn.func = decorated
+            return fn
+        return decorated
+
+    return decorate
+
+
+def entity_class(
+    name: Optional[str],
+    model_name: Optional[str],
+    method_name: Optional[str],
+    tlp_span_kind: Optional[ObserveSpanKindValues] = None,
+):
+    def decorator(cls):
+        task_name = name if name else camel_to_snake(cls.__qualname__)
+
+        methods_to_wrap = []
+
+        if method_name:
+            # Specific method specified - existing behavior
+            methods_to_wrap = [method_name]
+        else:
+            # No method specified - wrap all public methods defined in this class
+            for attr_name in dir(cls):
+                if (
+                    not attr_name.startswith(
+                        "_"
+                    )  # Skip private/built-in methods
+                    and attr_name != "mro"  # Skip class method
+                    and hasattr(cls, attr_name)
+                ):
+                    attr = getattr(cls, attr_name)
+                    # Only wrap functions defined in this class (not inherited methods or built-ins)
+                    if (
+                        inspect.isfunction(
+                            attr
+                        )  # Functions defined in the class
+                        and not isinstance(
+                            attr, (classmethod, staticmethod, property)
+                        )
+                        and hasattr(
+                            attr, "__qualname__"
+                        )  # Has qualname attribute
+                        and attr.__qualname__.startswith(
+                            cls.__name__ + "."
+ ) # Defined in this class + ): + # Additional check: ensure the function has a proper signature with 'self' parameter + try: + sig = inspect.signature(attr) + params = list(sig.parameters.keys()) + if params and params[0] == "self": + methods_to_wrap.append(attr_name) + except (ValueError, TypeError): + # Skip methods that can't be inspected + continue + + # Wrap all detected methods + for method_to_wrap in methods_to_wrap: + if hasattr(cls, method_to_wrap): + original_method = getattr(cls, method_to_wrap) + # Only wrap actual functions defined in this class + unwrapped_method = _unwrap_structured_tool(original_method) + if inspect.isfunction(unwrapped_method): + try: + # Verify the method has a proper signature + sig = inspect.signature(unwrapped_method) + wrapped_method = entity_method( + name=f"{task_name}.{method_to_wrap}", + model_name=model_name, + tlp_span_kind=tlp_span_kind, + )(unwrapped_method) + # Set the wrapped method on the class + setattr(cls, method_to_wrap, wrapped_method) + except Exception: + # Don't wrap methods that can't be properly decorated + continue + + return cls + + return decorator diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/helpers.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/helpers.py new file mode 100644 index 0000000000..bf79b95fd7 --- /dev/null +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/helpers.py @@ -0,0 +1,406 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
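+
+# Illustrative sketch: _is_async_method below looks through common wrapper
+# attributes (__wrapped__, func, _func, function), so a wrapper object that
+# exposes the original coroutine function via .func is still detected:
+#
+#     async def _acall(): ...
+#
+#     class Wrapper:                # hypothetical wrapper type
+#         func = staticmethod(_acall)
+#
+#     _is_async_method(_acall)      # True (coroutine function)
+#     _is_async_method(Wrapper())   # True, via the .func attribute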
+
+import inspect
+import logging
+
+
+def _is_async_method(fn):
+    # check if coroutine function or async generator (example: using async & yield)
+    if inspect.iscoroutinefunction(fn) or inspect.isasyncgenfunction(fn):
+        return True
+
+    # Check if this is a wrapped function that might hide the original async nature
+    # Look for common wrapper attributes that might contain the original function
+    for attr_name in ["__wrapped__", "func", "_func", "function"]:
+        if hasattr(fn, attr_name):
+            wrapped_fn = getattr(fn, attr_name)
+            if wrapped_fn and callable(wrapped_fn):
+                if inspect.iscoroutinefunction(
+                    wrapped_fn
+                ) or inspect.isasyncgenfunction(wrapped_fn):
+                    return True
+                # Recursively check in case of multiple levels of wrapping
+                if _is_async_method(wrapped_fn):
+                    return True
+
+    return False
+
+
+def _is_async_generator(fn):
+    """Check if function is an async generator, looking through wrapped functions"""
+    if inspect.isasyncgenfunction(fn):
+        return True
+
+    # Check if this is a wrapped function that might hide the original async generator nature
+    for attr_name in ["__wrapped__", "func", "_func", "function"]:
+        if hasattr(fn, attr_name):
+            wrapped_fn = getattr(fn, attr_name)
+            if wrapped_fn and callable(wrapped_fn):
+                if inspect.isasyncgenfunction(wrapped_fn):
+                    return True
+                # Recursively check in case of multiple levels of wrapping
+                if _is_async_generator(wrapped_fn):
+                    return True
+
+    return False
+
+
+def _get_original_function_name(fn):
+    """Extract the original function name from potentially wrapped functions"""
+    if hasattr(fn, "__qualname__") and fn.__qualname__:
+        return fn.__qualname__
+
+    # Look for the original function in common wrapper attributes
+    for attr_name in ["__wrapped__", "func", "_func", "function"]:
+        if hasattr(fn, attr_name):
+            wrapped_fn = getattr(fn, attr_name)
+            if wrapped_fn and callable(wrapped_fn):
+                if (
+                    hasattr(wrapped_fn, "__qualname__")
+                    and wrapped_fn.__qualname__
+                ):
+                    return wrapped_fn.__qualname__
+                # Recursively check in case of multiple levels of wrapping
+                result = _get_original_function_name(wrapped_fn)
+                if result:
+                    return result
+
+    # Fallback to function name if qualname is not available
+    return getattr(fn, "__name__", "unknown_function")
+
+
+def _extract_tool_functions_from_args_kwargs(args, kwargs):
+    """Extract tool functions from function arguments"""
+    # ToolFunction is defined in this package's data module
+    from opentelemetry.util.genai.data import ToolFunction
+
+    tool_functions = []
+
+    # Try to find tools in various places
+    tools = None
+
+    # Check kwargs for tools
+    if kwargs.get("tools"):
+        tools = kwargs["tools"]
+    elif kwargs.get("functions"):
+        tools = kwargs["functions"]
+
+    # Check args for objects that might have tools
+    if not tools and len(args) > 0:
+        for arg in args:
+            if hasattr(arg, "tools"):
+                tools = getattr(arg, "tools", [])
+                break
+            elif hasattr(arg, "functions"):
+                tools = getattr(arg, "functions", [])
+                break
+
+    # Convert tools to ToolFunction objects
+    if tools:
+        for tool in tools:
+            try:
+                # Handle different tool formats
+                if hasattr(tool, "name"):
+                    # LangChain-style tool
+                    tool_name = tool.name
+                    tool_description = getattr(tool, "description", "")
+                elif isinstance(tool, dict) and "name" in tool:
+                    # Dict-style tool
+                    tool_name = tool["name"]
+                    tool_description = tool.get("description", "")
+                elif hasattr(tool, "__name__"):
+                    # Function-style tool
+                    tool_name = tool.__name__
+                    tool_description = getattr(tool, "__doc__", "") or ""
+                else:
+                    continue
+
+                tool_functions.append(
+                    ToolFunction(
+                        name=tool_name,
+                        description=tool_description,
+                        parameters={},  # Add
parameter extraction if needed + ) + ) + except Exception: + # Skip tools that can't be processed + continue + + return tool_functions + + +def _find_llm_instance(args, kwargs): + """Find LLM instance using multiple approaches""" + llm_instance = None + + try: + import sys + + frame = sys._getframe(2) # Get the decorated function's frame + func = frame.f_code + + # Try to get the function object from the frame + if hasattr(frame, "f_globals"): + for name, obj in frame.f_globals.items(): + if ( + hasattr(obj, "__code__") + and obj.__code__ == func + and hasattr(obj, "llm") + ): + llm_instance = obj.llm + break + except Exception: + pass + + # Check kwargs for LLM instance + if not llm_instance: + for key, value in kwargs.items(): + if key.lower() in ["llm", "model", "client"] and _is_llm_instance( + value + ): + llm_instance = value + break + + # Check args for LLM instance + if not llm_instance: + for arg in args: + if _is_llm_instance(arg): + llm_instance = arg + break + # Check for bound tools that contain an LLM + elif hasattr(arg, "llm") and _is_llm_instance(arg.llm): + llm_instance = arg.llm + break + + # Frame inspection to look in local variables + if not llm_instance: + try: + import sys + + frame = sys._getframe( + 2 + ) # Go up 2 frames to get to the decorated function + local_vars = frame.f_locals + + # Look for ChatOpenAI or similar instances in local variables + for var_name, var_value in local_vars.items(): + if _is_llm_instance(var_value): + llm_instance = var_value + break + elif hasattr(var_value, "llm") and _is_llm_instance( + var_value.llm + ): + # Handle bound tools case + llm_instance = var_value.llm + break + except Exception: + pass + + return llm_instance + + +def _is_llm_instance(obj): + """Check if an object is an LLM instance""" + if not hasattr(obj, "__class__"): + return False + + class_name = obj.__class__.__name__ + module_name = ( + obj.__class__.__module__ + if hasattr(obj.__class__, "__module__") + else "" + ) + + # Check for common LLM class patterns + llm_patterns = [ + "ChatOpenAI", + "OpenAI", + "AzureOpenAI", + "AzureChatOpenAI", + "ChatAnthropic", + "Anthropic", + "ChatGoogleGenerativeAI", + "GoogleGenerativeAI", + "ChatVertexAI", + "VertexAI", + "ChatOllama", + "Ollama", + "ChatHuggingFace", + "HuggingFace", + "ChatCohere", + "Cohere", + ] + + return ( + any(pattern in class_name for pattern in llm_patterns) + or "langchain" in module_name.lower() + ) + + +def _extract_llm_config_attributes(llm_instance, attributes): + """Extract configuration attributes from LLM instance""" + try: + # Extract model + if hasattr(llm_instance, "model_name") and llm_instance.model_name: + attributes["request_model"] = str(llm_instance.model_name) + elif hasattr(llm_instance, "model") and llm_instance.model: + attributes["request_model"] = str(llm_instance.model) + + # Extract temperature + if ( + hasattr(llm_instance, "temperature") + and llm_instance.temperature is not None + ): + attributes["request_temperature"] = float(llm_instance.temperature) + + # Extract max_tokens + if ( + hasattr(llm_instance, "max_tokens") + and llm_instance.max_tokens is not None + ): + attributes["request_max_tokens"] = int(llm_instance.max_tokens) + + # Extract top_p + if hasattr(llm_instance, "top_p") and llm_instance.top_p is not None: + attributes["request_top_p"] = float(llm_instance.top_p) + + # Extract top_k + if hasattr(llm_instance, "top_k") and llm_instance.top_k is not None: + attributes["request_top_k"] = int(llm_instance.top_k) + + # Extract frequency_penalty + if ( + 
hasattr(llm_instance, "frequency_penalty")
+            and llm_instance.frequency_penalty is not None
+        ):
+            attributes["request_frequency_penalty"] = float(
+                llm_instance.frequency_penalty
+            )
+
+        # Extract presence_penalty
+        if (
+            hasattr(llm_instance, "presence_penalty")
+            and llm_instance.presence_penalty is not None
+        ):
+            attributes["request_presence_penalty"] = float(
+                llm_instance.presence_penalty
+            )
+
+        # Extract seed
+        if hasattr(llm_instance, "seed") and llm_instance.seed is not None:
+            attributes["request_seed"] = int(llm_instance.seed)
+
+        # Extract stop sequences
+        if hasattr(llm_instance, "stop") and llm_instance.stop is not None:
+            stop = llm_instance.stop
+            if isinstance(stop, (list, tuple)):
+                attributes["request_stop_sequences"] = list(stop)
+            else:
+                attributes["request_stop_sequences"] = [str(stop)]
+        elif (
+            hasattr(llm_instance, "stop_sequences")
+            and llm_instance.stop_sequences is not None
+        ):
+            stop = llm_instance.stop_sequences
+            if isinstance(stop, (list, tuple)):
+                attributes["request_stop_sequences"] = list(stop)
+            else:
+                attributes["request_stop_sequences"] = [str(stop)]
+
+    except Exception as e:
+        logging.warning("Error extracting LLM config attributes: %s", e)
+
+
+def _extract_direct_parameters(args, kwargs, attributes):
+    """Fallback method to extract parameters directly from args/kwargs"""
+    # Temperature
+    temperature = kwargs.get("temperature")
+    if temperature is not None:
+        attributes["request_temperature"] = float(temperature)
+    elif hasattr(args[0] if args else None, "temperature"):
+        temperature = getattr(args[0], "temperature", None)
+        if temperature is not None:
+            attributes["request_temperature"] = float(temperature)
+
+    # Max tokens
+    max_tokens = kwargs.get("max_tokens") or kwargs.get(
+        "max_completion_tokens"
+    )
+    if max_tokens is not None:
+        attributes["request_max_tokens"] = int(max_tokens)
+    elif hasattr(args[0] if args else None, "max_tokens"):
+        max_tokens = getattr(args[0], "max_tokens", None)
+        if max_tokens is not None:
+            attributes["request_max_tokens"] = int(max_tokens)
+
+    # Top P
+    top_p = kwargs.get("top_p")
+    if top_p is not None:
+        attributes["request_top_p"] = float(top_p)
+    elif hasattr(args[0] if args else None, "top_p"):
+        top_p = getattr(args[0], "top_p", None)
+        if top_p is not None:
+            attributes["request_top_p"] = float(top_p)
+
+    # Top K
+    top_k = kwargs.get("top_k")
+    if top_k is not None:
+        attributes["request_top_k"] = int(top_k)
+    elif hasattr(args[0] if args else None, "top_k"):
+        top_k = getattr(args[0], "top_k", None)
+        if top_k is not None:
+            attributes["request_top_k"] = int(top_k)
+
+    # Frequency penalty
+    frequency_penalty = kwargs.get("frequency_penalty")
+    if frequency_penalty is not None:
+        attributes["request_frequency_penalty"] = float(frequency_penalty)
+    elif hasattr(args[0] if args else None, "frequency_penalty"):
+        frequency_penalty = getattr(args[0], "frequency_penalty", None)
+        if frequency_penalty is not None:
+            attributes["request_frequency_penalty"] = float(frequency_penalty)
+
+    # Presence penalty
+    presence_penalty = kwargs.get("presence_penalty")
+    if presence_penalty is not None:
+        attributes["request_presence_penalty"] = float(presence_penalty)
+    elif hasattr(args[0] if args else None, "presence_penalty"):
+        presence_penalty = getattr(args[0], "presence_penalty", None)
+        if presence_penalty is not None:
+            attributes["request_presence_penalty"] = float(presence_penalty)
+
+    # Stop sequences
+    stop_sequences = kwargs.get("stop_sequences") or
kwargs.get("stop") + if stop_sequences is not None: + if isinstance(stop_sequences, (list, tuple)): + attributes["request_stop_sequences"] = list(stop_sequences) + else: + attributes["request_stop_sequences"] = [str(stop_sequences)] + elif hasattr(args[0] if args else None, "stop_sequences"): + stop_sequences = getattr(args[0], "stop_sequences", None) + if stop_sequences is not None: + if isinstance(stop_sequences, (list, tuple)): + attributes["request_stop_sequences"] = list(stop_sequences) + else: + attributes["request_stop_sequences"] = [str(stop_sequences)] + + # Seed + seed = kwargs.get("seed") + if seed is not None: + attributes["request_seed"] = int(seed) + elif hasattr(args[0] if args else None, "seed"): + seed = getattr(args[0], "seed", None) + if seed is not None: + attributes["request_seed"] = int(seed) diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/util.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/util.py new file mode 100644 index 0000000000..f5287e9ca1 --- /dev/null +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/decorators/util.py @@ -0,0 +1,155 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def _serialize_object(obj, max_depth=3, current_depth=0): + """ + Intelligently serialize an object to a more meaningful representation + """ + if current_depth > max_depth: + return f"<{type(obj).__name__}:max_depth_reached>" + + # Handle basic JSON-serializable types + if obj is None or isinstance(obj, (bool, int, float, str)): + return obj + + # Handle lists and tuples + if isinstance(obj, (list, tuple)): + try: + return [ + _serialize_object(item, max_depth, current_depth + 1) + for item in obj[:10] + ] # Limit to first 10 items + except Exception: + return f"<{type(obj).__name__}:length={len(obj)}>" + + # Handle dictionaries + if isinstance(obj, dict): + try: + serialized = {} + for key, value in list(obj.items())[ + :10 + ]: # Limit to first 10 items + serialized[str(key)] = _serialize_object( + value, max_depth, current_depth + 1 + ) + return serialized + except Exception: + return f"" + + # Handle common object types with meaningful attributes + try: + # Check class attributes first + class_attrs = {} + for attr_name in dir(type(obj)): + if ( + not attr_name.startswith("_") + and not callable(getattr(type(obj), attr_name, None)) + and hasattr(obj, attr_name) + ): + try: + attr_value = getattr(obj, attr_name) + if not callable(attr_value): + class_attrs[attr_name] = _serialize_object( + attr_value, max_depth, current_depth + 1 + ) + if len(class_attrs) >= 5: # Limit attributes + break + except Exception: + continue + + # Check if object has a __dict__ with interesting attributes + instance_attrs = {} + if hasattr(obj, "__dict__"): + obj_dict = obj.__dict__ + if obj_dict: + # Extract meaningful attributes (skip private ones and callables) + for key, value in obj_dict.items(): + if not key.startswith("_") and not callable(value): + try: + instance_attrs[key] 
= _serialize_object( + value, max_depth, current_depth + 1 + ) + if len(instance_attrs) >= 5: # Limit attributes + break + except Exception: + continue + + # Combine class and instance attributes + all_attrs = {**class_attrs, **instance_attrs} + + if all_attrs: + return { + "__class__": type(obj).__name__, + "__module__": getattr(type(obj), "__module__", "unknown"), + "attributes": all_attrs, + } + + # Special handling for specific types + if hasattr(obj, "message") and hasattr(obj.message, "parts"): + # Handle RequestContext-like objects + try: + parts_content = [] + for part in obj.message.parts: + if hasattr(part, "root") and hasattr(part.root, "text"): + parts_content.append(part.root.text) + return { + "__class__": type(obj).__name__, + "message_content": parts_content, + } + except Exception: + pass + + # Check for common readable attributes + for attr in ["name", "id", "type", "value", "content", "text", "data"]: + if hasattr(obj, attr): + try: + attr_value = getattr(obj, attr) + if not callable(attr_value): + return { + "__class__": type(obj).__name__, + attr: _serialize_object( + attr_value, max_depth, current_depth + 1 + ), + } + except Exception: + continue + + # Fallback to class information + return { + "__class__": type(obj).__name__, + "__module__": getattr(type(obj), "__module__", "unknown"), + "__repr__": str(obj)[:100] + + ("..." if len(str(obj)) > 100 else ""), + } + + except Exception: + # Final fallback + return f"<{type(obj).__name__}:serialization_failed>" + + +def cameltosnake(camel_string: str) -> str: + if not camel_string: + return "" + elif camel_string[0].isupper(): + return f"_{camel_string[0].lower()}{cameltosnake(camel_string[1:])}" + else: + return f"{camel_string[0]}{cameltosnake(camel_string[1:])}" + + +def camel_to_snake(s): + if len(s) <= 1: + return s.lower() + + return cameltosnake(s[0].lower() + s[1:]) diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py index 7dd23affe2..b8372a2b88 100644 --- a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py @@ -15,6 +15,7 @@ """ Telemetry handler for GenAI invocations. + This module exposes the `TelemetryHandler` class, which manages the lifecycle of GenAI (Generative AI) invocations and emits telemetry data (spans and related attributes). It supports starting, stopping, and failing LLM invocations. @@ -72,6 +73,7 @@ class TelemetryHandler: them as spans, metrics, and events. """ + def __init__(self, **kwargs: Any): self._generator = SpanGenerator(**kwargs) diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/instruments.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/instruments.py new file mode 100644 index 0000000000..619e1cda2d --- /dev/null +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/instruments.py @@ -0,0 +1,68 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py index 7dd23affe2..b8372a2b88 100644 --- a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/handler.py @@ -15,6 +15,7 @@ """ Telemetry handler for GenAI invocations. + This module exposes the `TelemetryHandler` class, which manages the lifecycle of GenAI (Generative AI) invocations and emits telemetry data (spans and related attributes). It supports starting, stopping, and failing LLM invocations. @@ -72,6 +73,7 @@ class TelemetryHandler: them as spans, metrics, and events. """ + def __init__(self, **kwargs: Any): self._generator = SpanGenerator(**kwargs) diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/instruments.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/instruments.py new file mode 100644 index 0000000000..619e1cda2d --- /dev/null +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/instruments.py @@ -0,0 +1,68 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from opentelemetry.metrics import Histogram, Meter +from opentelemetry.semconv._incubating.metrics import gen_ai_metrics + +# TODO: should this be in utils or passed to the telemetry client? +_GEN_AI_CLIENT_OPERATION_DURATION_BUCKETS = [ + 0.01, + 0.02, + 0.04, + 0.08, + 0.16, + 0.32, + 0.64, + 1.28, + 2.56, + 5.12, + 10.24, + 20.48, + 40.96, + 81.92, +] + +# TODO: should this be in utils or passed to the telemetry client? +_GEN_AI_CLIENT_TOKEN_USAGE_BUCKETS = [ + 1, + 4, + 16, + 64, + 256, + 1024, + 4096, + 16384, + 65536, + 262144, + 1048576, + 4194304, + 16777216, + 67108864, +] + + +class Instruments: + def __init__(self, meter: Meter): + self.operation_duration_histogram: Histogram = meter.create_histogram( + name=gen_ai_metrics.GEN_AI_CLIENT_OPERATION_DURATION, + description="GenAI operation duration", + unit="s", + explicit_bucket_boundaries_advisory=_GEN_AI_CLIENT_OPERATION_DURATION_BUCKETS, + ) + self.token_usage_histogram: Histogram = meter.create_histogram( + name=gen_ai_metrics.GEN_AI_CLIENT_TOKEN_USAGE, + description="Measures number of input and output tokens used", + unit="{token}", + explicit_bucket_boundaries_advisory=_GEN_AI_CLIENT_TOKEN_USAGE_BUCKETS, + )
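+ + +# Illustrative usage (assumes a configured MeterProvider; attribute values below are examples): +# from opentelemetry.metrics import get_meter +# instruments = Instruments(get_meter(__name__)) +# instruments.operation_duration_histogram.record(0.42, attributes={"gen_ai.operation.name": "chat"}) +# instruments.token_usage_histogram.record(128, attributes={"gen_ai.token.type": "input"})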
+ """ + + run_id: UUID + output: ToolOutput = None + parent_run_id: Optional[UUID] = None + start_time: float = field(default_factory=time.time) + end_time: float = None + input_str: Optional[str] = None + attributes: dict = field(default_factory=dict) + + @dataclass() class OutputMessage: role: str diff --git a/util/opentelemetry-util-genai/tests/test_utils.py b/util/opentelemetry-util-genai/tests/test_utils.py index 1cadf47a30..c95ca961a7 100644 --- a/util/opentelemetry-util-genai/tests/test_utils.py +++ b/util/opentelemetry-util-genai/tests/test_utils.py @@ -16,6 +16,9 @@ import os import unittest from unittest.mock import patch +from uuid import uuid4 + +import pytest from opentelemetry import trace from opentelemetry.instrumentation._semconv import ( diff --git a/uv.lock b/uv.lock index 7d46cd4836..3234eb5980 100644 --- a/uv.lock +++ b/uv.lock @@ -13,6 +13,7 @@ resolution-markers = [ members = [ "opentelemetry-exporter-prometheus-remote-write", "opentelemetry-exporter-richconsole", + "opentelemetry-genai-sdk", "opentelemetry-instrumentation", "opentelemetry-instrumentation-aio-pika", "opentelemetry-instrumentation-aiohttp-client", @@ -1746,6 +1747,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, ] +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050 }, +] + [[package]] name = "iso8601" version = "2.1.0" @@ -2439,6 +2449,29 @@ requires-dist = [ { name = "rich", specifier = ">=10.0.0" }, ] +[[package]] +name = "opentelemetry-genai-sdk" +source = { editable = "instrumentation-genai/opentelemetry-genai-sdk" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] + +[package.optional-dependencies] +test = [ + { name = "pytest" }, +] + +[package.metadata] +requires-dist = [ + { name = "opentelemetry-api", git = "https://github.com/open-telemetry/opentelemetry-python?subdirectory=opentelemetry-api&branch=main" }, + { name = "opentelemetry-instrumentation", editable = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions", git = "https://github.com/open-telemetry/opentelemetry-python?subdirectory=opentelemetry-semantic-conventions&branch=main" }, + { name = "pytest", marker = "extra == 'test'", specifier = ">=7.0.0" }, +] +provides-extras = ["test"] + [[package]] name = "opentelemetry-instrumentation" source = { editable = "opentelemetry-instrumentation" } @@ -4169,6 +4202,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, 
] +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538 }, +] + [[package]] name = "prometheus-client" version = "0.22.0" @@ -4790,6 +4832,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, ] +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750 }, +] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -5271,6 +5331,45 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" }, ] +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077 }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429 }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067 }, + { url = 
"https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030 }, + { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898 }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894 }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319 }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273 }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310 }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309 }, + { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762 }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453 }, + { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486 }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349 }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159 }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243 }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645 }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584 }, + { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875 }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418 }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, + { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = 
"sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, +] + [[package]] name = "tornado" version = "6.5"