11 changes: 6 additions & 5 deletions instrumentation-genai/README.md
@@ -1,6 +1,7 @@

-| Instrumentation | Supported Packages | Metrics support | Semconv status |
-| --------------- | ------------------ | --------------- | -------------- |
-| [opentelemetry-instrumentation-google-genai](./opentelemetry-instrumentation-google-genai) | google-genai >= 1.0.0 | No | development
-| [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development
-| [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development
+| Instrumentation | Supported Packages | Metrics support | Semconv status |
+|--------------------------------------------------------------------------------------------|---------------------------------|-----------------| -------------- |
+| [opentelemetry-instrumentation-google-genai](./opentelemetry-instrumentation-google-genai) | google-genai >= 1.0.0 | No | development
+| [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development
+| [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development
+| [opentelemetry-instrumentation-langchain](./opentelemetry-instrumentation-langchain) | langchain >= 0.3.21 | Yes | development
27 changes: 27 additions & 0 deletions instrumentation-genai/opentelemetry-genai-sdk/README.rst
@@ -0,0 +1,27 @@
Installation
============

Option 1: pip + requirements.txt
---------------------------------
::

    python3 -m venv .venv
    source .venv/bin/activate
    pip install -r requirements.txt

Option 2: Poetry
----------------
::

    poetry install

Running Tests
=============

After installing dependencies, simply run:

::

    pytest

This will discover and run ``tests/test_sdk.py``.
53 changes: 53 additions & 0 deletions instrumentation-genai/opentelemetry-genai-sdk/pyproject.toml
@@ -0,0 +1,53 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "opentelemetry-genai-sdk"
dynamic = ["version"]
description = "OpenTelemetry GenAI SDK"
readme = "README.rst"
license = "Apache-2.0"
requires-python = ">=3.9"
authors = [
{ name = "OpenTelemetry Authors", email = "[email protected]" },
]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
]
dependencies = [
"opentelemetry-api ~= 1.36.0",
"opentelemetry-instrumentation ~= 0.57b0",
"opentelemetry-semantic-conventions ~= 0.57b0",
]

[project.optional-dependencies]
test = [
"pytest>=7.0.0",
]
# evaluation = ["deepevals>=0.1.0", "openlit-sdk>=0.1.0"]

[project.urls]
Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-genai-sdk"
Repository = "https://github.com/open-telemetry/opentelemetry-python-contrib"

[tool.hatch.version]
path = "src/opentelemetry/genai/sdk/version.py"

[tool.hatch.build.targets.sdist]
include = [
"/src",
"/tests",
]

[tool.hatch.build.targets.wheel]
packages = ["src/opentelemetry"]
10 changes: 10 additions & 0 deletions instrumentation-genai/opentelemetry-genai-sdk/requirements.txt
@@ -0,0 +1,10 @@
# OpenTelemetry SDK
opentelemetry-api>=1.34.0
opentelemetry-sdk>=1.34.0

# Testing
pytest>=7.0.0

# (Optional) evaluation libraries
# deepevals>=0.1.0
# openlit-sdk>=0.1.0
@@ -0,0 +1,130 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
from threading import Lock
from typing import List, Optional
from uuid import UUID

from .types import LLMInvocation, ToolInvocation
from .exporters import SpanMetricEventExporter, SpanMetricExporter
from .data import Message, ChatGeneration, Error, ToolOutput, ToolFunction

from opentelemetry.instrumentation.langchain.version import __version__
from opentelemetry.metrics import get_meter
from opentelemetry.trace import get_tracer
from opentelemetry._events import get_event_logger
from opentelemetry._logs import get_logger
from opentelemetry.semconv.schemas import Schemas


class TelemetryClient:
"""
High-level client managing GenAI invocation lifecycles and exporting
them as spans, metrics, and events.
"""
def __init__(self, exporter_type_full: bool = True, **kwargs):
tracer_provider = kwargs.get("tracer_provider")
self._tracer = get_tracer(
__name__, __version__, tracer_provider, schema_url=Schemas.V1_28_0.value
)

meter_provider = kwargs.get("meter_provider")
self._meter = get_meter(
__name__, __version__, meter_provider, schema_url=Schemas.V1_28_0.value
)

event_logger_provider = kwargs.get("event_logger_provider")
self._event_logger = get_event_logger(
__name__, __version__, event_logger_provider=event_logger_provider, schema_url=Schemas.V1_28_0.value
)

logger_provider = kwargs.get("logger_provider")
self._logger = get_logger(
__name__, __version__, logger_provider=logger_provider, schema_url=Schemas.V1_28_0.value
)

self._exporter = (
SpanMetricEventExporter(tracer=self._tracer, meter=self._meter, event_logger=self._event_logger, logger=self._event_logger)
if exporter_type_full
else SpanMetricExporter(tracer=self._tracer, meter=self._meter)
)

self._llm_registry: dict[UUID, LLMInvocation] = {}
self._tool_registry: dict[UUID, ToolInvocation] = {}
self._lock = Lock()

    def start_llm(self, prompts: List[Message], tool_functions: List[ToolFunction], run_id: UUID, parent_run_id: Optional[UUID] = None, **attributes):
        invocation = LLMInvocation(messages=prompts, tool_functions=tool_functions, run_id=run_id, parent_run_id=parent_run_id, attributes=attributes)
        with self._lock:
            self._llm_registry[invocation.run_id] = invocation
        self._exporter.init_llm(invocation)

    def stop_llm(self, run_id: UUID, chat_generations: List[ChatGeneration], **attributes) -> LLMInvocation:
        with self._lock:
            invocation = self._llm_registry.pop(run_id)
        invocation.end_time = time.time()
        invocation.chat_generations = chat_generations
        invocation.attributes.update(attributes)
        self._exporter.export_llm(invocation)
        return invocation

    def fail_llm(self, run_id: UUID, error: Error, **attributes) -> LLMInvocation:
        with self._lock:
            invocation = self._llm_registry.pop(run_id)
        invocation.end_time = time.time()
        invocation.attributes.update(**attributes)
        self._exporter.error_llm(error, invocation)
        return invocation

    def start_tool(self, input_str: str, run_id: UUID, parent_run_id: Optional[UUID] = None, **attributes):
        invocation = ToolInvocation(input_str=input_str, run_id=run_id, parent_run_id=parent_run_id, attributes=attributes)
        with self._lock:
            self._tool_registry[invocation.run_id] = invocation
        self._exporter.init_tool(invocation)

    def stop_tool(self, run_id: UUID, output: ToolOutput, **attributes) -> ToolInvocation:
        with self._lock:
            invocation = self._tool_registry.pop(run_id)
        invocation.end_time = time.time()
        invocation.output = output
        self._exporter.export_tool(invocation)
        return invocation

    def fail_tool(self, run_id: UUID, error: Error, **attributes) -> ToolInvocation:
        with self._lock:
            invocation = self._tool_registry.pop(run_id)
        invocation.end_time = time.time()
        invocation.attributes.update(**attributes)
        self._exporter.error_tool(error, invocation)
        return invocation

# Singleton accessor
_default_client: Optional[TelemetryClient] = None

def get_telemetry_client(exporter_type_full: bool = True, **kwargs) -> TelemetryClient:
    global _default_client
    if _default_client is None:
        _default_client = TelemetryClient(exporter_type_full=exporter_type_full, **kwargs)
    return _default_client

# Module-level convenience functions
def llm_start(prompts: List[Message], run_id: UUID, tool_functions: Optional[List[ToolFunction]] = None, parent_run_id: Optional[UUID] = None, **attributes):
    return get_telemetry_client().start_llm(prompts=prompts, tool_functions=tool_functions or [], run_id=run_id, parent_run_id=parent_run_id, **attributes)

def llm_stop(run_id: UUID, chat_generations: List[ChatGeneration], **attributes) -> LLMInvocation:
    return get_telemetry_client().stop_llm(run_id=run_id, chat_generations=chat_generations, **attributes)

def llm_fail(run_id: UUID, error: Error, **attributes) -> LLMInvocation:
    return get_telemetry_client().fail_llm(run_id=run_id, error=error, **attributes)
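
For orientation (not part of the diff), here is a minimal usage sketch of the module-level helpers above. The Message, ChatGeneration, and Error values mirror the dataclasses added elsewhere in this PR; the model name and attribute keys are illustrative only, and the actual LLM call is elided.

from uuid import uuid4

# Assumes Message, ChatGeneration, Error and the llm_* helpers are imported from this package.
run_id = uuid4()
prompt = Message(content="What is OpenTelemetry?", type="human", name="user", tool_call_id="")
llm_start(prompts=[prompt], run_id=run_id, request_model="example-model")  # request_model is an illustrative attribute

try:
    # ... invoke the underlying LLM here ...
    generation = ChatGeneration(content="OpenTelemetry is an observability framework.", type="ai", finish_reason="stop")
    llm_stop(run_id=run_id, chat_generations=[generation])
except Exception as exc:
    llm_fail(run_id=run_id, error=Error(message=str(exc), type=type(exc)))
    raise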
@@ -0,0 +1,41 @@
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class ToolOutput:
    tool_call_id: str
    content: str

@dataclass
class ToolFunction:
    name: str
    description: str
    parameters: str

@dataclass
class ToolFunctionCall:
    id: str
    name: str
    arguments: str
    type: str

@dataclass
class Message:
    content: str
    type: str
    name: str
    tool_call_id: str
    tool_function_calls: List[ToolFunctionCall] = field(default_factory=list)

@dataclass
class ChatGeneration:
    content: str
    type: str
    finish_reason: Optional[str] = None
    tool_function_calls: List[ToolFunctionCall] = field(default_factory=list)

@dataclass
class Error:
    message: str
    type: type[BaseException]
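
To make the relationships concrete, a small illustrative sketch (not part of the diff) of how these records could describe one tool call; every literal value, including the type strings, is made up for the example.

# A tool definition the model may call; parameters hold a JSON schema serialized to a string.
get_weather = ToolFunction(
    name="get_weather",
    description="Look up the current weather for a city",
    parameters='{"type": "object", "properties": {"city": {"type": "string"}}}',
)

# The model asks to invoke the tool ...
call = ToolFunctionCall(id="call_1", name="get_weather", arguments='{"city": "Paris"}', type="function")
assistant_msg = Message(content="", type="ai", name="assistant", tool_call_id="", tool_function_calls=[call])

# ... and the tool's reply is keyed back to that call id.
result = ToolOutput(tool_call_id="call_1", content="18°C and cloudy")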
@@ -0,0 +1,13 @@
from deepeval.models import DeepEvalBaseLLM
from deepeval.test_case import LLMTestCase
from deepeval.metrics import AnswerRelevancyMetric


def evaluate_answer_relevancy_metric(prompt: str, output: str, retrieval_context: list) -> AnswerRelevancyMetric:
    test_case = LLMTestCase(
        input=prompt,
        actual_output=output,
        retrieval_context=retrieval_context,
    )
    relevancy_metric = AnswerRelevancyMetric(threshold=0.5)
    relevancy_metric.measure(test_case)
    print(relevancy_metric.score, relevancy_metric.reason)
    return relevancy_metric
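
A hedged usage sketch (not part of the diff): the strings below are placeholders, and deepeval's AnswerRelevancyMetric normally needs an evaluation model configured (for example an OPENAI_API_KEY in the environment) before measure will run.

metric = evaluate_answer_relevancy_metric(
    prompt="What is the capital of France?",
    output="The capital of France is Paris.",
    retrieval_context=["Paris is the capital and largest city of France."],
)
if metric.score is not None and metric.score >= 0.5:
    print("Answer judged relevant:", metric.reason)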