Skip to content

Commit 40e6c48

Browse files
committed
remove references to tool types
1 parent 00c2091 commit 40e6c48

File tree

5 files changed

+223
-1023
lines changed

5 files changed

+223
-1023
lines changed

util/opentelemetry-util-genai/src/opentelemetry/util/genai/api.py

Lines changed: 3 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,9 @@
2323
from opentelemetry.semconv.schemas import Schemas
2424
from opentelemetry.trace import get_tracer
2525

26-
from .data import ChatGeneration, Error, Message, ToolFunction, ToolOutput
26+
from .data import ChatGeneration, Error, Message
2727
from .exporters import SpanMetricEventExporter, SpanMetricExporter
28-
from .types import LLMInvocation, ToolInvocation
28+
from .types import LLMInvocation
2929
from .version import __version__
3030

3131

@@ -80,20 +80,17 @@ def __init__(self, exporter_type_full: bool = True, **kwargs):
8080
)
8181

8282
self._llm_registry: dict[UUID, LLMInvocation] = {}
83-
self._tool_registry: dict[UUID, ToolInvocation] = {}
8483
self._lock = Lock()
8584

8685
def start_llm(
8786
self,
8887
prompts: List[Message],
89-
tool_functions: List[ToolFunction],
9088
run_id: UUID,
9189
parent_run_id: Optional[UUID] = None,
9290
**attributes,
9391
):
9492
invocation = LLMInvocation(
9593
messages=prompts,
96-
tool_functions=tool_functions,
9794
run_id=run_id,
9895
parent_run_id=parent_run_id,
9996
attributes=attributes,
@@ -122,47 +119,10 @@ def fail_llm(
122119
with self._lock:
123120
invocation = self._llm_registry.pop(run_id)
124121
invocation.end_time = time.time()
125-
invocation.attributes.update(**attributes)
122+
invocation.attributes.update(attributes)
126123
self._exporter.error_llm(error, invocation)
127124
return invocation
128125

129-
def start_tool(
130-
self,
131-
input_str: str,
132-
run_id: UUID,
133-
parent_run_id: Optional[UUID] = None,
134-
**attributes,
135-
):
136-
invocation = ToolInvocation(
137-
input_str=input_str,
138-
run_id=run_id,
139-
parent_run_id=parent_run_id,
140-
attributes=attributes,
141-
)
142-
with self._lock:
143-
self._tool_registry[invocation.run_id] = invocation
144-
self._exporter.init_tool(invocation)
145-
146-
def stop_tool(
147-
self, run_id: UUID, output: ToolOutput, **attributes
148-
) -> ToolInvocation:
149-
with self._lock:
150-
invocation = self._tool_registry.pop(run_id)
151-
invocation.end_time = time.time()
152-
invocation.output = output
153-
self._exporter.export_tool(invocation)
154-
return invocation
155-
156-
def fail_tool(
157-
self, run_id: UUID, error: Error, **attributes
158-
) -> ToolInvocation:
159-
with self._lock:
160-
invocation = self._tool_registry.pop(run_id)
161-
invocation.end_time = time.time()
162-
invocation.attributes.update(**attributes)
163-
self._exporter.error_tool(error, invocation)
164-
return invocation
165-
166126

167127
# Singleton accessor
168128
_default_client: TelemetryClient | None = None

util/opentelemetry-util-genai/src/opentelemetry/util/genai/data.py

Lines changed: 1 addition & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -1,26 +1,4 @@
1-
from dataclasses import dataclass, field
2-
from typing import List
3-
4-
5-
@dataclass
6-
class ToolOutput:
7-
tool_call_id: str
8-
content: str
9-
10-
11-
@dataclass
12-
class ToolFunction:
13-
name: str
14-
description: str
15-
parameters: str
16-
17-
18-
@dataclass
19-
class ToolFunctionCall:
20-
id: str
21-
name: str
22-
arguments: str
23-
type: str
1+
from dataclasses import dataclass
242

253

264
@dataclass
@@ -29,15 +7,13 @@ class Message:
297
type: str
308
name: str
319
tool_call_id: str
32-
tool_function_calls: List[ToolFunctionCall] = field(default_factory=list)
3310

3411

3512
@dataclass
3613
class ChatGeneration:
3714
content: str
3815
type: str
3916
finish_reason: str = None
40-
tool_function_calls: List[ToolFunctionCall] = field(default_factory=list)
4117

4218

4319
@dataclass
Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
# Copyright The OpenTelemetry Authors
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
from abc import ABC, abstractmethod
from typing import Dict, Optional, Type

from .types import LLMInvocation
18+
19+
20+
class EvaluationResult:
    """
    Standardized result for any GenAI evaluation.

    Attributes:
        score: Numeric evaluation score produced by the backend.
        details: Backend-specific metadata about the evaluation
            (e.g. the method used). Never ``None``.
    """

    def __init__(self, score: float, details: Optional[dict] = None):
        self.score = score
        # A falsy details value (None or {}) is normalized to a fresh dict
        # so callers can always mutate self.details safely.
        self.details = details or {}

    def __repr__(self) -> str:
        # Debug-friendly representation; no behavioral significance.
        return f"{type(self).__name__}(score={self.score!r}, details={self.details!r})"
28+
29+
30+
class Evaluator(ABC):
    """
    Abstract base class for GenAI evaluation backends.

    Concrete subclasses implement :meth:`evaluate` to score a finished
    LLM invocation.
    """

    @abstractmethod
    def evaluate(self, invocation: LLMInvocation) -> EvaluationResult:
        """Evaluate a completed LLMInvocation and return a result."""
41+
42+
43+
class DeepEvalsEvaluator(Evaluator):
    """
    Evaluator backed by the DeepEvals LLM-as-judge library.
    """

    def __init__(self, config: dict = None):
        # Backend configuration (model choice, API keys, ...); defaults to
        # an empty dict when none is supplied.
        self.config = config or {}

    def evaluate(self, invocation: LLMInvocation) -> EvaluationResult:
        """Score *invocation* via DeepEvals (currently a stub)."""
        # TODO: integrate with the deepevals SDK, e.g.:
        # result = deepevals.judge(invocation.prompt, invocation.response, **self.config)
        return EvaluationResult(score=0.0, details={"method": "deepevals"})
58+
59+
60+
class OpenLitEvaluator(Evaluator):
    """
    Evaluator backed by OpenLit or a similar OSS evaluation library.
    """

    def __init__(self, config: dict = None):
        # Backend-specific configuration; empty dict when none supplied.
        self.config = config or {}

    def evaluate(self, invocation: LLMInvocation) -> EvaluationResult:
        """Score *invocation* via OpenLit (currently a stub)."""
        # TODO: integrate with the openlit SDK.
        return EvaluationResult(score=0.0, details={"method": "openlit"})
73+
74+
75+
# Registry mapping a lowercase backend name to its Evaluator class.
# Looked up case-insensitively by get_evaluator().
EVALUATORS: Dict[str, Type[Evaluator]] = {
    "deepevals": DeepEvalsEvaluator,
    "openlit": OpenLitEvaluator,
}
80+
81+
82+
def get_evaluator(name: str, config: Optional[dict] = None) -> Evaluator:
    """
    Factory: return an evaluator instance by registry name.

    Args:
        name: Case-insensitive key into ``EVALUATORS`` (e.g. "deepevals").
        config: Optional backend-specific configuration, passed through to
            the evaluator's constructor.

    Raises:
        ValueError: If *name* is not a registered evaluator.
    """
    cls = EVALUATORS.get(name.lower())
    # Classes are always truthy, so an identity check against None is the
    # precise "not found" test here.
    if cls is None:
        raise ValueError(f"Unknown evaluator: {name}")
    return cls(config)

0 commit comments

Comments
 (0)