Commit 23e286f

feat: Add LDAIConfigTracker.get_summary method
1 parent fcc720a · commit 23e286f

File tree: 3 files changed (+162, -25 lines)


ldai/client.py

Lines changed: 3 additions & 3 deletions
@@ -129,7 +129,7 @@ class LDAIClient:
     """The LaunchDarkly AI SDK client object."""

     def __init__(self, client: LDClient):
-        self.client = client
+        self._client = client

     def config(
         self,
@@ -147,7 +147,7 @@ def config(
         :param variables: Additional variables for the model configuration.
         :return: The value of the model configuration along with a tracker used for gathering metrics.
         """
-        variation = self.client.variation(key, context, default_value.to_dict())
+        variation = self._client.variation(key, context, default_value.to_dict())

         all_variables = {}
         if variables:
@@ -184,7 +184,7 @@ def config(
         )

         tracker = LDAIConfigTracker(
-            self.client,
+            self._client,
             variation.get('_ldMeta', {}).get('variationKey', ''),
             key,
             context,

ldai/testing/test_tracker.py

Lines changed: 97 additions & 0 deletions
@@ -0,0 +1,97 @@
+from unittest.mock import MagicMock
+
+import pytest
+from ldclient import Config, Context, LDClient
+from ldclient.integrations.test_data import TestData
+
+from ldai.tracker import FeedbackKind, LDAIConfigTracker
+
+
+@pytest.fixture
+def td() -> TestData:
+    td = TestData.data_source()
+    td.update(
+        td.flag('model-config')
+        .variations(
+            {
+                'model': {'name': 'fakeModel', 'parameters': {'temperature': 0.5, 'maxTokens': 4096}, 'custom': {'extra-attribute': 'value'}},
+                'provider': {'name': 'fakeProvider'},
+                'messages': [{'role': 'system', 'content': 'Hello, {{name}}!'}],
+                '_ldMeta': {'enabled': True, 'variationKey': 'abcd'},
+            },
+            "green",
+        )
+        .variation_for_all(0)
+    )
+
+    return td
+
+
+@pytest.fixture
+def client(td: TestData) -> LDClient:
+    config = Config('sdk-key', update_processor_class=td, send_events=False)
+    client = LDClient(config=config)
+    client.track = MagicMock()  # type: ignore
+    return client
+
+
+def test_summary_starts_empty(client: LDClient):
+    context = Context.create('user-key')
+    tracker = LDAIConfigTracker(client, "variation-key", "config-key", context)
+
+    assert tracker.get_summary().duration is None
+    assert tracker.get_summary().feedback is None
+    assert tracker.get_summary().success is None
+    assert tracker.get_summary().usage is None
+
+
+def test_tracks_duration(client: LDClient):
+    context = Context.create('user-key')
+    tracker = LDAIConfigTracker(client, "variation-key", "config-key", context)
+    tracker.track_duration(100)
+
+    client.track.assert_called_with(  # type: ignore
+        '$ld:ai:duration:total',
+        context,
+        {'variationKey': 'variation-key', 'configKey': 'config-key'},
+        100
+    )
+
+    assert tracker.get_summary().duration == 100
+
+
+@pytest.mark.parametrize(
+    "kind,label",
+    [
+        pytest.param(FeedbackKind.Positive, "positive", id="positive"),
+        pytest.param(FeedbackKind.Negative, "negative", id="negative"),
+    ],
+)
+def test_tracks_feedback(client: LDClient, kind: FeedbackKind, label: str):
+    context = Context.create('user-key')
+    tracker = LDAIConfigTracker(client, "variation-key", "config-key", context)
+
+    tracker.track_feedback({'kind': kind})
+
+    client.track.assert_called_with(  # type: ignore
+        f'$ld:ai:feedback:user:{label}',
+        context,
+        {'variationKey': 'variation-key', 'configKey': 'config-key'},
+        1
+    )
+    assert tracker.get_summary().feedback == {'kind': kind}
+
+
+def test_tracks_success(client: LDClient):
+    context = Context.create('user-key')
+    tracker = LDAIConfigTracker(client, "variation-key", "config-key", context)
+    tracker.track_success()
+
+    client.track.assert_called_with(  # type: ignore
+        '$ld:ai:generation',
+        context,
+        {'variationKey': 'variation-key', 'configKey': 'config-key'},
+        1
+    )
+
+    assert tracker.get_summary().success is True

ldai/tracker.py

Lines changed: 62 additions & 22 deletions
@@ -1,7 +1,7 @@
 import time
 from dataclasses import dataclass
 from enum import Enum
-from typing import Dict, Union
+from typing import Dict, Optional, Union

 from ldclient import Context, LDClient

@@ -21,7 +21,6 @@ class TokenMetrics:
     output: int  # type: ignore


-@dataclass
 class FeedbackKind(Enum):
     """
     Types of feedback that can be provided for AI operations.
@@ -131,6 +130,34 @@ def to_metrics(self) -> TokenMetrics:
         )


+class LDAIMetricSummary:
+    """
+    Summary of metrics which have been tracked.
+    """
+
+    def __init__(self):
+        self._duration = None
+        self._success = None
+        self._feedback = None
+        self._usage = None
+
+    @property
+    def duration(self) -> Optional[int]:
+        return self._duration
+
+    @property
+    def success(self) -> Optional[bool]:
+        return self._success
+
+    @property
+    def feedback(self) -> Optional[Dict[str, FeedbackKind]]:
+        return self._feedback
+
+    @property
+    def usage(self) -> Optional[Union[TokenUsage, BedrockTokenUsage]]:
+        return self._usage
+
+
 class LDAIConfigTracker:
     """
     Tracks configuration and usage metrics for LaunchDarkly AI operations.
@@ -147,10 +174,11 @@ def __init__(
         :param config_key: Configuration key for tracking.
         :param context: Context for evaluation.
         """
-        self.ld_client = ld_client
-        self.variation_key = variation_key
-        self.config_key = config_key
-        self.context = context
+        self._ld_client = ld_client
+        self._variation_key = variation_key
+        self._config_key = config_key
+        self._context = context
+        self._summary = LDAIMetricSummary()

     def __get_track_data(self):
         """
@@ -159,8 +187,8 @@ def __get_track_data(self):
         :return: Dictionary containing variation and config keys.
         """
         return {
-            'variationKey': self.variation_key,
-            'configKey': self.config_key,
+            'variationKey': self._variation_key,
+            'configKey': self._config_key,
         }

     def track_duration(self, duration: int) -> None:
@@ -169,8 +197,9 @@ def track_duration(self, duration: int) -> None:

         :param duration: Duration in milliseconds.
         """
-        self.ld_client.track(
-            '$ld:ai:duration:total', self.context, self.__get_track_data(), duration
+        self._summary._duration = duration
+        self._ld_client.track(
+            '$ld:ai:duration:total', self._context, self.__get_track_data(), duration
         )

     def track_duration_of(self, func):
@@ -193,17 +222,18 @@ def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:

         :param feedback: Dictionary containing feedback kind.
         """
+        self._summary._feedback = feedback
         if feedback['kind'] == FeedbackKind.Positive:
-            self.ld_client.track(
+            self._ld_client.track(
                 '$ld:ai:feedback:user:positive',
-                self.context,
+                self._context,
                 self.__get_track_data(),
                 1,
             )
         elif feedback['kind'] == FeedbackKind.Negative:
-            self.ld_client.track(
+            self._ld_client.track(
                 '$ld:ai:feedback:user:negative',
-                self.context,
+                self._context,
                 self.__get_track_data(),
                 1,
             )
@@ -212,8 +242,9 @@ def track_success(self) -> None:
         """
         Track a successful AI generation.
         """
-        self.ld_client.track(
-            '$ld:ai:generation', self.context, self.__get_track_data(), 1
+        self._summary._success = True
+        self._ld_client.track(
+            '$ld:ai:generation', self._context, self.__get_track_data(), 1
         )

     def track_openai_metrics(self, func):
@@ -253,25 +284,34 @@ def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None:

         :param tokens: Token usage data from either custom, OpenAI, or Bedrock sources.
         """
+        self._summary._usage = tokens
         token_metrics = tokens.to_metrics()
         if token_metrics.total > 0:
-            self.ld_client.track(
+            self._ld_client.track(
                 '$ld:ai:tokens:total',
-                self.context,
+                self._context,
                 self.__get_track_data(),
                 token_metrics.total,
             )
         if token_metrics.input > 0:
-            self.ld_client.track(
+            self._ld_client.track(
                 '$ld:ai:tokens:input',
-                self.context,
+                self._context,
                 self.__get_track_data(),
                 token_metrics.input,
             )
        if token_metrics.output > 0:
-            self.ld_client.track(
+            self._ld_client.track(
                 '$ld:ai:tokens:output',
-                self.context,
+                self._context,
                 self.__get_track_data(),
                 token_metrics.output,
             )
+
+    def get_summary(self) -> LDAIMetricSummary:
+        """
+        Get the current summary of AI metrics.
+
+        :return: Summary of AI metrics.
+        """
+        return self._summary

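For context, a minimal usage sketch of the new get_summary() API, mirroring the tests in this commit. The TestData-backed client, SDK key, and variation/config keys below are placeholders taken from the test setup, not real values; in an application the tracker would normally come from LDAIClient.config() rather than being constructed directly.

from unittest.mock import MagicMock

from ldclient import Config, Context, LDClient
from ldclient.integrations.test_data import TestData

from ldai.tracker import FeedbackKind, LDAIConfigTracker

# Offline client backed by TestData, as in test_tracker.py; no events are sent.
td = TestData.data_source()
client = LDClient(config=Config('sdk-key', update_processor_class=td, send_events=False))
client.track = MagicMock()  # stub out event delivery for this sketch

context = Context.create('user-key')
tracker = LDAIConfigTracker(client, 'variation-key', 'config-key', context)

tracker.track_duration(250)                              # milliseconds
tracker.track_feedback({'kind': FeedbackKind.Positive})
tracker.track_success()

# get_summary() returns the LDAIMetricSummary accumulated by the calls above.
summary = tracker.get_summary()
assert summary.duration == 250
assert summary.feedback == {'kind': FeedbackKind.Positive}
assert summary.success is True
assert summary.usage is None  # no track_tokens() call was made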