1
1
import time
2
2
from dataclasses import dataclass
3
3
from enum import Enum
4
- from typing import Dict , Union
4
+ from typing import Dict , Optional , Union
5
5
6
6
from ldclient import Context , LDClient
7
7
@@ -21,7 +21,6 @@ class TokenMetrics:
21
21
output : int # type: ignore
22
22
23
23
24
- @dataclass
25
24
class FeedbackKind (Enum ):
26
25
"""
27
26
Types of feedback that can be provided for AI operations.
@@ -131,6 +130,34 @@ def to_metrics(self) -> TokenMetrics:
131
130
)
132
131
133
132
133
class LDAIMetricSummary:
    """
    Summary of metrics which have been tracked.

    Values start as ``None`` and are filled in by the tracker as the
    corresponding ``track_*`` calls are made.
    """

    def __init__(self):
        # Nothing has been tracked yet; each field is populated lazily.
        self._duration = self._success = self._feedback = self._usage = None

    @property
    def duration(self) -> Optional[int]:
        # Last tracked duration in milliseconds, if any.
        return self._duration

    @property
    def success(self) -> Optional[bool]:
        # Whether a successful generation has been tracked.
        return self._success

    @property
    def feedback(self) -> Optional[Dict[str, FeedbackKind]]:
        # Last tracked user feedback payload, if any.
        return self._feedback

    @property
    def usage(self) -> Optional[Union[TokenUsage, BedrockTokenUsage]]:
        # Last tracked token-usage record, if any.
        return self._usage
160
+
134
161
class LDAIConfigTracker :
135
162
"""
136
163
Tracks configuration and usage metrics for LaunchDarkly AI operations.
@@ -147,10 +174,11 @@ def __init__(
147
174
:param config_key: Configuration key for tracking.
148
175
:param context: Context for evaluation.
149
176
"""
150
- self .ld_client = ld_client
151
- self .variation_key = variation_key
152
- self .config_key = config_key
153
- self .context = context
177
+ self ._ld_client = ld_client
178
+ self ._variation_key = variation_key
179
+ self ._config_key = config_key
180
+ self ._context = context
181
+ self ._summary = LDAIMetricSummary ()
154
182
155
183
def __get_track_data (self ):
156
184
"""
@@ -159,8 +187,8 @@ def __get_track_data(self):
159
187
:return: Dictionary containing variation and config keys.
160
188
"""
161
189
return {
162
- 'variationKey' : self .variation_key ,
163
- 'configKey' : self .config_key ,
190
+ 'variationKey' : self ._variation_key ,
191
+ 'configKey' : self ._config_key ,
164
192
}
165
193
166
194
def track_duration (self , duration : int ) -> None :
@@ -169,8 +197,9 @@ def track_duration(self, duration: int) -> None:
169
197
170
198
:param duration: Duration in milliseconds.
171
199
"""
172
- self .ld_client .track (
173
- '$ld:ai:duration:total' , self .context , self .__get_track_data (), duration
200
+ self ._summary ._duration = duration
201
+ self ._ld_client .track (
202
+ '$ld:ai:duration:total' , self ._context , self .__get_track_data (), duration
174
203
)
175
204
176
205
def track_duration_of (self , func ):
@@ -193,17 +222,18 @@ def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
193
222
194
223
:param feedback: Dictionary containing feedback kind.
195
224
"""
225
+ self ._summary ._feedback = feedback
196
226
if feedback ['kind' ] == FeedbackKind .Positive :
197
- self .ld_client .track (
227
+ self ._ld_client .track (
198
228
'$ld:ai:feedback:user:positive' ,
199
- self .context ,
229
+ self ._context ,
200
230
self .__get_track_data (),
201
231
1 ,
202
232
)
203
233
elif feedback ['kind' ] == FeedbackKind .Negative :
204
- self .ld_client .track (
234
+ self ._ld_client .track (
205
235
'$ld:ai:feedback:user:negative' ,
206
- self .context ,
236
+ self ._context ,
207
237
self .__get_track_data (),
208
238
1 ,
209
239
)
@@ -212,8 +242,9 @@ def track_success(self) -> None:
212
242
"""
213
243
Track a successful AI generation.
214
244
"""
215
- self .ld_client .track (
216
- '$ld:ai:generation' , self .context , self .__get_track_data (), 1
245
+ self ._summary ._success = True
246
+ self ._ld_client .track (
247
+ '$ld:ai:generation' , self ._context , self .__get_track_data (), 1
217
248
)
218
249
219
250
def track_openai_metrics (self , func ):
@@ -253,25 +284,34 @@ def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None:
253
284
254
285
:param tokens: Token usage data from either custom, OpenAI, or Bedrock sources.
255
286
"""
287
+ self ._summary ._usage = tokens
256
288
token_metrics = tokens .to_metrics ()
257
289
if token_metrics .total > 0 :
258
- self .ld_client .track (
290
+ self ._ld_client .track (
259
291
'$ld:ai:tokens:total' ,
260
- self .context ,
292
+ self ._context ,
261
293
self .__get_track_data (),
262
294
token_metrics .total ,
263
295
)
264
296
if token_metrics .input > 0 :
265
- self .ld_client .track (
297
+ self ._ld_client .track (
266
298
'$ld:ai:tokens:input' ,
267
- self .context ,
299
+ self ._context ,
268
300
self .__get_track_data (),
269
301
token_metrics .input ,
270
302
)
271
303
if token_metrics .output > 0 :
272
- self .ld_client .track (
304
+ self ._ld_client .track (
273
305
'$ld:ai:tokens:output' ,
274
- self .context ,
306
+ self ._context ,
275
307
self .__get_track_data (),
276
308
token_metrics .output ,
277
309
)
310
+
311
+ def get_summary (self ) -> LDAIMetricSummary :
312
+ """
313
+ Get the current summary of AI metrics.
314
+
315
+ :return: Summary of AI metrics.
316
+ """
317
+ return self ._summary
0 commit comments