 import time
 from dataclasses import dataclass
 from enum import Enum
-from typing import Dict, Union
+from typing import Dict, Optional, Union

 from ldclient import Context, LDClient

@@ -21,7 +21,6 @@ class TokenMetrics:
     output: int  # type: ignore


-@dataclass
 class FeedbackKind(Enum):
     """
     Types of feedback that can be provided for AI operations.
@@ -131,6 +130,34 @@ def to_metrics(self) -> TokenMetrics:
         )


+class LDAIMetricSummary:
+    """
+    Summary of metrics which have been tracked.
+    """
+
+    def __init__(self):
+        self._duration = None
+        self._success = None
+        self._feedback = None
+        self._usage = None
+
+    @property
+    def duration(self) -> Optional[int]:
+        return self._duration
+
+    @property
+    def success(self) -> Optional[bool]:
+        return self._success
+
+    @property
+    def feedback(self) -> Optional[Dict[str, FeedbackKind]]:
+        return self._feedback
+
+    @property
+    def usage(self) -> Optional[Union[TokenUsage, BedrockTokenUsage]]:
+        return self._usage
+
+
 class LDAIConfigTracker:
     """
     Tracks configuration and usage metrics for LaunchDarkly AI operations.
@@ -147,10 +174,11 @@ def __init__(
         :param config_key: Configuration key for tracking.
         :param context: Context for evaluation.
         """
-        self.ld_client = ld_client
-        self.variation_key = variation_key
-        self.config_key = config_key
-        self.context = context
+        self._ld_client = ld_client
+        self._variation_key = variation_key
+        self._config_key = config_key
+        self._context = context
+        self._summary = LDAIMetricSummary()

     def __get_track_data(self):
         """
@@ -159,8 +187,8 @@ def __get_track_data(self):
         :return: Dictionary containing variation and config keys.
         """
         return {
-            'variationKey': self.variation_key,
-            'configKey': self.config_key,
+            'variationKey': self._variation_key,
+            'configKey': self._config_key,
         }

     def track_duration(self, duration: int) -> None:
@@ -169,8 +197,9 @@ def track_duration(self, duration: int) -> None:

         :param duration: Duration in milliseconds.
         """
-        self.ld_client.track(
-            '$ld:ai:duration:total', self.context, self.__get_track_data(), duration
+        self._summary._duration = duration
+        self._ld_client.track(
+            '$ld:ai:duration:total', self._context, self.__get_track_data(), duration
         )

     def track_duration_of(self, func):
@@ -193,17 +222,18 @@ def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:

         :param feedback: Dictionary containing feedback kind.
         """
+        self._summary._feedback = feedback
         if feedback['kind'] == FeedbackKind.Positive:
-            self.ld_client.track(
+            self._ld_client.track(
                 '$ld:ai:feedback:user:positive',
-                self.context,
+                self._context,
                 self.__get_track_data(),
                 1,
             )
         elif feedback['kind'] == FeedbackKind.Negative:
-            self.ld_client.track(
+            self._ld_client.track(
                 '$ld:ai:feedback:user:negative',
-                self.context,
+                self._context,
                 self.__get_track_data(),
                 1,
             )
@@ -212,8 +242,9 @@ def track_success(self) -> None:
         """
         Track a successful AI generation.
         """
-        self.ld_client.track(
-            '$ld:ai:generation', self.context, self.__get_track_data(), 1
+        self._summary._success = True
+        self._ld_client.track(
+            '$ld:ai:generation', self._context, self.__get_track_data(), 1
         )

     def track_openai_metrics(self, func):
@@ -253,25 +284,34 @@ def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None:

         :param tokens: Token usage data from either custom, OpenAI, or Bedrock sources.
         """
+        self._summary._usage = tokens
         token_metrics = tokens.to_metrics()
         if token_metrics.total > 0:
-            self.ld_client.track(
+            self._ld_client.track(
                 '$ld:ai:tokens:total',
-                self.context,
+                self._context,
                 self.__get_track_data(),
                 token_metrics.total,
             )
         if token_metrics.input > 0:
-            self.ld_client.track(
+            self._ld_client.track(
                 '$ld:ai:tokens:input',
-                self.context,
+                self._context,
                 self.__get_track_data(),
                 token_metrics.input,
             )
         if token_metrics.output > 0:
-            self.ld_client.track(
+            self._ld_client.track(
                 '$ld:ai:tokens:output',
-                self.context,
+                self._context,
                 self.__get_track_data(),
                 token_metrics.output,
             )
+
+    def get_summary(self) -> LDAIMetricSummary:
+        """
+        Get the current summary of AI metrics.
+
+        :return: Summary of AI metrics.
+        """
+        return self._summary
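
For orientation, here is a minimal usage sketch of the new summary behaviour introduced by this change. It is not part of the diff: the module path (ldai.tracker), the SDK key, the context key, and the variation/config names are placeholders, and in normal use the tracker would be handed out by the AI config client rather than constructed directly.

# Minimal sketch, assuming the tracker lives in a module importable as ldai.tracker
# and that LDAIConfigTracker can be constructed directly for demonstration purposes.
import ldclient
from ldclient import Context
from ldclient.config import Config

from ldai.tracker import FeedbackKind, LDAIConfigTracker  # assumed module path

ldclient.set_config(Config('sdk-key-placeholder'))  # placeholder SDK key
client = ldclient.get()
context = Context.builder('user-key-placeholder').build()

# Placeholder variation/config keys; normally supplied by the AI config client.
tracker = LDAIConfigTracker(client, 'variation-key', 'config-key', context)

tracker.track_duration(1250)  # milliseconds
tracker.track_success()
tracker.track_feedback({'kind': FeedbackKind.Positive})

# get_summary() returns everything tracked on this instance so far.
summary = tracker.get_summary()
print(summary.duration)  # 1250
print(summary.success)   # True
print(summary.feedback)  # {'kind': FeedbackKind.Positive}
print(summary.usage)     # None (no token usage tracked in this example)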