1
+ import time
1
2
from typing import Dict , Union
2
3
from ldclient import Context , LDClient
3
- from ldai .tracking_utils import usage_to_token_metrics
4
- from ldai .types import BedrockTokenUsage , FeedbackKind , TokenUsage , UnderscoreTokenUsage
4
+ from ldai .types import BedrockTokenUsage , FeedbackKind , OpenAITokenUsage , TokenUsage , UnderscoreTokenUsage
5
5
6
6
class LDAIConfigTracker :
7
7
def __init__ (self , ld_client : LDClient , variation_id : str , config_key : str , context : Context ):
@@ -19,23 +19,37 @@ def get_track_data(self):
19
19
def track_duration(self, duration: int) -> None:
    """Record a duration metric for this AI config variation.

    Emits the '$ld:ai:duration:total' event through the LaunchDarkly
    client, tagged with this tracker's context and variation data.

    :param duration: Elapsed time, in milliseconds.
    """
    event_name = '$ld:ai:duration:total'
    track_data = self.get_track_data()
    self.ld_client.track(event_name, self.context, track_data, duration)
21
21
22
- def track_tokens (self , tokens : Union [TokenUsage , UnderscoreTokenUsage , BedrockTokenUsage ]) -> None :
23
- token_metrics = usage_to_token_metrics (tokens )
24
- if token_metrics ['total' ] > 0 :
25
- self .ld_client .track ('$ld:ai:tokens:total' , self .context , self .get_track_data (), token_metrics ['total' ])
26
- if token_metrics ['input' ] > 0 :
27
- self .ld_client .track ('$ld:ai:tokens:input' , self .context , self .get_track_data (), token_metrics ['input' ])
28
- if token_metrics ['output' ] > 0 :
29
- self .ld_client .track ('$ld:ai:tokens:output' , self .context , self .get_track_data (), token_metrics ['output' ])
22
def track_duration_of(self, func, *args, **kwargs):
    """Invoke *func*, record how long it took, and return its result.

    Runs ``func(*args, **kwargs)``, measures the elapsed time, reports it
    (in whole milliseconds) via :meth:`track_duration`, and passes the
    callable's return value through unchanged. If ``func`` raises, no
    duration is recorded and the exception propagates.

    :param func: The callable to invoke and time.
    :param args: Positional arguments forwarded to ``func``.
    :param kwargs: Keyword arguments forwarded to ``func``.
    :return: Whatever ``func`` returns.
    """
    # time.monotonic() is immune to system clock adjustments (NTP steps,
    # manual changes), unlike time.time(), so the measured interval can
    # never go backwards or come out negative.
    start_time = time.monotonic()
    result = func(*args, **kwargs)
    duration = int((time.monotonic() - start_time) * 1000)  # milliseconds
    self.track_duration(duration)
    return result
30
29
31
30
def track_error(self, error: int) -> None:
    """Record an error metric for this AI config variation.

    Emits the '$ld:ai:error' event through the LaunchDarkly client,
    tagged with this tracker's context and variation data.

    :param error: The error value to report.
    """
    track_data = self.get_track_data()
    self.ld_client.track('$ld:ai:error', self.context, track_data, error)
33
32
34
- def track_generation (self , generation : int ) -> None :
35
- self .ld_client .track ('$ld:ai:generation' , self .context , self .get_track_data (), generation )
36
-
37
33
def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
    """Record user feedback for this AI config variation.

    Emits '$ld:ai:feedback:user:positive' or
    '$ld:ai:feedback:user:negative' (with a value of 1) depending on
    ``feedback['kind']``. Any other kind is silently ignored.

    :param feedback: Mapping with a 'kind' entry holding a
        :class:`FeedbackKind` value. Raises ``KeyError`` if absent.
    """
    kind = feedback['kind']
    if kind == FeedbackKind.Positive:
        event_name = '$ld:ai:feedback:user:positive'
    elif kind == FeedbackKind.Negative:
        event_name = '$ld:ai:feedback:user:negative'
    else:
        # Unknown feedback kinds are ignored, matching the original
        # if/elif behavior.
        return
    self.ld_client.track(event_name, self.context, self.get_track_data(), 1)
38
+
39
def track_generation(self, generation: int) -> None:
    """Record a generation metric for this AI config variation.

    Emits the '$ld:ai:generation' event through the LaunchDarkly client,
    tagged with this tracker's context and variation data.

    :param generation: The generation count value to report.
    """
    track_data = self.get_track_data()
    self.ld_client.track('$ld:ai:generation', self.context, track_data, generation)
41
+
42
def track_openai(self, func, *args, **kwargs):
    """Invoke an OpenAI call, tracking both its duration and token usage.

    Times ``func(*args, **kwargs)`` via :meth:`track_duration_of`; if the
    response carries a truthy ``usage`` attribute, reports it through
    :meth:`track_tokens` wrapped in :class:`OpenAITokenUsage`.

    :param func: The OpenAI operation to invoke.
    :param args: Positional arguments forwarded to ``func``.
    :param kwargs: Keyword arguments forwarded to ``func``.
    :return: The response returned by ``func``.
    """
    response = self.track_duration_of(func, *args, **kwargs)
    if response.usage:
        self.track_tokens(OpenAITokenUsage(response.usage))
    return response
47
+
48
def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None:
    """Record token-usage metrics for this AI config variation.

    Converts *tokens* to a metrics dict via its ``to_metrics()`` method
    (expected keys: 'total', 'input', 'output') and emits one tracking
    event per strictly-positive count.

    :param tokens: A provider token-usage object exposing ``to_metrics()``.
    """
    token_metrics = tokens.to_metrics()
    # Each metric maps to its own event; zero/negative counts are skipped.
    for metric_key, event_name in (
        ('total', '$ld:ai:tokens:total'),
        ('input', '$ld:ai:tokens:input'),
        ('output', '$ld:ai:tokens:output'),
    ):
        count = token_metrics[metric_key]
        if count > 0:
            self.ld_client.track(event_name, self.context, self.get_track_data(), count)
0 commit comments