6
6
7
7
@dataclass
class TokenMetrics:
    """
    Normalized token-usage metrics for a single AI operation.

    :param total: Total number of tokens used.
    :param input: Number of input (prompt) tokens.
    :param output: Number of output (completion) tokens.
    """
    total: int
    input: int
    output: int  # type: ignore
12
19
13
20
class FeedbackKind(Enum):
    """
    Types of feedback that can be provided for AI operations.
    """
    # Fix: a stray @dataclass decorator was removed — combining @dataclass
    # with Enum is unsupported (there are no annotated fields for it to
    # process, and the generated dunder methods conflict with Enum's own),
    # so the decorator was at best a no-op and at worst misleading.
    Positive = "positive"
    Negative = "negative"
17
27
18
28
@dataclass
class TokenUsage:
    """
    Tracks token usage for AI operations.

    :param total_tokens: Total number of tokens used.
    :param prompt_tokens: Number of tokens in the prompt.
    :param completion_tokens: Number of tokens in the completion.
    """
    total_tokens: int
    prompt_tokens: int
    completion_tokens: int

    def to_metrics(self) -> "TokenMetrics":
        """
        Convert token usage to metrics format.

        :return: TokenMetrics object containing usage data.
        """
        # Fix: the previous implementation subscripted self (self['total_tokens']),
        # which raises TypeError on a dataclass instance — attribute access is
        # required. It also returned a plain dict, while track_tokens reads
        # .total/.input/.output attributes; return TokenMetrics for consistency
        # with OpenAITokenUsage.to_metrics and BedrockTokenUsage.to_metrics.
        return TokenMetrics(
            total=self.total_tokens,
            input=self.prompt_tokens,
            output=self.completion_tokens,
        )
30
52
31
53
@dataclass
class LDOpenAIUsage:
    """
    LaunchDarkly-specific OpenAI usage tracking.

    :param total_tokens: Total number of tokens used.
    :param prompt_tokens: Number of tokens in the prompt.
    :param completion_tokens: Number of tokens in the completion.
    """
    total_tokens: int
    prompt_tokens: int
    completion_tokens: int
36
65
37
66
class OpenAITokenUsage:
    """
    Tracks OpenAI-specific token usage.
    """
    # Fix: a stray @dataclass decorator was removed — the class declares no
    # annotated fields and defines __init__ explicitly, so the decorator had
    # no effect and only misled readers about how instances are built.

    def __init__(self, data: LDOpenAIUsage):
        """
        Initialize OpenAI token usage tracking.

        :param data: OpenAI usage data.
        """
        self.total_tokens = data.total_tokens
        self.prompt_tokens = data.prompt_tokens
        self.completion_tokens = data.completion_tokens

    def to_metrics(self) -> TokenMetrics:
        """
        Convert OpenAI token usage to metrics format.

        :return: TokenMetrics object containing usage data.
        """
        # NOTE(review): the tail of this method was elided in the reviewed
        # view; the output= line follows the visible total=/input= pattern —
        # confirm against the full file.
        return TokenMetrics(
            total=self.total_tokens,
            input=self.prompt_tokens,
            output=self.completion_tokens,
        )
50
92
51
93
class BedrockTokenUsage:
    """
    Tracks AWS Bedrock-specific token usage.
    """
    # Fix: a stray @dataclass decorator was removed — the class declares no
    # annotated fields and defines __init__ explicitly, so the decorator was
    # a misleading no-op.

    def __init__(self, data: dict):
        """
        Initialize Bedrock token usage tracking.

        :param data: Dictionary containing Bedrock usage data; missing
            counters default to 0.
        """
        # Attribute names intentionally mirror the camelCase keys of the
        # Bedrock usage payload.
        self.totalTokens = data.get('totalTokens', 0)
        self.inputTokens = data.get('inputTokens', 0)
        self.outputTokens = data.get('outputTokens', 0)

    def to_metrics(self) -> TokenMetrics:
        """
        Convert Bedrock token usage to metrics format.

        :return: TokenMetrics object containing usage data.
        """
        return TokenMetrics(
            total=self.totalTokens,
            input=self.inputTokens,
            output=self.outputTokens,
        )
64
119
65
120
class LDAIConfigTracker:
    """
    Tracks configuration and usage metrics for LaunchDarkly AI operations.
    """

    def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context):
        """
        Initialize an AI configuration tracker.

        :param ld_client: LaunchDarkly client instance used to emit events.
        :param version_key: Version key attached to every tracked event.
        :param config_key: Configuration key attached to every tracked event.
        :param context: Evaluation context the events are tracked against.
        """
        self.context = context
        self.config_key = config_key
        self.version_key = version_key
        self.ld_client = ld_client
71
137
72
138
def get_track_data(self):
    """
    Get tracking data attached to every emitted event.

    :return: Dictionary containing the version and config keys.
    """
    return dict(versionKey=self.version_key, configKey=self.config_key)
77
148
78
149
def track_duration(self, duration: int) -> None:
    """
    Track the duration of an AI operation.

    :param duration: Duration in milliseconds.
    """
    event_data = self.get_track_data()
    self.ld_client.track('$ld:ai:duration:total', self.context, event_data, duration)
80
156
81
157
def track_duration_of(self, func):
    """
    Invoke ``func`` and track how long it took.

    :param func: Zero-argument callable to invoke and time.
    :return: Result of the tracked function.
    """
    # Fix: use a monotonic clock rather than time.time() so the measured
    # duration cannot be skewed (or go negative) by wall-clock adjustments.
    # NOTE(review): the duration computation below was elided in the reviewed
    # view and is reconstructed (seconds -> milliseconds, matching
    # track_duration's documented unit) — confirm against the full file.
    start_time = time.monotonic()
    result = func()
    end_time = time.monotonic()
    duration = int((end_time - start_time) * 1000)
    self.track_duration(duration)
    return result
88
170
89
171
def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
    """
    Track user feedback for an AI operation.

    :param feedback: Dictionary whose 'kind' entry holds a FeedbackKind;
        any other kind is ignored.
    """
    kind = feedback['kind']
    if kind == FeedbackKind.Positive:
        event = '$ld:ai:feedback:user:positive'
    elif kind == FeedbackKind.Negative:
        event = '$ld:ai:feedback:user:negative'
    else:
        return
    self.ld_client.track(event, self.context, self.get_track_data(), 1)
94
181
95
182
def track_success(self) -> None:
    """
    Track a successful AI generation.
    """
    event_data = self.get_track_data()
    self.ld_client.track('$ld:ai:generation', self.context, event_data, 1)
97
187
98
188
def track_openai(self, func):
    """
    Track an OpenAI-backed operation.

    Times ``func`` via track_duration_of and, when the response carries a
    truthy ``usage`` attribute, records its token counts.

    :param func: Function to track.
    :return: Result of the tracked function.
    """
    result = self.track_duration_of(func)
    usage = result.usage
    if usage:
        self.track_tokens(OpenAITokenUsage(usage))
    return result
103
199
104
def track_bedrock_converse(self, res: dict) -> dict:
    """
    Track an AWS Bedrock Converse operation.

    :param res: Response dictionary from Bedrock.
    :return: The original response dictionary, unchanged.
    """
    # NOTE(review): the middle of this method was elided in the reviewed
    # view; the error/usage branches below are reconstructed — confirm
    # against the full file.
    status_code = res.get('$metadata', {}).get('httpStatusCode', 0)
    if status_code == 200:
        self.track_success()
    elif status_code >= 400:
        # Error statuses are currently not tracked.
        pass
    if res.get('usage'):
        self.track_tokens(BedrockTokenUsage(res['usage']))
    return res
116
218
117
219
def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None:
    """
    Track token usage metrics, emitting one event per non-zero counter.

    :param tokens: Token usage data from either custom, OpenAI, or Bedrock
        sources; anything exposing ``to_metrics()``.
    """
    token_metrics = tokens.to_metrics()
    # Fix: the per-event payload is identical for all three events, so build
    # it once instead of calling get_track_data() up to three times.
    track_data = self.get_track_data()
    if token_metrics.total > 0:
        self.ld_client.track('$ld:ai:tokens:total', self.context, track_data, token_metrics.total)
    if token_metrics.input > 0:
        self.ld_client.track('$ld:ai:tokens:input', self.context, track_data, token_metrics.input)
    if token_metrics.output > 0:
        self.ld_client.track('$ld:ai:tokens:output', self.context, track_data, token_metrics.output)
0 commit comments