1
1
import time
2
2
from dataclasses import dataclass
3
3
from enum import Enum
4
- from typing import Dict , Optional , Union
4
+ from typing import Dict , Optional
5
5
6
6
from ldclient import Context , LDClient
7
7
8
8
9
- @dataclass
10
- class TokenMetrics :
11
- """
12
- Metrics for token usage in AI operations.
13
-
14
- :param total: Total number of tokens used.
15
- :param input: Number of input tokens.
16
- :param output: Number of output tokens.
17
- """
18
-
19
- total : int
20
- input : int
21
- output : int # type: ignore
22
-
23
-
24
9
class FeedbackKind (Enum ):
25
10
"""
26
11
Types of feedback that can be provided for AI operations.
@@ -35,99 +20,14 @@ class TokenUsage:
35
20
"""
36
21
Tracks token usage for AI operations.
37
22
38
- :param total_tokens: Total number of tokens used.
39
- :param prompt_tokens: Number of tokens in the prompt.
40
- :param completion_tokens: Number of tokens in the completion.
41
- """
42
-
43
- total_tokens : int
44
- prompt_tokens : int
45
- completion_tokens : int
46
-
47
- def to_metrics (self ):
48
- """
49
- Convert token usage to metrics format.
50
-
51
- :return: Dictionary containing token metrics.
52
- """
53
- return {
54
- 'total' : self ['total_tokens' ],
55
- 'input' : self ['prompt_tokens' ],
56
- 'output' : self ['completion_tokens' ],
57
- }
58
-
59
-
60
- @dataclass
61
- class LDOpenAIUsage :
62
- """
63
- LaunchDarkly-specific OpenAI usage tracking.
64
-
65
- :param total_tokens: Total number of tokens used.
66
- :param prompt_tokens: Number of tokens in the prompt.
67
- :param completion_tokens: Number of tokens in the completion.
68
- """
69
-
70
- total_tokens : int
71
- prompt_tokens : int
72
- completion_tokens : int
73
-
74
-
75
- @dataclass
76
- class OpenAITokenUsage :
77
- """
78
- Tracks OpenAI-specific token usage.
79
- """
80
-
81
- def __init__ (self , data : LDOpenAIUsage ):
82
- """
83
- Initialize OpenAI token usage tracking.
84
-
85
- :param data: OpenAI usage data.
86
- """
87
- self .total_tokens = data .total_tokens
88
- self .prompt_tokens = data .prompt_tokens
89
- self .completion_tokens = data .completion_tokens
90
-
91
- def to_metrics (self ) -> TokenMetrics :
92
- """
93
- Convert OpenAI token usage to metrics format.
94
-
95
- :return: TokenMetrics object containing usage data.
96
- """
97
- return TokenMetrics (
98
- total = self .total_tokens ,
99
- input = self .prompt_tokens ,
100
- output = self .completion_tokens ,
101
- )
102
-
103
-
104
- @dataclass
105
- class BedrockTokenUsage :
106
- """
107
- Tracks AWS Bedrock-specific token usage.
23
+ :param total: Total number of tokens used.
24
+ :param input: Number of tokens in the prompt.
25
+ :param output: Number of tokens in the completion.
108
26
"""
109
27
110
- def __init__ (self , data : dict ):
111
- """
112
- Initialize Bedrock token usage tracking.
113
-
114
- :param data: Dictionary containing Bedrock usage data.
115
- """
116
- self .totalTokens = data .get ('totalTokens' , 0 )
117
- self .inputTokens = data .get ('inputTokens' , 0 )
118
- self .outputTokens = data .get ('outputTokens' , 0 )
119
-
120
- def to_metrics (self ) -> TokenMetrics :
121
- """
122
- Convert Bedrock token usage to metrics format.
123
-
124
- :return: TokenMetrics object containing usage data.
125
- """
126
- return TokenMetrics (
127
- total = self .totalTokens ,
128
- input = self .inputTokens ,
129
- output = self .outputTokens ,
130
- )
28
+ total : int
29
+ input : int
30
+ output : int
131
31
132
32
133
33
class LDAIMetricSummary :
@@ -154,7 +54,7 @@ def feedback(self) -> Optional[Dict[str, FeedbackKind]]:
154
54
return self ._feedback
155
55
156
56
@property
157
- def usage (self ) -> Optional [Union [ TokenUsage , BedrockTokenUsage ] ]:
57
+ def usage (self ) -> Optional [TokenUsage ]:
158
58
return self ._usage
159
59
160
60
@@ -255,8 +155,8 @@ def track_openai_metrics(self, func):
255
155
:return: Result of the tracked function.
256
156
"""
257
157
result = self .track_duration_of (func )
258
- if result .usage :
259
- self .track_tokens (OpenAITokenUsage (result .usage ))
158
+ if hasattr ( result , 'usage' ) and hasattr ( result .usage , 'to_dict' ) :
159
+ self .track_tokens (_openai_to_token_usage (result .usage . to_dict () ))
260
160
return result
261
161
262
162
def track_bedrock_converse_metrics (self , res : dict ) -> dict :
@@ -275,37 +175,36 @@ def track_bedrock_converse_metrics(self, res: dict) -> dict:
275
175
if res .get ('metrics' , {}).get ('latencyMs' ):
276
176
self .track_duration (res ['metrics' ]['latencyMs' ])
277
177
if res .get ('usage' ):
278
- self .track_tokens (BedrockTokenUsage (res ['usage' ]))
178
+ self .track_tokens (_bedrock_to_token_usage (res ['usage' ]))
279
179
return res
280
180
281
- def track_tokens (self , tokens : Union [ TokenUsage , BedrockTokenUsage ] ) -> None :
181
+ def track_tokens (self , tokens : TokenUsage ) -> None :
282
182
"""
283
183
Track token usage metrics.
284
184
285
185
:param tokens: Token usage data from either custom, OpenAI, or Bedrock sources.
286
186
"""
287
187
self ._summary ._usage = tokens
288
- token_metrics = tokens .to_metrics ()
289
- if token_metrics .total > 0 :
188
+ if tokens .total > 0 :
290
189
self ._ld_client .track (
291
190
'$ld:ai:tokens:total' ,
292
191
self ._context ,
293
192
self .__get_track_data (),
294
- token_metrics .total ,
193
+ tokens .total ,
295
194
)
296
- if token_metrics .input > 0 :
195
+ if tokens .input > 0 :
297
196
self ._ld_client .track (
298
197
'$ld:ai:tokens:input' ,
299
198
self ._context ,
300
199
self .__get_track_data (),
301
- token_metrics .input ,
200
+ tokens .input ,
302
201
)
303
- if token_metrics .output > 0 :
202
+ if tokens .output > 0 :
304
203
self ._ld_client .track (
305
204
'$ld:ai:tokens:output' ,
306
205
self ._context ,
307
206
self .__get_track_data (),
308
- token_metrics .output ,
207
+ tokens .output ,
309
208
)
310
209
311
210
def get_summary (self ) -> LDAIMetricSummary :
@@ -315,3 +214,31 @@ def get_summary(self) -> LDAIMetricSummary:
315
214
:return: Summary of AI metrics.
316
215
"""
317
216
return self ._summary
217
+
218
+
219
def _bedrock_to_token_usage(data: dict) -> TokenUsage:
    """
    Convert a Bedrock usage dictionary to a TokenUsage object.

    Missing counters default to 0.

    :param data: Dictionary containing Bedrock usage data.
    :return: TokenUsage object containing usage data.
    """
    key_map = {
        'total': 'totalTokens',
        'input': 'inputTokens',
        'output': 'outputTokens',
    }
    return TokenUsage(**{field: data.get(source, 0) for field, source in key_map.items()})
231
+
232
+
233
def _openai_to_token_usage(data: dict) -> TokenUsage:
    """
    Convert an OpenAI usage dictionary to a TokenUsage object.

    Missing counters default to 0.

    :param data: Dictionary containing OpenAI usage data.
    :return: TokenUsage object containing usage data.
    """
    key_map = {
        'total': 'total_tokens',
        'input': 'prompt_tokens',
        'output': 'completion_tokens',
    }
    return TokenUsage(**{field: data.get(source, 0) for field, source in key_map.items()})
0 commit comments