Skip to content

Commit f52bf8f

Browse files
author
Daniel OBrien
committed
updates to Python tracking
1 parent 7cb25f4 commit f52bf8f

File tree

3 files changed: 76 additions (+76), 38 deletions (-38)

ldai/client.py

Lines changed: 2 additions & 4 deletions
@@ -1,19 +1,19 @@
 import json
 from typing import Any, Dict, Optional
 from ldclient import Context
-#from config import LDAIConfig, LDAIConfigTracker
 from ldclient.client import LDClient
 import chevron
 
 from ldai.tracker import LDAIConfigTracker
+from ldai.types import AIConfig
 
 class LDAIClient:
     """The LaunchDarkly AI SDK client object."""
 
     def __init__(self, client: LDClient):
         self.client = client
 
-    def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> Any:
+    def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> AIConfig:
         """Get the value of a model configuration asynchronously.
 
         Args:
@@ -40,8 +40,6 @@ def model_config(self, key: str, context: Context, default_value: str, variables
             for entry in variation['prompt']
         ]
 
-        #return detail.value,
-
         return {
             'config': variation,
             'tracker': LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context)

ldai/tracking_utils.py

Lines changed: 3 additions & 21 deletions
@@ -1,23 +1,5 @@
 from typing import Union
-from ldai.types import BedrockTokenUsage, TokenMetrics, TokenUsage, UnderscoreTokenUsage
+from ldai.types import BedrockTokenUsage, TokenMetrics, OpenAITokenUsage, UnderscoreTokenUsage
 
-def usage_to_token_metrics(usage: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> TokenMetrics:
-    def get_attr(obj, attr, default=0):
-        if isinstance(obj, dict):
-            return obj.get(attr, default)
-        return getattr(obj, attr, default)
-
-    if 'inputTokens' in usage and 'outputTokens' in usage:
-        # Bedrock usage
-        return {
-            'total': get_attr(usage, 'totalTokens'),
-            'input': get_attr(usage, 'inputTokens'),
-            'output': get_attr(usage, 'outputTokens'),
-        }
-
-    # OpenAI usage (both camelCase and snake_case)
-    return {
-        'total': get_attr(usage, 'total_tokens', get_attr(usage, 'totalTokens', 0)),
-        'input': get_attr(usage, 'prompt_tokens', get_attr(usage, 'promptTokens', 0)),
-        'output': get_attr(usage, 'completion_tokens', get_attr(usage, 'completionTokens', 0)),
-    }
+def usage_to_token_metrics(usage: Union[OpenAITokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> TokenMetrics:
+    return usage.to_metrics()

ldai/types.py

Lines changed: 71 additions & 13 deletions
@@ -1,5 +1,32 @@
 from enum import Enum
-from typing import TypedDict
+from typing import Callable, TypedDict
+from dataclasses import dataclass
+
+@dataclass
+class TokenMetrics(TypedDict):
+    total: int
+    input: int
+    output: int # type: ignore
+
+class AIConfigData(TypedDict):
+    config: dict
+    prompt: any
+    _ldMeta: dict
+
+class AITracker(TypedDict):
+    track_duration: Callable[..., None]
+    track_tokens: Callable[..., None]
+    track_error: Callable[..., None]
+    track_generation: Callable[..., None]
+    track_feedback: Callable[..., None]
+
+class AIConfig():
+    def __init__(self, config: AIConfigData, tracker: AITracker):
+        self._config = config
+        self._tracker = tracker
+
+    config: AIConfigData
+    tracker: AITracker
 
 class FeedbackKind(Enum):
     Positive = "positive"
@@ -10,17 +37,48 @@ class TokenUsage(TypedDict):
     prompt_tokens: int
     completion_tokens: int
 
-class UnderscoreTokenUsage(TypedDict):
-    total_tokens: int
-    prompt_tokens: int
-    completion_tokens: int
+    def to_metrics(self):
+        return {
+            'total': self['total_tokens'],
+            'input': self['prompt_tokens'],
+            'output': self['completion_tokens'],
+        }
 
-class BedrockTokenUsage(TypedDict):
-    totalTokens: int
-    inputTokens: int
-    outputTokens: int
+class OpenAITokenUsage:
+    def __init__(self, data: any):
+        self.total_tokens = data.total_tokens
+        self.prompt_tokens = data.prompt_tokens
+        self.completion_tokens = data.completion_tokens
 
-class TokenMetrics(TypedDict):
-    total: int
-    input: int
-    output: int # type: ignore
+    def to_metrics(self) -> TokenMetrics:
+        return {
+            'total': self.total_tokens,
+            'input': self.prompt_tokens,
+            'output': self.completion_tokens,
+        }
+
+class UnderscoreTokenUsage:
+    def __init__(self, data: dict):
+        self.total_tokens = data.get('total_tokens', 0)
+        self.prompt_tokens = data.get('prompt_tokens', 0)
+        self.completion_tokens = data.get('completion_tokens', 0)
+
+    def to_metrics(self) -> TokenMetrics:
+        return {
+            'total': self.total_tokens,
+            'input': self.prompt_tokens,
+            'output': self.completion_tokens,
+        }
+
+class BedrockTokenUsage:
+    def __init__(self, data: dict):
+        self.totalTokens = data.get('totalTokens', 0)
+        self.inputTokens = data.get('inputTokens', 0)
+        self.outputTokens = data.get('outputTokens', 0)
+
+    def to_metrics(self) -> TokenMetrics:
+        return {
+            'total': self.totalTokens,
+            'input': self.inputTokens,
+            'output': self.outputTokens,
+        }

0 commit comments

Comments
 (0)