Skip to content

Commit 780fa15

Browse files
author
Daniel OBrien
committed
align Python with most recent TypeScript changes
1 parent 676b7d5 commit 780fa15

File tree

3 files changed

+23
-12
lines changed

3 files changed

+23
-12
lines changed

ldai/client.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,8 @@ def model_config(self, key: str, context: Context, default_value: str, variables
3838
for entry in variation['prompt']
3939
]
4040

41-
return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context))
41+
enabled = variation['_ldMeta'].get('enabled')
42+
return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['versionKey'], key, context, bool(enabled)))
4243

4344
def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str:
4445
"""Interpolate the template with the given variables.

ldai/tracker.py

Lines changed: 17 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -4,15 +4,15 @@
44
from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage
55

66
class LDAIConfigTracker:
7-
def __init__(self, ld_client: LDClient, variation_id: str, config_key: str, context: Context):
7+
def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context):
88
self.ld_client = ld_client
9-
self.variation_id = variation_id
9+
self.version_key = version_key
1010
self.config_key = config_key
1111
self.context = context
1212

1313
def get_track_data(self):
1414
return {
15-
'variationId': self.variation_id,
15+
'versionKey': self.version_key,
1616
'configKey': self.config_key,
1717
}
1818

@@ -27,24 +27,33 @@ def track_duration_of(self, func, *args, **kwargs):
2727
self.track_duration(duration)
2828
return result
2929

30-
def track_error(self, error: int) -> None:
31-
self.ld_client.track('$ld:ai:error', self.context, self.get_track_data(), error)
32-
3330
def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
3431
if feedback['kind'] == FeedbackKind.Positive:
3532
self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1)
3633
elif feedback['kind'] == FeedbackKind.Negative:
3734
self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1)
3835

39-
def track_generation(self, generation: int) -> None:
40-
self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation)
36+
def track_success(self) -> None:
37+
self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), 1)
4138

4239
def track_openai(self, func, *args, **kwargs):
4340
result = self.track_duration_of(func, *args, **kwargs)
4441
if result.usage:
4542
self.track_tokens(OpenAITokenUsage(result.usage))
4643
return result
4744

45+
def track_bedrock_converse(self, res: dict) -> dict:
46+
if res.get('$metadata', {}).get('httpStatusCode') == 200:
47+
self.track_success()
48+
elif res.get('$metadata', {}).get('httpStatusCode') and res['$metadata']['httpStatusCode'] >= 400:
49+
# Potentially add error tracking in the future.
50+
pass
51+
if res.get('metrics', {}).get('latencyMs'):
52+
self.track_duration(res['metrics']['latencyMs'])
53+
if res.get('usage'):
54+
self.track_tokens(BedrockTokenUsage(res['usage']))
55+
return res
56+
4857
def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None:
4958
token_metrics = tokens.to_metrics()
5059
if token_metrics['total'] > 0:

ldai/types.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,16 +23,16 @@ class AITracker():
2323
track_feedback: Callable[..., None]
2424

2525
class AIConfig():
26-
def __init__(self, config: AIConfigData, tracker: AITracker):
26+
def __init__(self, config: AIConfigData, tracker: AITracker, enabled: bool):
2727
self.config = config
2828
self.tracker = tracker
29+
self.enabled = enabled
2930

31+
@dataclass
3032
class FeedbackKind(Enum):
3133
Positive = "positive"
3234
Negative = "negative"
3335

34-
@dataclass
35-
3636
class TokenUsage():
3737
total_tokens: int
3838
prompt_tokens: int
@@ -45,6 +45,7 @@ def to_metrics(self):
4545
'output': self['completion_tokens'],
4646
}
4747

48+
@dataclass
4849
class OpenAITokenUsage:
4950
def __init__(self, data: any):
5051
self.total_tokens = data.total_tokens

0 commit comments

Comments
 (0)