Skip to content

Commit 676b7d5

Browse files
author
Dan O'Brien
authored
Updates to Python tracking (#3)
* Clean up unneeded comments. * Switch token classes to be closer to AI provider types. * Move from `variationDetail`, which generates an event on each call, to `variation`.
2 parents b0fb330 + 6b4f3fc commit 676b7d5

File tree

5 files changed

+105
-62
lines changed

5 files changed

+105
-62
lines changed

ldai/client.py

Lines changed: 3 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,18 @@
1-
import json
21
from typing import Any, Dict, Optional
32
from ldclient import Context
4-
#from config import LDAIConfig, LDAIConfigTracker
53
from ldclient.client import LDClient
64
import chevron
75

86
from ldai.tracker import LDAIConfigTracker
7+
from ldai.types import AIConfig
98

109
class LDAIClient:
1110
"""The LaunchDarkly AI SDK client object."""
1211

    def __init__(self, client: LDClient):
        """Store the LaunchDarkly client used for config evaluation and event tracking."""
        self.client = client
1514

16-
def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> Any:
15+
def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> AIConfig:
1716
"""Get the value of a model configuration asynchronously.
1817
1918
Args:
@@ -31,7 +30,6 @@ def model_config(self, key: str, context: Context, default_value: str, variables
3130
if variables:
3231
all_variables.update(variables)
3332

34-
print(variation)
3533
variation['prompt'] = [
3634
{
3735
**entry,
@@ -40,12 +38,7 @@ def model_config(self, key: str, context: Context, default_value: str, variables
4038
for entry in variation['prompt']
4139
]
4240

43-
#return detail.value,
44-
45-
return {
46-
'config': variation,
47-
'tracker': LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context)
48-
}
41+
return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context))
4942

5043
def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str:
5144
"""Interpolate the template with the given variables.

ldai/tracker.py

Lines changed: 28 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
1+
import time
12
from typing import Dict, Union
23
from ldclient import Context, LDClient
3-
from ldai.tracking_utils import usage_to_token_metrics
4-
from ldai.types import BedrockTokenUsage, FeedbackKind, TokenUsage, UnderscoreTokenUsage
4+
from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage
55

66
class LDAIConfigTracker:
77
def __init__(self, ld_client: LDClient, variation_id: str, config_key: str, context: Context):
@@ -19,23 +19,37 @@ def get_track_data(self):
1919
def track_duration(self, duration: int) -> None:
2020
self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration)
2121

22-
def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None:
23-
token_metrics = usage_to_token_metrics(tokens)
24-
if token_metrics['total'] > 0:
25-
self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total'])
26-
if token_metrics['input'] > 0:
27-
self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics['input'])
28-
if token_metrics['output'] > 0:
29-
self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics['output'])
22+
def track_duration_of(self, func, *args, **kwargs):
23+
start_time = time.time()
24+
result = func(*args, **kwargs)
25+
end_time = time.time()
26+
duration = int((end_time - start_time) * 1000) # duration in milliseconds
27+
self.track_duration(duration)
28+
return result
3029

3130
def track_error(self, error: int) -> None:
3231
self.ld_client.track('$ld:ai:error', self.context, self.get_track_data(), error)
3332

34-
def track_generation(self, generation: int) -> None:
35-
self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation)
36-
3733
def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
3834
if feedback['kind'] == FeedbackKind.Positive:
3935
self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1)
4036
elif feedback['kind'] == FeedbackKind.Negative:
41-
self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1)
37+
self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1)
38+
39+
def track_generation(self, generation: int) -> None:
40+
self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation)
41+
42+
def track_openai(self, func, *args, **kwargs):
43+
result = self.track_duration_of(func, *args, **kwargs)
44+
if result.usage:
45+
self.track_tokens(OpenAITokenUsage(result.usage))
46+
return result
47+
48+
def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None:
49+
token_metrics = tokens.to_metrics()
50+
if token_metrics['total'] > 0:
51+
self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total'])
52+
if token_metrics['input'] > 0:
53+
self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics['input'])
54+
if token_metrics['output'] > 0:
55+
self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics['output'])

ldai/tracking_utils.py

Lines changed: 0 additions & 23 deletions
This file was deleted.

ldai/types.py

Lines changed: 73 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,26 +1,85 @@
11
from enum import Enum
2-
from typing import TypedDict
2+
from typing import Callable
3+
from dataclasses import dataclass
4+
@dataclass
class TokenMetrics():
    """Normalized token counts: a total plus its input/output split."""
    total: int
    input: int
    output: int
10+
@dataclass
class AIConfigData():
    """Raw AI config payload evaluated from a LaunchDarkly variation.

    ``prompt`` appears to hold a list of prompt-message dicts and
    ``_ldMeta`` LaunchDarkly bookkeeping such as the variation id
    (based on how the client interpolates and reads them) — confirm
    against the flag payload schema.
    """
    config: dict
    # `object` (accepts anything) instead of the builtin function `any`,
    # which is a callable, not a type.
    prompt: object
    _ldMeta: dict
17+
class AITracker():
    """Structural shape of the tracking callbacks attached to an AI config.

    Declared as attribute annotations (not methods) so any object that
    provides matching callables — e.g. LDAIConfigTracker — fits the shape.
    """
    track_duration: Callable[..., None]
    track_tokens: Callable[..., None]
    track_error: Callable[..., None]
    track_generation: Callable[..., None]
    track_feedback: Callable[..., None]
24+
class AIConfig():
    """Result of evaluating an AI config: the config data plus its tracker."""

    def __init__(self, config: AIConfigData, tracker: AITracker):
        self.config = config
        self.tracker = tracker
329

class FeedbackKind(Enum):
    """User sentiment attached to an AI generation."""

    Positive = "positive"
    Negative = "negative"
733

8-
class TokenUsage(TypedDict):
9-
total_tokens: int
10-
prompt_tokens: int
11-
completion_tokens: int
@dataclass
class TokenUsage():
    """Token usage reported with snake_case attribute names.

    Bug fixed: ``to_metrics`` used ``self['total_tokens']``-style
    subscripting, but dataclass instances are not subscriptable, so it
    raised TypeError; attribute access is required.
    """
    total_tokens: int
    prompt_tokens: int
    completion_tokens: int

    def to_metrics(self) -> dict:
        """Map provider field names onto the generic total/input/output keys."""
        return {
            'total': self.total_tokens,
            'input': self.prompt_tokens,
            'output': self.completion_tokens,
        }
2247

23-
class TokenMetrics(TypedDict):
24-
total: int
25-
input: int
26-
output: int # type: ignore
class OpenAITokenUsage:
    """Adapts an OpenAI-style usage object (attribute access) to token metrics."""

    def __init__(self, data: object):
        # `data` must expose total/prompt/completion token attributes, as
        # OpenAI response `usage` objects do — confirm against the provider SDK.
        # (`object` replaces the previous `any`, which is a builtin function,
        # not a type.)
        self.total_tokens = data.total_tokens
        self.prompt_tokens = data.prompt_tokens
        self.completion_tokens = data.completion_tokens

    def to_metrics(self) -> dict:
        """Return the generic total/input/output mapping consumed by track_tokens."""
        # A plain dict is built and subscripted by callers; the previous
        # `-> TokenMetrics` annotation did not match the returned value.
        return {
            'total': self.total_tokens,
            'input': self.prompt_tokens,
            'output': self.completion_tokens,
        }
60+
class UnderscoreTokenUsage:
    """Adapts a dict with snake_case token keys; missing keys count as 0."""

    def __init__(self, data: dict):
        self.total_tokens = data.get('total_tokens', 0)
        self.prompt_tokens = data.get('prompt_tokens', 0)
        self.completion_tokens = data.get('completion_tokens', 0)

    def to_metrics(self) -> dict:
        """Return the generic total/input/output mapping consumed by track_tokens."""
        # A plain dict is built and subscripted by callers; the previous
        # `-> TokenMetrics` annotation did not match the returned value.
        return {
            'total': self.total_tokens,
            'input': self.prompt_tokens,
            'output': self.completion_tokens,
        }
73+
class BedrockTokenUsage:
    """Adapts a dict with Bedrock camelCase token keys; missing keys count as 0."""

    def __init__(self, data: dict):
        self.totalTokens = data.get('totalTokens', 0)
        self.inputTokens = data.get('inputTokens', 0)
        self.outputTokens = data.get('outputTokens', 0)

    def to_metrics(self) -> dict:
        """Return the generic total/input/output mapping consumed by track_tokens."""
        # A plain dict is built and subscripted by callers; the previous
        # `-> TokenMetrics` annotation did not match the returned value.
        return {
            'total': self.totalTokens,
            'input': self.inputTokens,
            'output': self.outputTokens,
        }

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "launchdarkly-server-sdk-ai"
3-
version = "0.0.14"
3+
version = "0.0.1"
44
description = "LaunchDarkly SDK for AI"
55
authors = ["LaunchDarkly <[email protected]>"]
66
license = "Apache-2.0"

0 commit comments

Comments
 (0)