Skip to content

Commit 7cb25f4

Browse files
author
Daniel OBrien
committed
basic functionality
1 parent 015ba17 commit 7cb25f4

File tree

8 files changed

+152
-100
lines changed

8 files changed

+152
-100
lines changed
File renamed without changes.

ldai/client.py

Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,60 @@
1+
import json
2+
from typing import Any, Dict, Optional
3+
from ldclient import Context
4+
#from config import LDAIConfig, LDAIConfigTracker
5+
from ldclient.client import LDClient
6+
import chevron
7+
8+
from ldai.tracker import LDAIConfigTracker
9+
10+
class LDAIClient:
    """The LaunchDarkly AI SDK client object."""

    def __init__(self, client: 'LDClient'):
        """Initialize the AI client.

        Args:
            client: The underlying LaunchDarkly client used for flag
                evaluation and event tracking.
        """
        self.client = client

    def model_config(self, key: str, context: 'Context', default_value: str, variables: Optional[Dict[str, Any]] = None) -> Any:
        """Get the value of a model configuration.

        Args:
            key: The key of the model configuration.
            context: The context to evaluate the model configuration in.
            default_value: The default value of the model configuration.
            variables: Additional variables for template interpolation.

        Returns:
            A dict with the interpolated model 'config' and a 'tracker'
            (LDAIConfigTracker) bound to the served variation.
        """
        variation = self.client.variation(key, context, default_value)

        # The evaluation context is always exposed to templates as 'ldctx';
        # caller-supplied variables may extend (or shadow) it.
        all_variables = {'ldctx': context}
        if variables:
            all_variables.update(variables)

        # NOTE(review): if the flag is missing, 'variation' is the raw
        # default_value (a str) and the indexing below raises — confirm the
        # intended fallback behavior.
        variation['prompt'] = [
            {
                **entry,
                'content': self.interpolate_template(entry['content'], all_variables)
            }
            for entry in variation['prompt']
        ]

        return {
            'config': variation,
            'tracker': LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context)
        }

    def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str:
        """Interpolate the template with the given variables.

        Args:
            template: The Mustache template string.
            variables: The variables to interpolate into the template.

        Returns:
            The interpolated string.
        """
        return chevron.render(template, variables)
File renamed without changes.

ldai/tracker.py

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
from typing import Dict, Union
2+
from ldclient import Context, LDClient
3+
from ldai.tracking_utils import usage_to_token_metrics
4+
from ldai.types import BedrockTokenUsage, FeedbackKind, TokenUsage, UnderscoreTokenUsage
5+
6+
class LDAIConfigTracker:
    """Records metrics (duration, tokens, feedback, …) for one AI config variation."""

    def __init__(self, ld_client: LDClient, variation_id: str, config_key: str, context: Context):
        self.ld_client = ld_client
        self.variation_id = variation_id
        self.config_key = config_key
        self.context = context

    def get_track_data(self):
        """Return the metadata dict attached to every tracked event."""
        return {
            'variationId': self.variation_id,
            'configKey': self.config_key,
        }

    def track_duration(self, duration: int) -> None:
        """Record the total duration of a model invocation."""
        self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration)

    def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None:
        """Record token usage, emitting only the counters that are positive."""
        token_metrics = usage_to_token_metrics(tokens)
        for event_key, metric_name in (
            ('$ld:ai:tokens:total', 'total'),
            ('$ld:ai:tokens:input', 'input'),
            ('$ld:ai:tokens:output', 'output'),
        ):
            value = token_metrics[metric_name]
            if value > 0:
                self.ld_client.track(event_key, self.context, self.get_track_data(), value)

    def track_error(self, error: int) -> None:
        """Record an error metric for this configuration."""
        self.ld_client.track('$ld:ai:error', self.context, self.get_track_data(), error)

    def track_generation(self, generation: int) -> None:
        """Record a generation metric for this configuration."""
        self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation)

    def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
        """Record positive or negative end-user feedback."""
        kind = feedback['kind']
        if kind == FeedbackKind.Positive:
            self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1)
        elif kind == FeedbackKind.Negative:
            self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1)

ldai/tracking_utils.py

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
from typing import Union
2+
from ldai.types import BedrockTokenUsage, TokenMetrics, TokenUsage, UnderscoreTokenUsage
3+
4+
def usage_to_token_metrics(usage: 'Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]') -> 'TokenMetrics':
    """Normalize a provider-specific token usage record into TokenMetrics.

    Args:
        usage: Token usage as reported by the provider. May be a dict or an
            object with attributes, using Bedrock camelCase keys
            (totalTokens/inputTokens/outputTokens) or OpenAI keys in either
            snake_case or camelCase.

    Returns:
        A TokenMetrics dict with 'total', 'input' and 'output' counts
        (missing fields default to 0).
    """
    def get_attr(obj, attr, default=0):
        # Supports both mapping-style and attribute-style usage records.
        if isinstance(obj, dict):
            return obj.get(attr, default)
        return getattr(obj, attr, default)

    # Bedrock usage is detected by its camelCase input/output keys. Use
    # get_attr rather than the 'in' operator so attribute-style (non-dict)
    # records don't raise TypeError on membership testing.
    if get_attr(usage, 'inputTokens', None) is not None and get_attr(usage, 'outputTokens', None) is not None:
        return {
            'total': get_attr(usage, 'totalTokens'),
            'input': get_attr(usage, 'inputTokens'),
            'output': get_attr(usage, 'outputTokens'),
        }

    # OpenAI usage (both snake_case and camelCase)
    return {
        'total': get_attr(usage, 'total_tokens', get_attr(usage, 'totalTokens', 0)),
        'input': get_attr(usage, 'prompt_tokens', get_attr(usage, 'promptTokens', 0)),
        'output': get_attr(usage, 'completion_tokens', get_attr(usage, 'completionTokens', 0)),
    }

ldai/types.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
from enum import Enum
2+
from typing import TypedDict
3+
4+
class FeedbackKind(Enum):
    """Direction of end-user feedback on an AI generation."""

    Positive = "positive"
    Negative = "negative"
7+
8+
class TokenUsage(TypedDict):
    """Token usage record with camelCase field names.

    The original declaration duplicated UnderscoreTokenUsage's snake_case
    fields; the camelCase variant matches the fallback keys read by
    usage_to_token_metrics (totalTokens/promptTokens/completionTokens).
    """
    totalTokens: int  # total tokens consumed by the request
    promptTokens: int  # tokens in the prompt/input
    completionTokens: int  # tokens in the generated completion
12+
13+
class UnderscoreTokenUsage(TypedDict):
    """Token usage record with snake_case field names (OpenAI convention)."""
    total_tokens: int  # total tokens consumed by the request
    prompt_tokens: int  # tokens in the prompt/input
    completion_tokens: int  # tokens in the generated completion
17+
18+
class BedrockTokenUsage(TypedDict):
    """Token usage record with Bedrock's camelCase input/output field names."""
    totalTokens: int  # total tokens consumed by the request
    inputTokens: int  # tokens in the model input
    outputTokens: int  # tokens in the model output
22+
23+
class TokenMetrics(TypedDict):
    """Normalized token counts produced by usage_to_token_metrics."""
    total: int  # total tokens (input + output)
    input: int  # tokens consumed by the input/prompt
    output: int  # tokens produced by the model

ldotel/tracing.py

Lines changed: 0 additions & 99 deletions
This file was deleted.

pyproject.toml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "launchdarkly-server-sdk-ai"
3-
version = "0.0.1"
3+
version = "0.0.14"
44
description = "LaunchDarkly SDK for AI"
55
authors = ["LaunchDarkly <[email protected]>"]
66
license = "Apache-2.0"
@@ -29,6 +29,7 @@ exclude = [
2929
[tool.poetry.dependencies]
3030
python = ">=3.8"
3131
launchdarkly-server-sdk = ">=9.4.0"
32+
chevron = "=0.14.0"
3233

3334

3435
[tool.poetry.group.dev.dependencies]

0 commit comments

Comments
 (0)