13 changes: 3 additions & 10 deletions ldai/client.py
@@ -1,19 +1,18 @@
 import json
 from typing import Any, Dict, Optional
 from ldclient import Context
-#from config import LDAIConfig, LDAIConfigTracker
 from ldclient.client import LDClient
 import chevron

 from ldai.tracker import LDAIConfigTracker
+from ldai.types import AIConfig

 class LDAIClient:
     """The LaunchDarkly AI SDK client object."""

     def __init__(self, client: LDClient):
         self.client = client

-    def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> Any:
+    def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> AIConfig:
         """Get the value of a model configuration asynchronously.
Member (keelerm84):
It looks like this is returning a dictionary, not an AIConfig type.

Also just noticed there is a rogue print in there.

Author:
@keelerm84 updated the logic to return an AIConfig.

         Args:
@@ -31,7 +30,6 @@ def model_config(self, key: str, context: Context, default_value: str, variables
         if variables:
             all_variables.update(variables)

-        print(variation)
         variation['prompt'] = [
             {
                 **entry,
@@ -40,12 +38,7 @@ def model_config(self, key: str, context: Context, default_value: str, variables
             for entry in variation['prompt']
         ]

-        #return detail.value,
-
-        return {
-            'config': variation,
-            'tracker': LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context)
-        }
+        return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context))

     def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str:
         """Interpolate the template with the given variables.
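
For orientation, a rough sketch of how a caller might use the updated model_config; the SDK key, flag key, context, and variables below are hypothetical, not taken from this PR:

from ldclient import Context
from ldclient.client import LDClient
from ldclient.config import Config
from ldai.client import LDAIClient

ld_client = LDClient(Config("sdk-key"))  # hypothetical SDK key
ai_client = LDAIClient(ld_client)

context = Context.builder("user-123").build()
ai_config = ai_client.model_config(
    "my-ai-config",  # hypothetical config flag key
    context,
    default_value="",
    variables={"topic": "weather"},
)

# The returned AIConfig bundles the variation (with its interpolated prompt)
# and the LDAIConfigTracker bound to that variation.
prompt_messages = ai_config.config["prompt"]
tracker = ai_config.tracker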
42 changes: 28 additions & 14 deletions ldai/tracker.py
@@ -1,7 +1,7 @@
 import time
 from typing import Dict, Union
 from ldclient import Context, LDClient
-from ldai.tracking_utils import usage_to_token_metrics
-from ldai.types import BedrockTokenUsage, FeedbackKind, TokenUsage, UnderscoreTokenUsage
+from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage

 class LDAIConfigTracker:
     def __init__(self, ld_client: LDClient, variation_id: str, config_key: str, context: Context):
@@ -19,23 +19,37 @@ def get_track_data(self):
     def track_duration(self, duration: int) -> None:
         self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration)

-    def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None:
-        token_metrics = usage_to_token_metrics(tokens)
-        if token_metrics['total'] > 0:
-            self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total'])
-        if token_metrics['input'] > 0:
-            self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics['input'])
-        if token_metrics['output'] > 0:
-            self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics['output'])
+    def track_duration_of(self, func, *args, **kwargs):
+        start_time = time.time()
+        result = func(*args, **kwargs)
+        end_time = time.time()
+        duration = int((end_time - start_time) * 1000)  # duration in milliseconds
+        self.track_duration(duration)
+        return result

     def track_error(self, error: int) -> None:
         self.ld_client.track('$ld:ai:error', self.context, self.get_track_data(), error)

-    def track_generation(self, generation: int) -> None:
-        self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation)
-
     def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
         if feedback['kind'] == FeedbackKind.Positive:
             self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1)
         elif feedback['kind'] == FeedbackKind.Negative:
-            self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1)
+            self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1)
+
+    def track_generation(self, generation: int) -> None:
+        self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation)
+
+    def track_openai(self, func, *args, **kwargs):
+        result = self.track_duration_of(func, *args, **kwargs)
+        if result.usage:
+            self.track_tokens(OpenAITokenUsage(result.usage))
+        return result
+
+    def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None:
+        token_metrics = tokens.to_metrics()
+        if token_metrics['total'] > 0:
+            self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total'])
+        if token_metrics['input'] > 0:
+            self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics['input'])
+        if token_metrics['output'] > 0:
+            self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics['output'])
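
Continuing the sketch above, this is how the new tracker methods might be exercised; the model name and messages are illustrative, and the client usage assumes the v1 openai package, which is not part of this diff:

from openai import OpenAI

from ldai.types import FeedbackKind

openai_client = OpenAI()

# track_openai wraps the call in track_duration_of, then records token counts
# from the response's .usage attribute as OpenAITokenUsage.
completion = tracker.track_openai(
    openai_client.chat.completions.create,
    model="gpt-4o-mini",  # hypothetical model name
    messages=ai_config.config["prompt"],
)

# Feedback is passed as a dict keyed by 'kind'.
tracker.track_feedback({"kind": FeedbackKind.Positive})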
23 changes: 0 additions & 23 deletions ldai/tracking_utils.py

This file was deleted.

87 changes: 73 additions & 14 deletions ldai/types.py
@@ -1,26 +1,85 @@
 from enum import Enum
-from typing import TypedDict
+from typing import Callable
+from dataclasses import dataclass

+@dataclass
Member (keelerm84):
I don't typically see both @dataclass and TypedDict used together. Are both of these needed? If so, why not on the other types below?

Author:
@keelerm84 this is a lack of knowledge on my part. I switched them over to dataclasses in the following commit.

+class TokenMetrics():
+    total: int
+    input: int
+    output: int # type: ignore
+
+@dataclass
+class AIConfigData():
+    config: dict
+    prompt: any
+    _ldMeta: dict
+
+class AITracker():
+    track_duration: Callable[..., None]
+    track_tokens: Callable[..., None]
+    track_error: Callable[..., None]
+    track_generation: Callable[..., None]
+    track_feedback: Callable[..., None]
+
+class AIConfig():
+    def __init__(self, config: AIConfigData, tracker: AITracker):
+        self.config = config
+        self.tracker = tracker

 class FeedbackKind(Enum):
     Positive = "positive"
     Negative = "negative"

-class TokenUsage(TypedDict):
-    total_tokens: int
-    prompt_tokens: int
-    completion_tokens: int
+@dataclass

-class UnderscoreTokenUsage(TypedDict):
+class TokenUsage():
     total_tokens: int
     prompt_tokens: int
     completion_tokens: int

-class BedrockTokenUsage(TypedDict):
-    totalTokens: int
-    inputTokens: int
-    outputTokens: int
+    def to_metrics(self):
+        return {
+            'total': self['total_tokens'],
+            'input': self['prompt_tokens'],
+            'output': self['completion_tokens'],
+        }

-class TokenMetrics(TypedDict):
-    total: int
-    input: int
-    output: int # type: ignore
+class OpenAITokenUsage:
+    def __init__(self, data: any):
+        self.total_tokens = data.total_tokens
+        self.prompt_tokens = data.prompt_tokens
+        self.completion_tokens = data.completion_tokens
+
+    def to_metrics(self) -> TokenMetrics:
+        return {
+            'total': self.total_tokens,
+            'input': self.prompt_tokens,
+            'output': self.completion_tokens,
+        }
+
+class UnderscoreTokenUsage:
+    def __init__(self, data: dict):
+        self.total_tokens = data.get('total_tokens', 0)
+        self.prompt_tokens = data.get('prompt_tokens', 0)
+        self.completion_tokens = data.get('completion_tokens', 0)
+
+    def to_metrics(self) -> TokenMetrics:
+        return {
+            'total': self.total_tokens,
+            'input': self.prompt_tokens,
+            'output': self.completion_tokens,
+        }
+
+class BedrockTokenUsage:
+    def __init__(self, data: dict):
+        self.totalTokens = data.get('totalTokens', 0)
+        self.inputTokens = data.get('inputTokens', 0)
+        self.outputTokens = data.get('outputTokens', 0)
+
+    def to_metrics(self) -> TokenMetrics:
+        return {
+            'total': self.totalTokens,
+            'input': self.inputTokens,
+            'output': self.outputTokens,
+        }
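
To illustrate the TypedDict-versus-dataclass question from the review thread: a TypedDict instance is an ordinary dict at runtime (subscript access only), while a dataclass generates an __init__ and exposes attributes, so dict-style lookups like self['total_tokens'] no longer work after the switch. A standalone sketch, not part of the diff; the class names here are made up:

from dataclasses import dataclass
from typing import TypedDict

class UsageDict(TypedDict):  # hypothetical name, for illustration
    total_tokens: int

@dataclass
class UsageData:  # hypothetical name, for illustration
    total_tokens: int

d: UsageDict = {"total_tokens": 30}
print(d["total_tokens"])  # TypedDict: it is just a dict at runtime

u = UsageData(total_tokens=30)
print(u.total_tokens)     # dataclass: attribute access
# u["total_tokens"] would raise TypeError: 'UsageData' object is not subscriptable

# The provider-specific wrappers normalize usage into one metrics shape:
from ldai.types import BedrockTokenUsage
metrics = BedrockTokenUsage({"totalTokens": 30, "inputTokens": 20, "outputTokens": 10}).to_metrics()
print(metrics)  # {'total': 30, 'input': 20, 'output': 10}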
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "launchdarkly-server-sdk-ai"
-version = "0.0.14"
+version = "0.0.1"
 description = "LaunchDarkly SDK for AI"
 authors = ["LaunchDarkly <[email protected]>"]
 license = "Apache-2.0"