diff --git a/ldai/client.py b/ldai/client.py index 77fbc24..a1463f8 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -1,11 +1,10 @@ -import json from typing import Any, Dict, Optional from ldclient import Context -#from config import LDAIConfig, LDAIConfigTracker from ldclient.client import LDClient import chevron from ldai.tracker import LDAIConfigTracker +from ldai.types import AIConfig class LDAIClient: """The LaunchDarkly AI SDK client object.""" @@ -13,7 +12,7 @@ class LDAIClient: def __init__(self, client: LDClient): self.client = client - def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> Any: + def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> AIConfig: """Get the value of a model configuration asynchronously. Args: @@ -31,7 +30,6 @@ def model_config(self, key: str, context: Context, default_value: str, variables if variables: all_variables.update(variables) - print(variation) variation['prompt'] = [ { **entry, @@ -40,12 +38,7 @@ def model_config(self, key: str, context: Context, default_value: str, variables for entry in variation['prompt'] ] - #return detail.value, - - return { - 'config': variation, - 'tracker': LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context) - } + return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context)) def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: """Interpolate the template with the given variables. 
diff --git a/ldai/tracker.py b/ldai/tracker.py index b934e22..9be9d9c 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -1,7 +1,7 @@ +import time from typing import Dict, Union from ldclient import Context, LDClient -from ldai.tracking_utils import usage_to_token_metrics -from ldai.types import BedrockTokenUsage, FeedbackKind, TokenUsage, UnderscoreTokenUsage +from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage class LDAIConfigTracker: def __init__(self, ld_client: LDClient, variation_id: str, config_key: str, context: Context): @@ -19,23 +19,37 @@ def get_track_data(self): def track_duration(self, duration: int) -> None: self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration) - def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None: - token_metrics = usage_to_token_metrics(tokens) - if token_metrics['total'] > 0: - self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total']) - if token_metrics['input'] > 0: - self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics['input']) - if token_metrics['output'] > 0: - self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics['output']) + def track_duration_of(self, func, *args, **kwargs): + start_time = time.time() + result = func(*args, **kwargs) + end_time = time.time() + duration = int((end_time - start_time) * 1000) # duration in milliseconds + self.track_duration(duration) + return result def track_error(self, error: int) -> None: self.ld_client.track('$ld:ai:error', self.context, self.get_track_data(), error) - def track_generation(self, generation: int) -> None: - self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation) - def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: if feedback['kind'] == 
FeedbackKind.Positive: self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1) elif feedback['kind'] == FeedbackKind.Negative: - self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1) \ No newline at end of file + self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1) + + def track_generation(self, generation: int) -> None: + self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation) + + def track_openai(self, func, *args, **kwargs): + result = self.track_duration_of(func, *args, **kwargs) + if result.usage: + self.track_tokens(OpenAITokenUsage(result.usage)) + return result + + def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None: + token_metrics = tokens.to_metrics() + if token_metrics['total'] > 0: + self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total']) + if token_metrics['input'] > 0: + self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics['input']) + if token_metrics['output'] > 0: + self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics['output']) \ No newline at end of file diff --git a/ldai/tracking_utils.py b/ldai/tracking_utils.py deleted file mode 100644 index a44bf4a..0000000 --- a/ldai/tracking_utils.py +++ /dev/null @@ -1,23 +0,0 @@ -from typing import Union -from ldai.types import BedrockTokenUsage, TokenMetrics, TokenUsage, UnderscoreTokenUsage - -def usage_to_token_metrics(usage: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> TokenMetrics: - def get_attr(obj, attr, default=0): - if isinstance(obj, dict): - return obj.get(attr, default) - return getattr(obj, attr, default) - - if 'inputTokens' in usage and 'outputTokens' in usage: - # Bedrock usage - return { - 'total': get_attr(usage, 'totalTokens'), - 'input': 
get_attr(usage, 'inputTokens'), - 'output': get_attr(usage, 'outputTokens'), - } - - # OpenAI usage (both camelCase and snake_case) - return { - 'total': get_attr(usage, 'total_tokens', get_attr(usage, 'totalTokens', 0)), - 'input': get_attr(usage, 'prompt_tokens', get_attr(usage, 'promptTokens', 0)), - 'output': get_attr(usage, 'completion_tokens', get_attr(usage, 'completionTokens', 0)), - } \ No newline at end of file diff --git a/ldai/types.py b/ldai/types.py index e5a962c..efa8300 100644 --- a/ldai/types.py +++ b/ldai/types.py @@ -1,26 +1,85 @@ from enum import Enum -from typing import TypedDict +from typing import Any, Callable +from dataclasses import dataclass + +@dataclass +class TokenMetrics(): + total: int + input: int + output: int # type: ignore + +@dataclass + +class AIConfigData(): + config: dict + prompt: Any + _ldMeta: dict + +class AITracker(): + track_duration: Callable[..., None] + track_tokens: Callable[..., None] + track_error: Callable[..., None] + track_generation: Callable[..., None] + track_feedback: Callable[..., None] + +class AIConfig(): + def __init__(self, config: AIConfigData, tracker: AITracker): + self.config = config + self.tracker = tracker class FeedbackKind(Enum): Positive = "positive" Negative = "negative" -class TokenUsage(TypedDict): - total_tokens: int - prompt_tokens: int - completion_tokens: int +@dataclass -class UnderscoreTokenUsage(TypedDict): +class TokenUsage(): total_tokens: int prompt_tokens: int completion_tokens: int -class BedrockTokenUsage(TypedDict): - totalTokens: int - inputTokens: int - outputTokens: int + def to_metrics(self): + return { + 'total': self.total_tokens, + 'input': self.prompt_tokens, + 'output': self.completion_tokens, + } -class TokenMetrics(TypedDict): - total: int - input: int - output: int # type: ignore \ No newline at end of file +class OpenAITokenUsage: + def __init__(self, data: Any): + self.total_tokens = data.total_tokens + self.prompt_tokens = data.prompt_tokens + 
self.completion_tokens = data.completion_tokens + + def to_metrics(self) -> TokenMetrics: + return { + 'total': self.total_tokens, + 'input': self.prompt_tokens, + 'output': self.completion_tokens, + } + +class UnderscoreTokenUsage: + def __init__(self, data: dict): + self.total_tokens = data.get('total_tokens', 0) + self.prompt_tokens = data.get('prompt_tokens', 0) + self.completion_tokens = data.get('completion_tokens', 0) + + def to_metrics(self) -> TokenMetrics: + return { + 'total': self.total_tokens, + 'input': self.prompt_tokens, + 'output': self.completion_tokens, + } + +class BedrockTokenUsage: + def __init__(self, data: dict): + self.totalTokens = data.get('totalTokens', 0) + self.inputTokens = data.get('inputTokens', 0) + self.outputTokens = data.get('outputTokens', 0) + + def to_metrics(self) -> TokenMetrics: + return { + 'total': self.totalTokens, + 'input': self.inputTokens, + 'output': self.outputTokens, + } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 3017ce5..87d77f0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "launchdarkly-server-sdk-ai" -version = "0.0.14" +version = "0.0.1" description = "LaunchDarkly SDK for AI" authors = ["LaunchDarkly "] license = "Apache-2.0"