Merged
Changes from 1 commit
6 changes: 2 additions & 4 deletions ldai/client.py
@@ -1,19 +1,19 @@
import json
from typing import Any, Dict, Optional
from ldclient import Context
#from config import LDAIConfig, LDAIConfigTracker
from ldclient.client import LDClient
import chevron

from ldai.tracker import LDAIConfigTracker
from ldai.types import AIConfig

class LDAIClient:
"""The LaunchDarkly AI SDK client object."""

def __init__(self, client: LDClient):
self.client = client

def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> Any:
def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> AIConfig:
Member:
It looks like this is returning a dictionary, not an AIConfig type.

Also just noticed there is a rogue print in there.

Author:
@keelerm84 updated the logic to return an AIConfig

"""Get the value of a model configuration asynchronously.

Args:
@@ -40,8 +40,6 @@ def model_config(self, key: str, context: Context, default_value: str, variables
for entry in variation['prompt']
]

#return detail.value,

return {
'config': variation,
'tracker': LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context)
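
The remainder of the method is collapsed in this view, and the follow-up commit the author mentions isn't shown. As a rough sketch, assuming the AIConfig constructor from ldai/types.py below, the updated return could look something like this (build_ai_config is a hypothetical stand-in, not code from the PR):

# Hypothetical sketch, not from the PR: wrap the evaluated variation and its
# tracker in an AIConfig instead of returning a bare dict.
from ldai.tracker import LDAIConfigTracker
from ldai.types import AIConfig

def build_ai_config(client, key, context, variation: dict) -> AIConfig:
    tracker = LDAIConfigTracker(
        client, variation['_ldMeta']['variationId'], key, context
    )
    return AIConfig(config=variation, tracker=tracker)
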
24 changes: 3 additions & 21 deletions ldai/tracking_utils.py
@@ -1,23 +1,5 @@
from typing import Union
from ldai.types import BedrockTokenUsage, TokenMetrics, TokenUsage, UnderscoreTokenUsage
from ldai.types import BedrockTokenUsage, TokenMetrics, OpenAITokenUsage, UnderscoreTokenUsage

def usage_to_token_metrics(usage: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> TokenMetrics:
def get_attr(obj, attr, default=0):
if isinstance(obj, dict):
return obj.get(attr, default)
return getattr(obj, attr, default)

if 'inputTokens' in usage and 'outputTokens' in usage:
# Bedrock usage
return {
'total': get_attr(usage, 'totalTokens'),
'input': get_attr(usage, 'inputTokens'),
'output': get_attr(usage, 'outputTokens'),
}

# OpenAI usage (both camelCase and snake_case)
return {
'total': get_attr(usage, 'total_tokens', get_attr(usage, 'totalTokens', 0)),
'input': get_attr(usage, 'prompt_tokens', get_attr(usage, 'promptTokens', 0)),
'output': get_attr(usage, 'completion_tokens', get_attr(usage, 'completionTokens', 0)),
}
def usage_to_token_metrics(usage: Union[OpenAITokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> TokenMetrics:
return usage.to_metrics()
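
The rewritten helper delegates normalization to each usage class's to_metrics() method. A quick sanity-check sketch of how it would be called, with made-up token counts:

from ldai.tracking_utils import usage_to_token_metrics
from ldai.types import BedrockTokenUsage, UnderscoreTokenUsage

# Each class maps its provider-specific field names onto the common
# TokenMetrics shape of {'total', 'input', 'output'}.
bedrock = BedrockTokenUsage({'totalTokens': 30, 'inputTokens': 10, 'outputTokens': 20})
snake = UnderscoreTokenUsage({'total_tokens': 30, 'prompt_tokens': 10, 'completion_tokens': 20})

assert usage_to_token_metrics(bedrock) == {'total': 30, 'input': 10, 'output': 20}
assert usage_to_token_metrics(snake) == {'total': 30, 'input': 10, 'output': 20}
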
84 changes: 71 additions & 13 deletions ldai/types.py
@@ -1,5 +1,32 @@
from enum import Enum
from typing import TypedDict
from typing import Callable, TypedDict
from dataclasses import dataclass

@dataclass
Member:
I don't typically see both @dataclass and TypedDict used together. Are both of these needed? If so, why not on the other types below?

Author:
@keelerm84 this is a lack of knowledge on my part. I switched them over to dataclasses in the following commit.
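
The dataclass rewrite referenced here lands in a later commit that isn't part of this diff; presumably it looks something like the following (an assumption, not the actual commit):

# Assumed shape of the later dataclass rewrite (not part of this commit).
from dataclasses import dataclass

@dataclass
class TokenMetrics:
    total: int
    input: int
    output: int
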

class TokenMetrics(TypedDict):
total: int
input: int
output: int # type: ignore

class AIConfigData(TypedDict):
config: dict
prompt: any
_ldMeta: dict

class AITracker(TypedDict):
track_duration: Callable[..., None]
track_tokens: Callable[..., None]
track_error: Callable[..., None]
track_generation: Callable[..., None]
track_feedback: Callable[..., None]

class AIConfig():
def __init__(self, config: AIConfigData, tracker: AITracker):
self._config = config
self._tracker = tracker

config: AIConfigData
tracker: AITracker

class FeedbackKind(Enum):
Positive = "positive"
@@ -10,17 +37,48 @@ class TokenUsage(TypedDict):
prompt_tokens: int
completion_tokens: int

class UnderscoreTokenUsage(TypedDict):
total_tokens: int
prompt_tokens: int
completion_tokens: int
def to_metrics(self):
return {
'total': self['total_tokens'],
'input': self['prompt_tokens'],
'output': self['completion_tokens'],
}

class BedrockTokenUsage(TypedDict):
totalTokens: int
inputTokens: int
outputTokens: int
class OpenAITokenUsage:
def __init__(self, data: any):
self.total_tokens = data.total_tokens
self.prompt_tokens = data.prompt_tokens
self.completion_tokens = data.completion_tokens

class TokenMetrics(TypedDict):
total: int
input: int
output: int # type: ignore
def to_metrics(self) -> TokenMetrics:
return {
'total': self.total_tokens,
'input': self.prompt_tokens,
'output': self.completion_tokens,
}

class UnderscoreTokenUsage:
def __init__(self, data: dict):
self.total_tokens = data.get('total_tokens', 0)
self.prompt_tokens = data.get('prompt_tokens', 0)
self.completion_tokens = data.get('completion_tokens', 0)

def to_metrics(self) -> TokenMetrics:
return {
'total': self.total_tokens,
'input': self.prompt_tokens,
'output': self.completion_tokens,
}

class BedrockTokenUsage:
def __init__(self, data: dict):
self.totalTokens = data.get('totalTokens', 0)
self.inputTokens = data.get('inputTokens', 0)
self.outputTokens = data.get('outputTokens', 0)

def to_metrics(self) -> TokenMetrics:
return {
'total': self.totalTokens,
'input': self.inputTokens,
'output': self.outputTokens,
}
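
Unlike the dict-based classes, OpenAITokenUsage above reads attributes off a response-like object (e.g. the usage attribute of an OpenAI completion). A small illustrative sketch, with SimpleNamespace standing in for a real response and made-up values:

from types import SimpleNamespace
from ldai.types import OpenAITokenUsage

# Stand-in for a real OpenAI response's usage object.
usage = SimpleNamespace(total_tokens=42, prompt_tokens=30, completion_tokens=12)

assert OpenAITokenUsage(usage).to_metrics() == {'total': 42, 'input': 30, 'output': 12}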