Skip to content
Merged
Show file tree
Hide file tree
Changes from 10 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "1.0.0"
".": "0.1.0"
}
4 changes: 1 addition & 3 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ LaunchDarkly has published an [SDK contributor's guide](https://docs.launchdarkl

## Submitting bug reports and feature requests

The LaunchDarkly SDK team monitors the [issue tracker](https://github.com/launchdarkly/python-server-sdk-AI/issues) in the SDK repository. Bug reports and feature requests specific to this library should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days.
The LaunchDarkly SDK team monitors the [issue tracker](https://github.com/launchdarkly/python-server-sdk-ai/issues) in the SDK repository. Bug reports and feature requests specific to this library should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days.

## Submitting pull requests

Expand Down Expand Up @@ -55,8 +55,6 @@ make lint

The library's module structure is as follows:

<!-- TODO: Add structure description -->

### Type hints

Python does not require the use of type hints, but they can be extremely helpful for spotting mistakes and for improving the IDE experience, so we should always use them in the library. Every method in the public API is expected to have type hints for all non-`self` parameters, and for its return value if any.
Expand Down
13 changes: 1 addition & 12 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,18 +12,7 @@ This version of the library has a minimum Python version of 3.8.

## Getting started

Install the package

$ pip install launchdarkly-server-sdk-ai

The provided `TracingHook` can be setup as shown below:

<!-- TODO: Install instructions -->

```python
import ldclient

```
Refer to the [SDK reference guide](https://docs.launchdarkly.com/sdk/ai/python) for instructions on getting started with using the SDK.

## Learn more

Expand Down
41 changes: 20 additions & 21 deletions ldai/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,42 +12,41 @@ class LDAIClient:
def __init__(self, client: LDClient):
self.client = client

def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> AIConfig:
"""Get the value of a model configuration asynchronously.

Args:
key: The key of the model configuration.
context: The context to evaluate the model configuration in.
default_value: The default value of the model configuration.
variables: Additional variables for the model configuration.
def model_config(self, key: str, context: Context, default_value: AIConfig, variables: Optional[Dict[str, Any]] = None) -> AIConfig:
"""
Get the value of a model configuration asynchronously.

Returns:
The value of the model configuration.
:param key: The key of the model configuration.
:param context: The context to evaluate the model configuration in.
:param default_value: The default value of the model configuration.
:param variables: Additional variables for the model configuration.
:return: The value of the model configuration.
"""
variation = self.client.variation(key, context, default_value)

all_variables = {'ldctx': context}
all_variables = {}
if variables:
all_variables.update(variables)

all_variables['ldctx'] = context
print(variation)
#if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']):
variation['prompt'] = [
{
**entry,
'role': entry['role'],
'content': self.interpolate_template(entry['content'], all_variables)
}
for entry in variation['prompt']
]

return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context))
enabled = variation.get('_ldMeta',{}).get('enabled', False)
return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context), enabled=bool(enabled))

def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str:
"""Interpolate the template with the given variables.

Args:
template: The template string.
variables: The variables to interpolate into the template.
"""
Interpolate the template with the given variables.

Returns:
The interpolated string.
:template: The template string.
:variables: The variables to interpolate into the template.
:return: The interpolated string.
"""
return chevron.render(template, variables)
101 changes: 101 additions & 0 deletions ldai/testing/test_model_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
import pytest
from ldclient import LDClient, Context, Config
from ldclient.integrations.test_data import TestData
from ldai.types import AIConfig
from ldai.client import LDAIClient
from ldclient.testing.builders import *

@pytest.fixture
def td() -> TestData:
    """Build a TestData source preloaded with the AI-config flags used by these tests.

    Each flag's value mimics an AI config payload: a 'model' dict, a 'prompt'
    list of role/content messages (chevron/mustache templates), and an
    '_ldMeta' dict carrying 'enabled' and 'versionKey'.
    """
    td = TestData.data_source()
    # Single-message prompt with a {{name}} variable placeholder.
    td.update(td.flag('model-config').variations({
        'model': { 'modelId': 'fakeModel'},
        'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}],
        '_ldMeta': {'enabled': True, 'versionKey': 'abcd'}
    }, "green").variation_for_all(0))

    # Two prompt entries, each with its own placeholder ({{name}}, {{day}}).
    td.update(td.flag('multiple-prompt').variations({
        'model': { 'modelId': 'fakeModel'},
        'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}, {'role': 'user', 'content': 'The day is, {{day}}!'}],
        '_ldMeta': {'enabled': True, 'versionKey': 'abcd'}
    }, "green").variation_for_all(0))

    # Uses {{ldctx.name}} so interpolation reads from the evaluation context.
    td.update(td.flag('ctx-interpolation').variations({
        'model': { 'modelId': 'fakeModel'},
        'prompt': [{'role': 'system', 'content': 'Hello, {{ldctx.name}}!'}],
        '_ldMeta': {'enabled': True, 'versionKey': 'abcd'}
    }).variation_for_all(0))

    # _ldMeta.enabled is False: exercises the disabled-config path.
    td.update(td.flag('off-config').variations({
        'model': { 'modelId': 'fakeModel'},
        'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}],
        '_ldMeta': {'enabled': False, 'versionKey': 'abcd'}
    }).variation_for_all(0))

    return td

@pytest.fixture
def client(td: TestData) -> LDClient:
    """An LDClient wired to the TestData source, with event delivery disabled."""
    return LDClient(config=Config('sdk-key', update_processor_class=td, send_events=False))

@pytest.fixture
def ldai_client(client: LDClient) -> LDAIClient:
    """The AI client under test, wrapping the TestData-backed LDClient."""
    return LDAIClient(client)

def test_model_config_interpolation(ldai_client: LDAIClient):
    """The prompt template is rendered with the supplied variables."""
    user = Context.create('user-key')
    fallback_value = {
        'model': {'modelId': 'fakeModel'},
        'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}],
        '_ldMeta': {'enabled': True, 'versionKey': 'abcd'},
    }
    fallback = AIConfig(config=fallback_value, tracker=None, enabled=True)

    result = ldai_client.model_config('model-config', user, fallback, {'name': 'World'})

    assert result.config['prompt'][0]['content'] == 'Hello, World!'
    assert result.enabled is True
    assert result.tracker.version_key == 'abcd'

def test_model_config_no_variables(ldai_client: LDAIClient):
    """With no variables supplied, template placeholders render as empty strings."""
    fallback = AIConfig(config={}, tracker=None, enabled=True)

    result = ldai_client.model_config('model-config', Context.create('user-key'), fallback, {})

    assert result.config['prompt'][0]['content'] == 'Hello, !'
    assert result.enabled is True
    assert result.tracker.version_key == 'abcd'

def test_context_interpolation(ldai_client: LDAIClient):
    """`ldctx.*` placeholders are filled from the evaluation context itself."""
    sandy = Context.builder('user-key').name("Sandy").build()
    fallback = AIConfig(config={}, tracker=None, enabled=True)

    result = ldai_client.model_config('ctx-interpolation', sandy, fallback, {'name': 'World'})

    # The context's name is used; the unrelated 'name' variable is ignored.
    assert result.config['prompt'][0]['content'] == 'Hello, Sandy!'
    assert result.enabled is True
    assert result.tracker.version_key == 'abcd'

def test_model_config_disabled(ldai_client: LDAIClient):
    """A flag whose _ldMeta marks it disabled yields an AIConfig with enabled False."""
    fallback = AIConfig(config={}, tracker=None, enabled=True)

    result = ldai_client.model_config('off-config', Context.create('user-key'), fallback, {})

    assert result.enabled is False
    assert result.tracker.version_key == 'abcd'

def test_model_config_multiple(ldai_client: LDAIClient):
    """Every entry in a multi-message prompt gets interpolated independently."""
    user = Context.create('user-key')
    fallback = AIConfig(config={}, tracker=None, enabled=True)
    template_vars = {'name': 'World', 'day': 'Monday'}

    result = ldai_client.model_config('multiple-prompt', user, fallback, template_vars)

    assert result.config['prompt'][0]['content'] == 'Hello, World!'
    assert result.config['prompt'][1]['content'] == 'The day is, Monday!'
    assert result.enabled is True
    assert result.tracker.version_key == 'abcd'
38 changes: 24 additions & 14 deletions ldai/tracker.py
Original file line number Diff line number Diff line change
@@ -1,51 +1,61 @@
import time
from typing import Dict, Union
from ldclient import Context, LDClient
from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage
from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage

class LDAIConfigTracker:
def __init__(self, ld_client: LDClient, variation_id: str, config_key: str, context: Context):
def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context):
self.ld_client = ld_client
self.variation_id = variation_id
self.version_key = version_key
self.config_key = config_key
self.context = context

def get_track_data(self):
return {
'variationId': self.variation_id,
'versionKey': self.version_key,
'configKey': self.config_key,
}

def track_duration(self, duration: int) -> None:
self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration)

def track_duration_of(self, func, *args, **kwargs):
def track_duration_of(self, func):
start_time = time.time()
result = func(*args, **kwargs)
result = func()
end_time = time.time()
duration = int((end_time - start_time) * 1000) # duration in milliseconds
self.track_duration(duration)
return result

def track_error(self, error: int) -> None:
self.ld_client.track('$ld:ai:error', self.context, self.get_track_data(), error)

def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
if feedback['kind'] == FeedbackKind.Positive:
self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1)
elif feedback['kind'] == FeedbackKind.Negative:
self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1)

def track_generation(self, generation: int) -> None:
self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation)
def track_success(self) -> None:
self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), 1)

def track_openai(self, func, *args, **kwargs):
result = self.track_duration_of(func, *args, **kwargs)
def track_openai(self, func):
result = self.track_duration_of(func)
if result.usage:
self.track_tokens(OpenAITokenUsage(result.usage))
return result

def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None:
def track_bedrock_converse(self, res: dict) -> dict:
status_code = res.get('$metadata', {}).get('httpStatusCode', 0)
if status_code == 200:
self.track_success()
elif status_code >= 400:
# Potentially add error tracking in the future.
pass
if res.get('metrics', {}).get('latencyMs'):
self.track_duration(res['metrics']['latencyMs'])
if res.get('usage'):
self.track_tokens(BedrockTokenUsage(res['usage']))
return res

def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None:
token_metrics = tokens.to_metrics()
if token_metrics['total'] > 0:
self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total'])
Expand Down
21 changes: 5 additions & 16 deletions ldai/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ class TokenMetrics():
output: int # type: ignore

@dataclass

class AIConfigData():
config: dict
prompt: any
Expand All @@ -23,16 +22,17 @@ class AITracker():
track_feedback: Callable[..., None]

class AIConfig():
def __init__(self, config: AIConfigData, tracker: AITracker):
def __init__(self, config: AIConfigData, tracker: AITracker, enabled: bool):
self.config = config
self.tracker = tracker
self.enabled = enabled

@dataclass
class FeedbackKind(Enum):
Positive = "positive"
Negative = "negative"

@dataclass

class TokenUsage():
total_tokens: int
prompt_tokens: int
Expand All @@ -45,6 +45,7 @@ def to_metrics(self):
'output': self['completion_tokens'],
}

@dataclass
class OpenAITokenUsage:
def __init__(self, data: any):
self.total_tokens = data.total_tokens
Expand All @@ -58,19 +59,7 @@ def to_metrics(self) -> TokenMetrics:
'output': self.completion_tokens,
}

class UnderscoreTokenUsage:
def __init__(self, data: dict):
self.total_tokens = data.get('total_tokens', 0)
self.prompt_tokens = data.get('prompt_tokens', 0)
self.completion_tokens = data.get('completion_tokens', 0)

def to_metrics(self) -> TokenMetrics:
return {
'total': self.total_tokens,
'input': self.prompt_tokens,
'output': self.completion_tokens,
}

@dataclass
class BedrockTokenUsage:
def __init__(self, data: dict):
self.totalTokens = data.get('totalTokens', 0)
Expand Down
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
[tool.poetry]
name = "launchdarkly-server-sdk-ai"
version = "0.0.1"
version = "0.1.0"
description = "LaunchDarkly SDK for AI"
authors = ["LaunchDarkly <[email protected]>"]
license = "Apache-2.0"
readme = "README.md"
homepage = "https://docs.launchdarkly.com/sdk/server-side/python-ai"
homepage = "https://docs.launchdarkly.com/sdk/ai/python"
repository = "https://github.com/launchdarkly/python-server-sdk-ai"
documentation = "https://launchdarkly-python-sdk-ai.readthedocs.io/en/latest/"
classifiers = [
Expand Down
Loading