From 780fa159b7dac46d0ebe2045929ba6d1d0fc3f3b Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Wed, 6 Nov 2024 13:22:32 -0500 Subject: [PATCH 1/9] align Python with most recent TypeScript changes --- ldai/client.py | 3 ++- ldai/tracker.py | 25 +++++++++++++++++-------- ldai/types.py | 7 ++++--- 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/ldai/client.py b/ldai/client.py index a1463f8..3863d9d 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -38,7 +38,8 @@ def model_config(self, key: str, context: Context, default_value: str, variables for entry in variation['prompt'] ] - return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context)) + enabled = ['_ldMeta'].get('enabled') + return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['versionKey'], key, context, bool(enabled))) def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: """Interpolate the template with the given variables. 
diff --git a/ldai/tracker.py b/ldai/tracker.py index 9be9d9c..d0f165d 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -4,15 +4,15 @@ from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage class LDAIConfigTracker: - def __init__(self, ld_client: LDClient, variation_id: str, config_key: str, context: Context): + def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context): self.ld_client = ld_client - self.variation_id = variation_id + self.version_key = version_key self.config_key = config_key self.context = context def get_track_data(self): return { - 'variationId': self.variation_id, + 'versionKey': self.version_key, 'configKey': self.config_key, } @@ -27,17 +27,14 @@ def track_duration_of(self, func, *args, **kwargs): self.track_duration(duration) return result - def track_error(self, error: int) -> None: - self.ld_client.track('$ld:ai:error', self.context, self.get_track_data(), error) - def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: if feedback['kind'] == FeedbackKind.Positive: self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1) elif feedback['kind'] == FeedbackKind.Negative: self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1) - def track_generation(self, generation: int) -> None: - self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation) + def track_success(self) -> None: + self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), 1) def track_openai(self, func, *args, **kwargs): result = self.track_duration_of(func, *args, **kwargs) @@ -45,6 +42,18 @@ def track_openai(self, func, *args, **kwargs): self.track_tokens(OpenAITokenUsage(result.usage)) return result + def track_bedrock_converse(self, res: dict) -> dict: + if res.get('$metadata', {}).get('httpStatusCode') == 200: + self.track_success() + elif 
res.get('$metadata', {}).get('httpStatusCode') and res['$metadata']['httpStatusCode'] >= 400: + # Potentially add error tracking in the future. + pass + if res.get('metrics', {}).get('latencyMs'): + self.track_duration(res['metrics']['latencyMs']) + if res.get('usage'): + self.track_tokens(BedrockTokenUsage(res['usage'])) + return res + def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None: token_metrics = tokens.to_metrics() if token_metrics['total'] > 0: diff --git a/ldai/types.py b/ldai/types.py index efa8300..2b2d3d3 100644 --- a/ldai/types.py +++ b/ldai/types.py @@ -23,16 +23,16 @@ class AITracker(): track_feedback: Callable[..., None] class AIConfig(): - def __init__(self, config: AIConfigData, tracker: AITracker): + def __init__(self, config: AIConfigData, tracker: AITracker, enabled: bool): self.config = config self.tracker = tracker + self.enabled = enabled +@dataclass class FeedbackKind(Enum): Positive = "positive" Negative = "negative" -@dataclass - class TokenUsage(): total_tokens: int prompt_tokens: int @@ -45,6 +45,7 @@ def to_metrics(self): 'output': self['completion_tokens'], } +@dataclass class OpenAITokenUsage: def __init__(self, data: any): self.total_tokens = data.total_tokens From a1f949050c31c2dc6b282d974c7af7fb50e936c4 Mon Sep 17 00:00:00 2001 From: Dan O'Brien Date: Wed, 6 Nov 2024 15:36:26 -0500 Subject: [PATCH 2/9] Update ldai/client.py Co-authored-by: Matthew M. 
Keeler --- ldai/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldai/client.py b/ldai/client.py index 3863d9d..ef8e99b 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -38,7 +38,7 @@ def model_config(self, key: str, context: Context, default_value: str, variables for entry in variation['prompt'] ] - enabled = ['_ldMeta'].get('enabled') + enabled = variation['_ldMeta'].get('enabled') return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['versionKey'], key, context, bool(enabled))) def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: From 7be4d270f690d537ec66c6aa55da27b5aaec62e2 Mon Sep 17 00:00:00 2001 From: Dan O'Brien Date: Wed, 6 Nov 2024 15:36:46 -0500 Subject: [PATCH 3/9] Update ldai/tracker.py Co-authored-by: Matthew M. Keeler --- ldai/tracker.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ldai/tracker.py b/ldai/tracker.py index d0f165d..1888626 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -43,9 +43,10 @@ def track_openai(self, func, *args, **kwargs): return result def track_bedrock_converse(self, res: dict) -> dict: - if res.get('$metadata', {}).get('httpStatusCode') == 200: + status_code = res.get('$metadata', {}).get('httpStatusCode', 0) + if status_code == 200: self.track_success() - elif res.get('$metadata', {}).get('httpStatusCode') and res['$metadata']['httpStatusCode'] >= 400: + elif status_code >= 400: # Potentially add error tracking in the future. 
pass if res.get('metrics', {}).get('latencyMs'): From d53c63c8ff88a4c67a4acb2801eecefd04a370ab Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Wed, 6 Nov 2024 16:14:55 -0500 Subject: [PATCH 4/9] review feedback --- .release-please-manifest.json | 2 +- CONTRIBUTING.md | 4 +-- ldai/client.py | 52 +++++++++++++++++------------------ ldai/tracker.py | 8 +++--- ldai/types.py | 1 - pyproject.toml | 2 +- 6 files changed, 32 insertions(+), 37 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 37fcefa..466df71 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.0.0" + ".": "0.1.0" } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 209b0d6..2388545 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ LaunchDarkly has published an [SDK contributor's guide](https://docs.launchdarkl ## Submitting bug reports and feature requests -The LaunchDarkly SDK team monitors the [issue tracker](https://github.com/launchdarkly/python-server-sdk-AI/issues) in the SDK repository. Bug reports and feature requests specific to this library should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days. +The LaunchDarkly SDK team monitors the [issue tracker](https://github.com/launchdarkly/python-server-sdk-ai/issues) in the SDK repository. Bug reports and feature requests specific to this library should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days. ## Submitting pull requests @@ -55,8 +55,6 @@ make lint The library's module structure is as follows: - - ### Type hints Python does not require the use of type hints, but they can be extremely helpful for spotting mistakes and for improving the IDE experience, so we should always use them in the library. 
Every method in the public API is expected to have type hints for all non-`self` parameters, and for its return value if any. diff --git a/ldai/client.py b/ldai/client.py index ef8e99b..e90ce9c 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -12,43 +12,41 @@ class LDAIClient: def __init__(self, client: LDClient): self.client = client - def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> AIConfig: - """Get the value of a model configuration asynchronously. - - Args: - key: The key of the model configuration. - context: The context to evaluate the model configuration in. - default_value: The default value of the model configuration. - variables: Additional variables for the model configuration. + def model_config(self, key: str, context: Context, default_value: AIConfig, variables: Optional[Dict[str, Any]] = None) -> AIConfig: + """ + Get the value of a model configuration asynchronously. - Returns: - The value of the model configuration. + :param key: The key of the model configuration. + :param context: The context to evaluate the model configuration in. + :param default_value: The default value of the model configuration. + :param variables: Additional variables for the model configuration. + :return: The value of the model configuration. 
""" variation = self.client.variation(key, context, default_value) - all_variables = {'ldctx': context} + all_variables = {} if variables: all_variables.update(variables) + all_variables['ldctx'] = context - variation['prompt'] = [ - { - **entry, - 'content': self.interpolate_template(entry['content'], all_variables) - } - for entry in variation['prompt'] - ] + if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']): + variation['prompt'] = [ + { + 'role': entry['role'], + 'content': self.interpolate_template(entry['content'], all_variables) + } + for entry in variation['prompt'] + ] - enabled = variation['_ldMeta'].get('enabled') - return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['versionKey'], key, context, bool(enabled))) + enabled = variation.get('_ldMeta',{}).get('enabled', False) + return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context, bool(enabled))) def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: - """Interpolate the template with the given variables. - - Args: - template: The template string. - variables: The variables to interpolate into the template. + """ + Interpolate the template with the given variables. - Returns: - The interpolated string. + :template: The template string. + :variables: The variables to interpolate into the template. + :return: The interpolated string. 
""" return chevron.render(template, variables) \ No newline at end of file diff --git a/ldai/tracker.py b/ldai/tracker.py index 1888626..e2c8bbc 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -19,9 +19,9 @@ def get_track_data(self): def track_duration(self, duration: int) -> None: self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration) - def track_duration_of(self, func, *args, **kwargs): + def track_duration_of(self, func): start_time = time.time() - result = func(*args, **kwargs) + result = func() end_time = time.time() duration = int((end_time - start_time) * 1000) # duration in milliseconds self.track_duration(duration) @@ -36,8 +36,8 @@ def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: def track_success(self) -> None: self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), 1) - def track_openai(self, func, *args, **kwargs): - result = self.track_duration_of(func, *args, **kwargs) + def track_openai(self, func): + result = self.track_duration_of(func) if result.usage: self.track_tokens(OpenAITokenUsage(result.usage)) return result diff --git a/ldai/types.py b/ldai/types.py index 2b2d3d3..d4b9f39 100644 --- a/ldai/types.py +++ b/ldai/types.py @@ -9,7 +9,6 @@ class TokenMetrics(): output: int # type: ignore @dataclass - class AIConfigData(): config: dict prompt: any diff --git a/pyproject.toml b/pyproject.toml index 87d77f0..38d5243 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "launchdarkly-server-sdk-ai" -version = "0.0.1" +version = "0.1.0" description = "LaunchDarkly SDK for AI" authors = ["LaunchDarkly "] license = "Apache-2.0" From 24e46808f6c3c9a17f105abb0ccd9c83ce6519de Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 07:33:26 -0500 Subject: [PATCH 5/9] add testing --- ldai/testing/test_model_config.py | 101 ++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 
ldai/testing/test_model_config.py diff --git a/ldai/testing/test_model_config.py b/ldai/testing/test_model_config.py new file mode 100644 index 0000000..939c983 --- /dev/null +++ b/ldai/testing/test_model_config.py @@ -0,0 +1,101 @@ +import pytest +from ldclient import LDClient, Context, Config +from ldclient.integrations.test_data import TestData +from ldai.types import AIConfig +from ldai.client import LDAIClient +from ldclient.testing.builders import * + +@pytest.fixture +def td() -> TestData: + td = TestData.data_source() + td.update(td.flag('model-config').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}], + '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} + }, "green").variation_for_all(0)) + + td.update(td.flag('multiple-prompt').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}, {'role': 'user', 'content': 'The day is, {{day}}!'}], + '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} + }, "green").variation_for_all(0)) + + td.update(td.flag('ctx-interpolation').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{ldctx.name}}!'}], + '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} + }).variation_for_all(0)) + + td.update(td.flag('off-config').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}], + '_ldMeta': {'enabled': False, 'versionKey': 'abcd'} + }).variation_for_all(0)) + + return td + +@pytest.fixture +def client(td: TestData) -> LDClient: + config = Config('sdk-key', update_processor_class=td, send_events=False) + return LDClient(config=config) + +@pytest.fixture +def ldai_client(client: LDClient) -> LDAIClient: + return LDAIClient(client) + +def test_model_config_interpolation(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AIConfig(config={ + 'model': { 'modelId': 
'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}], + '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} + }, tracker=None, enabled=True) + variables = {'name': 'World'} + + config = ldai_client.model_config('model-config', context, default_value, variables) + + assert config.config['prompt'][0]['content'] == 'Hello, World!' + assert config.enabled is True + assert config.tracker.version_key == 'abcd' + +def test_model_config_no_variables(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AIConfig(config={}, tracker=None, enabled=True) + + config = ldai_client.model_config('model-config', context, default_value, {}) + + assert config.config['prompt'][0]['content'] == 'Hello, !' + assert config.enabled is True + assert config.tracker.version_key == 'abcd' + +def test_context_interpolation(ldai_client: LDAIClient): + context = Context.builder('user-key').name("Sandy").build() + default_value = AIConfig(config={}, tracker=None, enabled=True) + variables = {'name': 'World'} + + config = ldai_client.model_config('ctx-interpolation', context, default_value, variables) + + assert config.config['prompt'][0]['content'] == 'Hello, Sandy!' + assert config.enabled is True + assert config.tracker.version_key == 'abcd' + +def test_model_config_disabled(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AIConfig(config={}, tracker=None, enabled=True) + + config = ldai_client.model_config('off-config', context, default_value, {}) + + assert config.enabled is False + assert config.tracker.version_key == 'abcd' + +def test_model_config_multiple(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AIConfig(config={}, tracker=None, enabled=True) + variables = {'name': 'World', 'day': 'Monday'} + + config = ldai_client.model_config('multiple-prompt', context, default_value, variables) + + assert config.config['prompt'][0]['content'] == 'Hello, World!' 
+ assert config.config['prompt'][1]['content'] == 'The day is, Monday!' + assert config.enabled is True + assert config.tracker.version_key == 'abcd' \ No newline at end of file From 5d58b33a1b9466f4e6dfa4997fae80a3aa32bd18 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 07:33:56 -0500 Subject: [PATCH 6/9] README cleanup client changes --- README.md | 13 +------------ ldai/client.py | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index ba7c9d9..cbe23e0 100644 --- a/README.md +++ b/README.md @@ -12,18 +12,7 @@ This version of the library has a minimum Python version of 3.8. ## Getting started -Install the package - - $ pip install launchdarkly-server-sdk-ai - -The provided `TracingHook` can be setup as shown below: - - - -```python -import ldclient - -``` +Refer to the [SDK reference guide](https://docs.launchdarkly.com/sdk/ai/python) for instructions on getting started with using the SDK. ## Learn more diff --git a/ldai/client.py b/ldai/client.py index e90ce9c..1dbdfb6 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -28,18 +28,18 @@ def model_config(self, key: str, context: Context, default_value: AIConfig, vari if variables: all_variables.update(variables) all_variables['ldctx'] = context - - if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']): - variation['prompt'] = [ - { - 'role': entry['role'], - 'content': self.interpolate_template(entry['content'], all_variables) - } - for entry in variation['prompt'] - ] + # NOTE(review): removed a leftover debug print; the isinstance guard was dropped here, + # so this assumes variation['prompt'] is a list of dicts with 'role'/'content' keys. + variation['prompt'] = [ + { + 'role': entry['role'], + 'content': self.interpolate_template(entry['content'], all_variables) + } + for entry in variation['prompt'] + ] enabled = variation.get('_ldMeta',{}).get('enabled', False) - return AIConfig(config=variation,
tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context, bool(enabled))) + return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context), enabled=bool(enabled)) def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: """ From de02914791f342ebf50a9918d65e7e0196c95280 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 09:08:42 -0500 Subject: [PATCH 7/9] add back missing dataclass remove UnderscoreToken class to match TypeScript --- ldai/tracker.py | 4 ++-- ldai/types.py | 14 +------------- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/ldai/tracker.py b/ldai/tracker.py index e2c8bbc..f7bfc51 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -1,7 +1,7 @@ import time from typing import Dict, Union from ldclient import Context, LDClient -from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage +from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage class LDAIConfigTracker: def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context): @@ -55,7 +55,7 @@ def track_bedrock_converse(self, res: dict) -> dict: self.track_tokens(BedrockTokenUsage(res['usage'])) return res - def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None: + def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None: token_metrics = tokens.to_metrics() if token_metrics['total'] > 0: self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total']) diff --git a/ldai/types.py b/ldai/types.py index d4b9f39..8c01094 100644 --- a/ldai/types.py +++ b/ldai/types.py @@ -32,6 +32,7 @@ class FeedbackKind(Enum): Positive = "positive" Negative = "negative" +@dataclass class TokenUsage(): total_tokens: int prompt_tokens: int 
@@ -58,19 +59,6 @@ def to_metrics(self) -> TokenMetrics: 'output': self.completion_tokens, } -class UnderscoreTokenUsage: - def __init__(self, data: dict): - self.total_tokens = data.get('total_tokens', 0) - self.prompt_tokens = data.get('prompt_tokens', 0) - self.completion_tokens = data.get('completion_tokens', 0) - - def to_metrics(self) -> TokenMetrics: - return { - 'total': self.total_tokens, - 'input': self.prompt_tokens, - 'output': self.completion_tokens, - } - class BedrockTokenUsage: def __init__(self, data: dict): self.totalTokens = data.get('totalTokens', 0) From 60f7353544877eee5fb298a2ca4e2ef832165b5a Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 09:09:15 -0500 Subject: [PATCH 8/9] fix up types --- ldai/types.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ldai/types.py b/ldai/types.py index 8c01094..f51ae56 100644 --- a/ldai/types.py +++ b/ldai/types.py @@ -59,6 +59,7 @@ def to_metrics(self) -> TokenMetrics: 'output': self.completion_tokens, } +@dataclass class BedrockTokenUsage: def __init__(self, data: dict): self.totalTokens = data.get('totalTokens', 0) From 005500805f47a0d36d60ae3689969f4cf044fc73 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 09:10:10 -0500 Subject: [PATCH 9/9] fix project url --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 38d5243..e90bcc0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ description = "LaunchDarkly SDK for AI" authors = ["LaunchDarkly "] license = "Apache-2.0" readme = "README.md" -homepage = "https://docs.launchdarkly.com/sdk/server-side/python-ai" +homepage = "https://docs.launchdarkly.com/sdk/ai/python" repository = "https://github.com/launchdarkly/python-server-sdk-ai" documentation = "https://launchdarkly-python-sdk-ai.readthedocs.io/en/latest/" classifiers = [