Skip to content

Commit 55e0da3

Browse files
authored
fix: Enable support for existing AI Providers (#80)
fix: Order of providers is now preserved
fix!: Removed SupportedAIProvider type
fix: Use the ldclient logger
feat: Re-export ldclient log in ldai
fix!: Removed optional logger parameters
1 parent 1ebac50 commit 55e0da3

File tree

7 files changed

+94
-189
lines changed

7 files changed

+94
-189
lines changed

packages/sdk/server-ai/src/ldai/__init__.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,15 @@
11
__version__ = "0.11.0" # x-release-please-version
22

3-
# Export main client
4-
# Export chat
3+
from ldclient import log
4+
55
from ldai.chat import Chat
66
from ldai.client import LDAIClient
7-
# Export judge
87
from ldai.judge import Judge
9-
# Export models for convenience
108
from ldai.models import ( # Deprecated aliases for backward compatibility
119
AIAgentConfig, AIAgentConfigDefault, AIAgentConfigRequest, AIAgents,
1210
AICompletionConfig, AICompletionConfigDefault, AIConfig, AIJudgeConfig,
1311
AIJudgeConfigDefault, JudgeConfiguration, LDAIAgent, LDAIAgentConfig,
1412
LDAIAgentDefaults, LDMessage, ModelConfig, ProviderConfig)
15-
# Export judge types
1613
from ldai.providers.types import EvalScore, JudgeResponse
1714

1815
__all__ = [
@@ -25,14 +22,15 @@
2522
'AICompletionConfigDefault',
2623
'AIJudgeConfig',
2724
'AIJudgeConfigDefault',
28-
'Judge',
2925
'Chat',
3026
'EvalScore',
27+
'Judge',
3128
'JudgeConfiguration',
3229
'JudgeResponse',
3330
'LDMessage',
3431
'ModelConfig',
3532
'ProviderConfig',
33+
'log',
3634
# Deprecated exports
3735
'AIConfig',
3836
'LDAIAgent',

packages/sdk/server-ai/src/ldai/chat/__init__.py

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
import asyncio
44
from typing import Any, Dict, List, Optional
55

6+
from ldai import log
67
from ldai.judge import Judge
78
from ldai.models import AICompletionConfig, LDMessage
89
from ldai.providers.ai_provider import AIProvider
@@ -25,7 +26,6 @@ def __init__(
2526
tracker: LDAIConfigTracker,
2627
provider: AIProvider,
2728
judges: Optional[Dict[str, Judge]] = None,
28-
logger: Optional[Any] = None,
2929
):
3030
"""
3131
Initialize the Chat.
@@ -34,13 +34,11 @@ def __init__(
3434
:param tracker: The tracker for the completion configuration
3535
:param provider: The AI provider to use for chat
3636
:param judges: Optional dictionary of judge instances keyed by their configuration keys
37-
:param logger: Optional logger for logging
3837
"""
3938
self._ai_config = ai_config
4039
self._tracker = tracker
4140
self._provider = provider
4241
self._judges = judges or {}
43-
self._logger = logger
4442
self._messages: List[LDMessage] = []
4543

4644
async def invoke(self, prompt: str) -> ChatResponse:
@@ -101,10 +99,9 @@ def _start_judge_evaluations(
10199
async def evaluate_judge(judge_config):
102100
judge = self._judges.get(judge_config.key)
103101
if not judge:
104-
if self._logger:
105-
self._logger.warn(
106-
f"Judge configuration is not enabled: {judge_config.key}",
107-
)
102+
log.warn(
103+
f"Judge configuration is not enabled: {judge_config.key}",
104+
)
108105
return None
109106

110107
eval_result = await judge.evaluate_messages(

packages/sdk/server-ai/src/ldai/client.py

Lines changed: 10 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,18 @@
1-
import logging
21
from typing import Any, Dict, List, Optional, Tuple
32

43
import chevron
54
from ldclient import Context
65
from ldclient.client import LDClient
76

7+
from ldai import log
88
from ldai.chat import Chat
99
from ldai.judge import Judge
1010
from ldai.models import (AIAgentConfig, AIAgentConfigDefault,
1111
AIAgentConfigRequest, AIAgents, AICompletionConfig,
1212
AICompletionConfigDefault, AIJudgeConfig,
1313
AIJudgeConfigDefault, JudgeConfiguration, LDMessage,
1414
ModelConfig, ProviderConfig)
15-
from ldai.providers.ai_provider_factory import (AIProviderFactory,
16-
SupportedAIProvider)
15+
from ldai.providers.ai_provider_factory import AIProviderFactory
1716
from ldai.tracker import LDAIConfigTracker
1817

1918

@@ -22,7 +21,6 @@ class LDAIClient:
2221

2322
def __init__(self, client: LDClient):
2423
self._client = client
25-
self._logger = logging.getLogger('ldclient.ai')
2624

2725
def completion_config(
2826
self,
@@ -122,7 +120,7 @@ async def create_judge(
122120
context: Context,
123121
default_value: AIJudgeConfigDefault,
124122
variables: Optional[Dict[str, Any]] = None,
125-
default_ai_provider: Optional[SupportedAIProvider] = None,
123+
default_ai_provider: Optional[str] = None,
126124
) -> Optional[Judge]:
127125
"""
128126
Creates and returns a new Judge instance for AI evaluation.
@@ -180,11 +178,11 @@ async def create_judge(
180178
return None
181179

182180
# Create AI provider for the judge
183-
provider = await AIProviderFactory.create(judge_config, self._logger, default_ai_provider)
181+
provider = await AIProviderFactory.create(judge_config, default_ai_provider)
184182
if not provider:
185183
return None
186184

187-
return Judge(judge_config, judge_config.tracker, provider, self._logger)
185+
return Judge(judge_config, judge_config.tracker, provider)
188186
except Exception as error:
189187
# Would log error if logger available
190188
return None
@@ -194,7 +192,7 @@ async def _initialize_judges(
194192
judge_configs: List[JudgeConfiguration.Judge],
195193
context: Context,
196194
variables: Optional[Dict[str, Any]] = None,
197-
default_ai_provider: Optional[SupportedAIProvider] = None,
195+
default_ai_provider: Optional[str] = None,
198196
) -> Dict[str, Judge]:
199197
"""
200198
Initialize judges from judge configurations.
@@ -240,7 +238,7 @@ async def create_chat(
240238
context: Context,
241239
default_value: AICompletionConfigDefault,
242240
variables: Optional[Dict[str, Any]] = None,
243-
default_ai_provider: Optional[SupportedAIProvider] = None,
241+
default_ai_provider: Optional[str] = None,
244242
) -> Optional[Chat]:
245243
"""
246244
Creates and returns a new Chat instance for AI conversations.
@@ -275,15 +273,14 @@ async def create_chat(
275273
print(f"Conversation has {len(messages)} messages")
276274
"""
277275
self._client.track('$ld:ai:config:function:createChat', context, key, 1)
278-
if self._logger:
279-
self._logger.debug(f"Creating chat for key: {key}")
276+
log.debug(f"Creating chat for key: {key}")
280277
config = self.completion_config(key, context, default_value, variables)
281278

282279
if not config.enabled or not config.tracker:
283280
# Would log info if logger available
284281
return None
285282

286-
provider = await AIProviderFactory.create(config, self._logger, default_ai_provider)
283+
provider = await AIProviderFactory.create(config, default_ai_provider)
287284
if not provider:
288285
return None
289286

@@ -296,7 +293,7 @@ async def create_chat(
296293
default_ai_provider,
297294
)
298295

299-
return Chat(config, config.tracker, provider, judges, self._logger)
296+
return Chat(config, config.tracker, provider, judges)
300297

301298
def agent_config(
302299
self,

packages/sdk/server-ai/src/ldai/judge/__init__.py

Lines changed: 18 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55

66
import chevron
77

8+
from ldai import log
89
from ldai.judge.evaluation_schema_builder import EvaluationSchemaBuilder
910
from ldai.models import AIJudgeConfig, LDMessage
1011
from ldai.providers.ai_provider import AIProvider
@@ -26,20 +27,17 @@ def __init__(
2627
ai_config: AIJudgeConfig,
2728
ai_config_tracker: LDAIConfigTracker,
2829
ai_provider: AIProvider,
29-
logger: Optional[Any] = None,
3030
):
3131
"""
3232
Initialize the Judge.
3333
3434
:param ai_config: The judge AI configuration
3535
:param ai_config_tracker: The tracker for the judge configuration
3636
:param ai_provider: The AI provider to use for evaluation
37-
:param logger: Optional logger for logging
3837
"""
3938
self._ai_config = ai_config
4039
self._ai_config_tracker = ai_config_tracker
4140
self._ai_provider = ai_provider
42-
self._logger = logger
4341
self._evaluation_response_structure = EvaluationSchemaBuilder.build(
4442
ai_config.evaluation_metric_keys
4543
)
@@ -60,20 +58,17 @@ async def evaluate(
6058
"""
6159
try:
6260
if not self._ai_config.evaluation_metric_keys or len(self._ai_config.evaluation_metric_keys) == 0:
63-
if self._logger:
64-
self._logger.warn(
65-
'Judge configuration is missing required evaluationMetricKeys'
66-
)
61+
log.warn(
62+
'Judge configuration is missing required evaluationMetricKeys'
63+
)
6764
return None
6865

6966
if not self._ai_config.messages:
70-
if self._logger:
71-
self._logger.warn('Judge configuration must include messages')
67+
log.warn('Judge configuration must include messages')
7268
return None
7369

7470
if random.random() > sampling_rate:
75-
if self._logger:
76-
self._logger.debug(f'Judge evaluation skipped due to sampling rate: {sampling_rate}')
71+
log.debug(f'Judge evaluation skipped due to sampling rate: {sampling_rate}')
7772
return None
7873

7974
messages = self._construct_evaluation_messages(input_text, output_text)
@@ -89,8 +84,7 @@ async def evaluate(
8984
evals = self._parse_evaluation_response(response.data)
9085

9186
if len(evals) != len(self._ai_config.evaluation_metric_keys):
92-
if self._logger:
93-
self._logger.warn('Judge evaluation did not return all evaluations')
87+
log.warn('Judge evaluation did not return all evaluations')
9488
success = False
9589

9690
return JudgeResponse(
@@ -99,8 +93,7 @@ async def evaluate(
9993
success=success,
10094
)
10195
except Exception as error:
102-
if self._logger:
103-
self._logger.error(f'Judge evaluation failed: {error}')
96+
log.error(f'Judge evaluation failed: {error}')
10497
return JudgeResponse(
10598
evals={},
10699
success=False,
@@ -193,8 +186,7 @@ def _parse_evaluation_response(self, data: Dict[str, Any]) -> Dict[str, EvalScor
193186
results: Dict[str, EvalScore] = {}
194187

195188
if not data.get('evaluations') or not isinstance(data['evaluations'], dict):
196-
if self._logger:
197-
self._logger.warn('Invalid response: missing or invalid evaluations object')
189+
log.warn('Invalid response: missing or invalid evaluations object')
198190
return results
199191

200192
evaluations = data['evaluations']
@@ -203,27 +195,24 @@ def _parse_evaluation_response(self, data: Dict[str, Any]) -> Dict[str, EvalScor
203195
evaluation = evaluations.get(metric_key)
204196

205197
if not evaluation or not isinstance(evaluation, dict):
206-
if self._logger:
207-
self._logger.warn(f'Missing evaluation for metric key: {metric_key}')
198+
log.warn(f'Missing evaluation for metric key: {metric_key}')
208199
continue
209200

210201
score = evaluation.get('score')
211202
reasoning = evaluation.get('reasoning')
212203

213204
if not isinstance(score, (int, float)) or score < 0 or score > 1:
214-
if self._logger:
215-
self._logger.warn(
216-
f'Invalid score evaluated for {metric_key}: {score}. '
217-
'Score must be a number between 0 and 1 inclusive'
218-
)
205+
log.warn(
206+
f'Invalid score evaluated for {metric_key}: {score}. '
207+
'Score must be a number between 0 and 1 inclusive'
208+
)
219209
continue
220210

221211
if not isinstance(reasoning, str):
222-
if self._logger:
223-
self._logger.warn(
224-
f'Invalid reasoning evaluated for {metric_key}: {reasoning}. '
225-
'Reasoning must be a string'
226-
)
212+
log.warn(
213+
f'Invalid reasoning evaluated for {metric_key}: {reasoning}. '
214+
'Reasoning must be a string'
215+
)
227216
continue
228217

229218
results[metric_key] = EvalScore(score=float(score), reasoning=reasoning)
Lines changed: 1 addition & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1,28 +1,9 @@
11
"""AI Provider interfaces and factory for LaunchDarkly AI SDK."""
22

33
from ldai.providers.ai_provider import AIProvider
4-
from ldai.providers.ai_provider_factory import (AIProviderFactory,
5-
SupportedAIProvider)
6-
7-
# Export LangChain provider if available
8-
# TODO: Uncomment when langchain provider package is introduced
9-
# try:
10-
# from ldai.providers.langchain import LangChainProvider
11-
# __all__ = [
12-
# 'AIProvider',
13-
# 'AIProviderFactory',
14-
# 'LangChainProvider',
15-
# 'SupportedAIProvider',
16-
# ]
17-
# except ImportError:
18-
# __all__ = [
19-
# 'AIProvider',
20-
# 'AIProviderFactory',
21-
# 'SupportedAIProvider',
22-
# ]
4+
from ldai.providers.ai_provider_factory import AIProviderFactory
235

246
__all__ = [
257
'AIProvider',
268
'AIProviderFactory',
27-
'SupportedAIProvider',
289
]

packages/sdk/server-ai/src/ldai/providers/ai_provider.py

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
from abc import ABC, abstractmethod
44
from typing import Any, Dict, List, Optional, Union
55

6+
from ldai import log
67
from ldai.models import AIConfigKind, LDMessage
78
from ldai.providers.types import ChatResponse, StructuredResponse
89

@@ -18,14 +19,6 @@ class AIProvider(ABC):
1819
for better extensibility and backwards compatibility.
1920
"""
2021

21-
def __init__(self, logger: Optional[Any] = None):
22-
"""
23-
Initialize the AI provider.
24-
25-
:param logger: Optional logger for logging provider operations.
26-
"""
27-
self.logger = logger
28-
2922
async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse:
3023
"""
3124
Invoke the chat model with an array of messages.
@@ -39,8 +32,7 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse:
3932
:param messages: Array of LDMessage objects representing the conversation
4033
:return: ChatResponse containing the model's response
4134
"""
42-
if self.logger:
43-
self.logger.warn('invokeModel not implemented by this provider')
35+
log.warn('invokeModel not implemented by this provider')
4436

4537
from ldai.models import LDMessage
4638
from ldai.providers.types import LDAIMetrics
@@ -68,8 +60,7 @@ async def invoke_structured_model(
6860
:param response_structure: Dictionary of output configurations keyed by output name
6961
:return: StructuredResponse containing the structured data
7062
"""
71-
if self.logger:
72-
self.logger.warn('invokeStructuredModel not implemented by this provider')
63+
log.warn('invokeStructuredModel not implemented by this provider')
7364

7465
from ldai.providers.types import LDAIMetrics
7566

@@ -81,15 +72,14 @@ async def invoke_structured_model(
8172

8273
@staticmethod
8374
@abstractmethod
84-
async def create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> 'AIProvider':
75+
async def create(ai_config: AIConfigKind) -> 'AIProvider':
8576
"""
8677
Static method that constructs an instance of the provider.
8778
8879
Each provider implementation must provide their own static create method
8980
that accepts an AIConfigKind and returns a configured instance.
9081
9182
:param ai_config: The LaunchDarkly AI configuration
92-
:param logger: Optional logger for the provider
9383
:return: Configured provider instance
9484
"""
9585
raise NotImplementedError('Provider implementations must override the static create method')

0 commit comments

Comments (0)