Commit cae7952

fix lint

1 parent 63b1d9e commit cae7952

14 files changed: +143 -166 lines changed

ldai/__init__.py

Lines changed: 8 additions & 26 deletions
@@ -1,35 +1,17 @@
 __version__ = "0.10.1" # x-release-please-version
 
 # Export main client
+# Export chat
+from ldai.chat import TrackedChat
 from ldai.client import LDAIClient
-
-# Export models for convenience
-from ldai.models import (
-    AIAgentConfig,
-    AIAgentConfigDefault,
-    AIAgentConfigRequest,
-    AIAgents,
-    AICompletionConfig,
-    AICompletionConfigDefault,
-    AIJudgeConfig,
-    AIJudgeConfigDefault,
-    JudgeConfiguration,
-    LDMessage,
-    ModelConfig,
-    ProviderConfig,
-    # Deprecated aliases for backward compatibility
-    AIConfig,
-    LDAIAgent,
-    LDAIAgentConfig,
-    LDAIAgentDefaults,
-)
-
 # Export judge
 from ldai.judge import AIJudge
-
-# Export chat
-from ldai.chat import TrackedChat
-
+# Export models for convenience
+from ldai.models import ( # Deprecated aliases for backward compatibility
+    AIAgentConfig, AIAgentConfigDefault, AIAgentConfigRequest, AIAgents,
+    AICompletionConfig, AICompletionConfigDefault, AIConfig, AIJudgeConfig,
+    AIJudgeConfigDefault, JudgeConfiguration, LDAIAgent, LDAIAgentConfig,
+    LDAIAgentDefaults, LDMessage, ModelConfig, ProviderConfig)
 # Export judge types
 from ldai.providers.types import EvalScore, JudgeResponse
 
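
With these re-exports in place, the client, chat, judge, and model types all resolve from the package root. A minimal consumer-side sketch, using only names visible in this diff:

    # All of these come straight from the re-exports above.
    from ldai import (AIJudge, EvalScore, JudgeResponse, LDAIClient,
                      LDMessage, ModelConfig, TrackedChat)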

ldai/chat/__init__.py

Lines changed: 17 additions & 18 deletions
@@ -3,18 +3,18 @@
 import asyncio
 from typing import Any, Dict, List, Optional
 
+from ldai.judge import AIJudge
 from ldai.models import AICompletionConfig, LDMessage
 from ldai.providers.ai_provider import AIProvider
 from ldai.providers.types import ChatResponse, JudgeResponse
-from ldai.judge import AIJudge
 from ldai.tracker import LDAIConfigTracker
 
 
 class TrackedChat:
     """
     Concrete implementation of TrackedChat that provides chat functionality
     by delegating to an AIProvider implementation.
-    
+
     This class handles conversation management and tracking, while delegating
     the actual model invocation to the provider.
     """
@@ -29,7 +29,7 @@ def __init__(
     ):
         """
         Initialize the TrackedChat.
-        
+
         :param ai_config: The completion AI configuration
         :param tracker: The tracker for the completion configuration
         :param provider: The AI provider to use for chat
@@ -46,9 +46,9 @@ def __init__(
     async def invoke(self, prompt: str) -> ChatResponse:
         """
         Invoke the chat model with a prompt string.
-        
+
         This method handles conversation management and tracking, delegating to the provider's invoke_model method.
-        
+
         :param prompt: The user prompt to send to the chat model
         :return: ChatResponse containing the model's response and metrics
         """
@@ -86,9 +86,9 @@ def _start_judge_evaluations(
     ) -> List[asyncio.Task[Optional[JudgeResponse]]]:
         """
         Start judge evaluations as async tasks without awaiting them.
-        
+
         Returns a list of async tasks that can be awaited later.
-        
+
         :param messages: Array of messages representing the conversation history
         :param response: The AI response to be evaluated
         :return: List of async tasks that will return judge evaluation results
@@ -122,60 +122,60 @@ async def evaluate_judge(judge_config):
             asyncio.create_task(evaluate_judge(judge_config))
             for judge_config in judge_configs
         ]
-        
+
         return tasks
 
     def get_config(self) -> AICompletionConfig:
         """
         Get the underlying AI configuration used to initialize this TrackedChat.
-        
+
         :return: The AI completion configuration
         """
         return self._ai_config
 
     def get_tracker(self) -> LDAIConfigTracker:
         """
         Get the underlying AI configuration tracker used to initialize this TrackedChat.
-        
+
         :return: The tracker instance
         """
         return self._tracker
 
     def get_provider(self) -> AIProvider:
         """
         Get the underlying AI provider instance.
-        
+
         This provides direct access to the provider for advanced use cases.
-        
+
         :return: The AI provider instance
         """
         return self._provider
 
     def get_judges(self) -> Dict[str, AIJudge]:
         """
         Get the judges associated with this TrackedChat.
-        
+
         Returns a dictionary of judge instances keyed by their configuration keys.
-        
+
         :return: Dictionary of judge instances
         """
         return self._judges
 
     def append_messages(self, messages: List[LDMessage]) -> None:
         """
         Append messages to the conversation history.
-        
+
         Adds messages to the conversation history without invoking the model,
         which is useful for managing multi-turn conversations or injecting context.
-        
+
         :param messages: Array of messages to append to the conversation history
         """
         self._messages.extend(messages)
 
     def get_messages(self, include_config_messages: bool = False) -> List[LDMessage]:
         """
         Get all messages in the conversation history.
-        
+
         :param include_config_messages: Whether to include the config messages from the AIConfig.
             Defaults to False.
         :return: Array of messages. When include_config_messages is True, returns both config
@@ -186,4 +186,3 @@ def get_messages(self, include_config_messages: bool = False) -> List[LDMessage]
             config_messages = self._ai_config.messages or []
             return config_messages + self._messages
         return list(self._messages)
-
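
The hunks above cover TrackedChat's conversation-management surface (invoke, append_messages, get_messages). A minimal usage sketch follows; the chat object would come from LDAIClient.create_chat, and the LDMessage keyword arguments are an assumption, not part of this diff:

    from ldai.models import LDMessage

    async def converse(chat) -> None:
        # Seed context without calling the model (append_messages).
        chat.append_messages([LDMessage(role='user', content='My order is #1234.')])  # assumed signature
        response = await chat.invoke('Where is my order?')
        print(response.message.content)
        # Pull the history, including the config's own messages.
        history = chat.get_messages(include_config_messages=True)
        print(f'{len(history)} messages so far')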

ldai/client.py

Lines changed: 15 additions & 23 deletions
@@ -7,21 +7,13 @@
 
 from ldai.chat import TrackedChat
 from ldai.judge import AIJudge
-from ldai.models import (
-    AIAgentConfig,
-    AIAgentConfigDefault,
-    AIAgentConfigRequest,
-    AIAgents,
-    AICompletionConfig,
-    AICompletionConfigDefault,
-    AIJudgeConfig,
-    AIJudgeConfigDefault,
-    JudgeConfiguration,
-    LDMessage,
-    ModelConfig,
-    ProviderConfig,
-)
-from ldai.providers.ai_provider_factory import AIProviderFactory, SupportedAIProvider
+from ldai.models import (AIAgentConfig, AIAgentConfigDefault,
+                         AIAgentConfigRequest, AIAgents, AICompletionConfig,
+                         AICompletionConfigDefault, AIJudgeConfig,
+                         AIJudgeConfigDefault, JudgeConfiguration, LDMessage,
+                         ModelConfig, ProviderConfig)
+from ldai.providers.ai_provider_factory import (AIProviderFactory,
+                                                SupportedAIProvider)
 from ldai.tracker import LDAIConfigTracker
 
 
@@ -204,15 +196,15 @@ async def _initialize_judges(
     ) -> Dict[str, AIJudge]:
         """
         Initialize judges from judge configurations.
-        
+
         :param judge_configs: List of judge configurations
         :param context: Standard Context used when evaluating flags
         :param variables: Dictionary of values for instruction interpolation
         :param default_ai_provider: Optional default AI provider to use
         :return: Dictionary of judge instances keyed by their configuration keys
         """
         judges: Dict[str, AIJudge] = {}
-        
+
         async def create_judge_for_config(judge_key: str):
             judge = await self.create_judge(
                 judge_key,
@@ -222,22 +214,22 @@ async def create_judge_for_config(judge_key: str):
                 default_ai_provider,
             )
             return judge_key, judge
-        
+
         judge_promises = [
             create_judge_for_config(judge_config.key)
             for judge_config in judge_configs
         ]
-        
+
         import asyncio
         results = await asyncio.gather(*judge_promises, return_exceptions=True)
-        
+
         for result in results:
             if isinstance(result, Exception):
                 continue
-            judge_key, judge = result
+            judge_key, judge = result  # type: ignore[misc]
             if judge:
                 judges[judge_key] = judge
-        
+
         return judges
 
     async def create_chat(
@@ -275,7 +267,7 @@ async def create_chat(
         if chat:
             response = await chat.invoke("I need help with my order")
             print(response.message.content)
-            
+
             # Access conversation history
             messages = chat.get_messages()
             print(f"Conversation has {len(messages)} messages")

ldai/judge/__init__.py

Lines changed: 13 additions & 14 deletions
@@ -5,17 +5,18 @@
 
 import chevron
 
+from ldai.judge.evaluation_schema_builder import EvaluationSchemaBuilder
 from ldai.models import AIJudgeConfig, LDMessage
 from ldai.providers.ai_provider import AIProvider
-from ldai.providers.types import ChatResponse, EvalScore, JudgeResponse, StructuredResponse
+from ldai.providers.types import (ChatResponse, EvalScore, JudgeResponse,
+                                  StructuredResponse)
 from ldai.tracker import LDAIConfigTracker
-from ldai.judge.evaluation_schema_builder import EvaluationSchemaBuilder
 
 
 class AIJudge:
     """
     Judge implementation that handles evaluation functionality and conversation management.
-    
+
     According to the AIEval spec, judges are AI Configs with mode: "judge" that evaluate
     other AI Configs using structured output.
     """
@@ -29,7 +30,7 @@ def __init__(
     ):
         """
         Initialize the Judge.
-        
+
         :param ai_config: The judge AI configuration
         :param ai_config_tracker: The tracker for the judge configuration
         :param ai_provider: The AI provider to use for evaluation
@@ -51,7 +52,7 @@ async def evaluate(
     ) -> Optional[JudgeResponse]:
         """
         Evaluates an AI response using the judge's configuration.
-        
+
         :param input_text: The input prompt or question that was provided to the AI
         :param output_text: The AI-generated response to be evaluated
         :param sampling_rate: Sampling rate (0-1) to determine if evaluation should be processed (defaults to 1)
@@ -113,7 +114,7 @@ async def evaluate_messages(
     ) -> Optional[JudgeResponse]:
         """
         Evaluates an AI response from chat messages and response.
-        
+
         :param messages: Array of messages representing the conversation history
         :param response: The AI response to be evaluated
         :param sampling_ratio: Sampling ratio (0-1) to determine if evaluation should be processed (defaults to 1)
@@ -127,31 +128,31 @@ async def evaluate_messages(
     def get_ai_config(self) -> AIJudgeConfig:
         """
         Returns the AI Config used by this judge.
-        
+
         :return: The judge AI configuration
         """
         return self._ai_config
 
     def get_tracker(self) -> LDAIConfigTracker:
         """
         Returns the tracker associated with this judge.
-        
+
         :return: The tracker for the judge configuration
         """
         return self._ai_config_tracker
 
     def get_provider(self) -> AIProvider:
         """
         Returns the AI provider used by this judge.
-        
+
         :return: The AI provider
         """
         return self._ai_provider
 
     def _construct_evaluation_messages(self, input_text: str, output_text: str) -> list[LDMessage]:
         """
         Constructs evaluation messages by combining judge's config messages with input/output.
-        
+
         :param input_text: The input text
         :param output_text: The output text to evaluate
         :return: List of messages for evaluation
@@ -173,7 +174,7 @@ def _construct_evaluation_messages(self, input_text: str, output_text: str) -> l
     def _interpolate_message(self, content: str, variables: Dict[str, str]) -> str:
         """
         Interpolates message content with variables using Mustache templating.
-        
+
         :param content: The message content template
         :param variables: Variables to interpolate
         :return: Interpolated message content
@@ -184,7 +185,7 @@ def _interpolate_message(self, content: str, variables: Dict[str, str]) -> str:
     def _parse_evaluation_response(self, data: Dict[str, Any]) -> Dict[str, EvalScore]:
         """
         Parses the structured evaluation response from the AI provider.
-        
+
         :param data: The structured response data
         :return: Dictionary of evaluation scores keyed by metric key
         """
@@ -227,5 +228,3 @@ def _parse_evaluation_response(self, data: Dict[str, Any]) -> Dict[str, EvalScor
             results[metric_key] = EvalScore(score=float(score), reasoning=reasoning)
 
         return results
-
-
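
The _interpolate_message context above renders Mustache templates through the chevron import at the top of the file. A standalone sketch of that call (the template string is illustrative):

    import chevron

    template = 'Rate the response for {{metric}} on a 0-1 scale.'
    print(chevron.render(template, {'metric': 'relevance'}))
    # -> Rate the response for relevance on a 0-1 scale.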

ldai/judge/evaluation_schema_builder.py

Lines changed: 3 additions & 4 deletions
@@ -13,7 +13,7 @@ class EvaluationSchemaBuilder:
     def build(evaluation_metric_keys: list[str]) -> Dict[str, Any]:
         """
         Build an evaluation response schema from evaluation metric keys.
-        
+
         :param evaluation_metric_keys: List of evaluation metric keys
         :return: Schema dictionary for structured output
         """
@@ -38,7 +38,7 @@ def build(evaluation_metric_keys: list[str]) -> Dict[str, Any]:
     def _build_key_properties(evaluation_metric_keys: list[str]) -> Dict[str, Any]:
         """
         Build properties for each evaluation metric key.
-        
+
         :param evaluation_metric_keys: List of evaluation metric keys
         :return: Dictionary of properties for each key
         """
@@ -51,7 +51,7 @@ def _build_key_properties(evaluation_metric_keys: list[str]) -> Dict[str, Any]:
     def _build_key_schema(key: str) -> Dict[str, Any]:
         """
         Build schema for a single evaluation metric key.
-        
+
         :param key: Evaluation metric key
         :return: Schema dictionary for the key
         """
@@ -72,4 +72,3 @@ def _build_key_schema(key: str) -> Dict[str, Any]:
             'required': ['score', 'reasoning'],
             'additionalProperties': False,
         }
-
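
For reference, the final hunk implies a per-metric schema of roughly this shape. Only 'required' and 'additionalProperties' appear in the hunk; the field types are inferred from the float(score) parsing in ldai/judge/__init__.py:

    key_schema = {
        'type': 'object',
        'properties': {
            'score': {'type': 'number'},      # inferred: parsed with float(score)
            'reasoning': {'type': 'string'},  # inferred: stored as EvalScore.reasoning
        },
        'required': ['score', 'reasoning'],   # shown in the diff
        'additionalProperties': False,        # shown in the diff
    }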
