
Commit b63dbb5
feat: Add Chat and Judge supporting methods (#64)
1 parent: d248fa3

File tree: 16 files changed, +1742 −243 lines


ldai/__init__.py

Lines changed: 40 additions & 0 deletions
@@ -1 +1,41 @@
__version__ = "0.10.1"  # x-release-please-version


# Export main client
# Export chat
from ldai.chat import Chat
from ldai.client import LDAIClient
# Export judge
from ldai.judge import Judge
# Export models for convenience
from ldai.models import (  # Deprecated aliases for backward compatibility
    AIAgentConfig, AIAgentConfigDefault, AIAgentConfigRequest, AIAgents,
    AICompletionConfig, AICompletionConfigDefault, AIConfig, AIJudgeConfig,
    AIJudgeConfigDefault, JudgeConfiguration, LDAIAgent, LDAIAgentConfig,
    LDAIAgentDefaults, LDMessage, ModelConfig, ProviderConfig)
# Export judge types
from ldai.providers.types import EvalScore, JudgeResponse

__all__ = [
    'LDAIClient',
    'AIAgentConfig',
    'AIAgentConfigDefault',
    'AIAgentConfigRequest',
    'AIAgents',
    'AICompletionConfig',
    'AICompletionConfigDefault',
    'AIJudgeConfig',
    'AIJudgeConfigDefault',
    'Judge',
    'Chat',
    'EvalScore',
    'JudgeConfiguration',
    'JudgeResponse',
    'LDMessage',
    'ModelConfig',
    'ProviderConfig',
    # Deprecated exports
    'AIConfig',
    'LDAIAgent',
    'LDAIAgentConfig',
    'LDAIAgentDefaults',
]
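For context, these re-exports let downstream code import the new chat and judge types from the package root rather than from the submodules. A minimal consumer sketch, not part of this diff; only the re-exported names and the LDMessage fields used later in this commit are assumed:

# Illustrative only: exercises the re-exports added above (not part of the diff).
from ldai import Chat, Judge, LDMessage


def describe(chat: Chat, judge: Judge) -> None:
    # Type hints can now reference Chat and Judge via the package root
    # instead of ldai.chat / ldai.judge.
    greeting = LDMessage(role='user', content='hello')
    print(type(chat).__name__, type(judge).__name__, greeting.content)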

ldai/chat/__init__.py

Lines changed: 187 additions & 0 deletions
@@ -0,0 +1,187 @@
"""Chat implementation for managing AI chat conversations."""

import asyncio
from typing import Any, Dict, List, Optional

from ldai.judge import Judge
from ldai.models import AICompletionConfig, LDMessage
from ldai.providers.ai_provider import AIProvider
from ldai.providers.types import ChatResponse, JudgeResponse
from ldai.tracker import LDAIConfigTracker


class Chat:
    """
    Concrete implementation of Chat that provides chat functionality
    by delegating to an AIProvider implementation.

    This class handles conversation management and tracking, while delegating
    the actual model invocation to the provider.
    """

    def __init__(
        self,
        ai_config: AICompletionConfig,
        tracker: LDAIConfigTracker,
        provider: AIProvider,
        judges: Optional[Dict[str, Judge]] = None,
        logger: Optional[Any] = None,
    ):
        """
        Initialize the Chat.

        :param ai_config: The completion AI configuration
        :param tracker: The tracker for the completion configuration
        :param provider: The AI provider to use for chat
        :param judges: Optional dictionary of judge instances keyed by their configuration keys
        :param logger: Optional logger for logging
        """
        self._ai_config = ai_config
        self._tracker = tracker
        self._provider = provider
        self._judges = judges or {}
        self._logger = logger
        self._messages: List[LDMessage] = []

    async def invoke(self, prompt: str) -> ChatResponse:
        """
        Invoke the chat model with a prompt string.

        This method handles conversation management and tracking, delegating to the provider's invoke_model method.

        :param prompt: The user prompt to send to the chat model
        :return: ChatResponse containing the model's response and metrics
        """
        # Convert prompt string to LDMessage with role 'user' and add to conversation history
        user_message: LDMessage = LDMessage(role='user', content=prompt)
        self._messages.append(user_message)

        # Prepend config messages to conversation history for model invocation
        config_messages = self._ai_config.messages or []
        all_messages = config_messages + self._messages

        # Delegate to provider-specific implementation with tracking
        response = await self._tracker.track_metrics_of(
            lambda: self._provider.invoke_model(all_messages),
            lambda result: result.metrics,
        )

        # Start judge evaluations as async tasks (don't await them)
        if (
            self._ai_config.judge_configuration
            and self._ai_config.judge_configuration.judges
            and len(self._ai_config.judge_configuration.judges) > 0
        ):
            response.evaluations = self._start_judge_evaluations(self._messages, response)

        # Add the response message to conversation history
        self._messages.append(response.message)
        return response

    def _start_judge_evaluations(
        self,
        messages: List[LDMessage],
        response: ChatResponse,
    ) -> List[asyncio.Task[Optional[JudgeResponse]]]:
        """
        Start judge evaluations as async tasks without awaiting them.

        Returns a list of async tasks that can be awaited later.

        :param messages: Array of messages representing the conversation history
        :param response: The AI response to be evaluated
        :return: List of async tasks that will return judge evaluation results
        """
        if not self._ai_config.judge_configuration or not self._ai_config.judge_configuration.judges:
            return []

        judge_configs = self._ai_config.judge_configuration.judges

        # Start all judge evaluations as tasks
        async def evaluate_judge(judge_config):
            judge = self._judges.get(judge_config.key)
            if not judge:
                if self._logger:
                    self._logger.warn(
                        f"Judge configuration is not enabled: {judge_config.key}",
                    )
                return None

            eval_result = await judge.evaluate_messages(
                messages, response, judge_config.sampling_rate
            )

            if eval_result and eval_result.success:
                self._tracker.track_judge_response(eval_result)

            return eval_result

        # Create tasks for each judge evaluation
        tasks = [
            asyncio.create_task(evaluate_judge(judge_config))
            for judge_config in judge_configs
        ]

        return tasks

    def get_config(self) -> AICompletionConfig:
        """
        Get the underlying AI configuration used to initialize this Chat.

        :return: The AI completion configuration
        """
        return self._ai_config

    def get_tracker(self) -> LDAIConfigTracker:
        """
        Get the underlying AI configuration tracker used to initialize this Chat.

        :return: The tracker instance
        """
        return self._tracker

    def get_provider(self) -> AIProvider:
        """
        Get the underlying AI provider instance.

        This provides direct access to the provider for advanced use cases.

        :return: The AI provider instance
        """
        return self._provider

    def get_judges(self) -> Dict[str, Judge]:
        """
        Get the judges associated with this Chat.

        Returns a dictionary of judge instances keyed by their configuration keys.

        :return: Dictionary of judge instances
        """
        return self._judges

    def append_messages(self, messages: List[LDMessage]) -> None:
        """
        Append messages to the conversation history.

        Adds messages to the conversation history without invoking the model,
        which is useful for managing multi-turn conversations or injecting context.

        :param messages: Array of messages to append to the conversation history
        """
        self._messages.extend(messages)

    def get_messages(self, include_config_messages: bool = False) -> List[LDMessage]:
        """
        Get all messages in the conversation history.

        :param include_config_messages: Whether to include the config messages from the AIConfig.
            Defaults to False.
        :return: Array of messages. When include_config_messages is True, returns both config
            messages and conversation history with config messages prepended. When False,
            returns only the conversation history messages.
        """
        if include_config_messages:
            config_messages = self._ai_config.messages or []
            return config_messages + self._messages
        return list(self._messages)
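As a usage sketch of the new Chat class: constructing a Chat instance (wiring up the config, tracker, provider, and judges) happens elsewhere in the SDK and is assumed here, so only the methods defined in this file are exercised:

# Illustrative only: drives a Chat instance obtained elsewhere from the SDK.
import asyncio
from typing import List, Optional

from ldai.chat import Chat
from ldai.providers.types import ChatResponse, JudgeResponse


async def ask(chat: Chat, prompt: str) -> None:
    # invoke() records the prompt as a user message, prepends the config
    # messages for the provider call, and tracks metrics via the tracker.
    response: ChatResponse = await chat.invoke(prompt)
    print(response.message.content)

    # When judges are configured, invoke() attaches pending asyncio tasks to
    # response.evaluations; the default value when no judges run is not shown
    # in this diff, hence the defensive getattr.
    evaluations = getattr(response, 'evaluations', None) or []
    results: List[Optional[JudgeResponse]] = await asyncio.gather(*evaluations)
    for result in results:
        if result is not None and result.success:
            print('judge evaluation recorded:', result)

    # Full transcript, with the config messages prepended on request.
    for message in chat.get_messages(include_config_messages=True):
        print(f'{message.role}: {message.content}')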
