1+ import json
12from pydantic import BaseModel
23import logging
3- import ollama
4- import openai
54
65from abc import ABC , abstractmethod
76
@@ -87,6 +86,13 @@ def execute(self, prompt: str) -> str:
8786
8887 Handles its own multi-turn conversation state.
8988 """
89+ try :
90+ import ollama
91+ except ImportError :
92+ raise ImportError (
93+ "Ollama is not installed. Please install it with: pip install talkpipe[ollama]"
94+ )
95+
9096 logger .debug (f"Adding user message to chat history: { prompt } " )
9197 self ._messages .append ({"role" : "user" , "content" : prompt })
9298
@@ -117,6 +123,13 @@ def is_available(self) -> bool:
117123 Returns:
118124 bool: True if the model is available, False otherwise.
119125 """
126+ try :
127+ import ollama
128+ except ImportError :
129+ raise ImportError (
130+ "Ollama is not installed. Please install it with: pip install talkpipe[ollama]"
131+ )
132+
120133 try :
121134 # Check if the model is available
122135 response = ollama .chat (self ._model_name , messages = [self ._system_message ], options = {"temperature" : self ._temperature })
@@ -126,12 +139,128 @@ def is_available(self) -> bool:
126139 return False
127140
128141
class AnthropicPromptAdapter(AbstractLLMPromptAdapter):
    """Prompt adapter for Anthropic Claude models.

    Requires the optional ``anthropic`` dependency
    (``pip install talkpipe[anthropic]``).  Conversation state, the system
    message, and temperature handling are managed by the base class;
    this adapter translates them into Anthropic Messages API calls.
    """

    def __init__(self, model: str, system_prompt: str = "You are a helpful assistant.", multi_turn: bool = True, temperature: float = None, output_format: BaseModel = None):
        """Initialize the Anthropic prompt adapter.

        Args:
            model: Name of the Claude model to use.
            system_prompt: System message for the conversation.  When
                ``output_format`` is given, the format's JSON schema is
                appended to it so the model emits parseable JSON.
            multi_turn: If True, keep conversation history between calls.
            temperature: Sampling temperature; None means "use the API default".
            output_format: Optional pydantic model describing structured output.

        Raises:
            ImportError: If the optional ``anthropic`` package is not installed.
        """
        # Import lazily so the package works without the optional anthropic extra.
        try:
            import anthropic
        except ImportError as e:
            raise ImportError(
                "Anthropic is not installed. Please install it with: pip install talkpipe[anthropic]"
            ) from e

        # Embed the requested output schema in the system prompt so the model
        # knows to reply with JSON matching it.
        if output_format:
            self.pydantic_json_schema = json.dumps(output_format.model_json_schema())
            system_prompt += f"\nThe output should be in the following JSON format:\n{self.pydantic_json_schema}"
        else:
            self.pydantic_json_schema = None

        super().__init__(model, "anthropic", system_prompt, multi_turn, temperature, output_format)
        self.client = anthropic.Anthropic()
        self._max_tokens = 4096  # Default max tokens for response

    def execute(self, prompt: str) -> str:
        """Execute one chat turn against the Anthropic Messages API.

        Handles its own multi-turn conversation state.

        Args:
            prompt: The user message for this turn.

        Returns:
            The model's reply as a string, or — when an output format was
            configured — an instance of that pydantic model parsed from the
            reply text.
        """
        # NOTE: no per-call import guard is needed here; __init__ already
        # verified that anthropic is installed, and all API access goes
        # through self.client.
        logger.debug(f"Adding user message to chat history: {prompt}")
        self._messages.append({"role": "user", "content": prompt})

        logger.debug(f"Sending chat request to Anthropic model {self._model_name}")

        # Build request parameters; temperature is only forwarded when the
        # caller set it explicitly (presumably tracked by the base class via
        # _temperature_explicit — confirm there).
        request_params = {
            "model": self._model_name,
            "messages": self._messages,
            "system": self._system_message["content"],
            "max_tokens": self._max_tokens,
        }
        if self._temperature_explicit:
            request_params["temperature"] = self._temperature

        response = self.client.messages.create(**request_params)

        # Concatenate every text block in the response content; non-text
        # blocks (e.g. tool use) are skipped.
        response_text = "".join(
            block.text for block in response.content if hasattr(block, "text")
        )

        if self._multi_turn:
            logger.debug("Multi-turn enabled, appending assistant response to chat history")
            self._messages.append({"role": "assistant", "content": response_text})
        else:
            logger.debug("Single-turn mode, clearing message history")
            self._messages = []

        # Parse into the requested pydantic model when a structured output
        # format was configured; otherwise return the raw text.
        if self._output_format:
            result = self._output_format.model_validate_json(response_text)
        else:
            result = response_text

        logger.debug(f"Returning response: {result}")
        return result

    def is_available(self) -> bool:
        """Check if the chat model is available.

        Sends a minimal one-token request to the configured model and treats
        any exception (auth failure, unknown model, network error) as
        unavailability.

        Returns:
            bool: True if the model is available, False otherwise.
        """
        # NOTE: no per-call import guard is needed here; __init__ already
        # verified that anthropic is installed.
        try:
            # Check availability with the cheapest possible request.
            request_params = {
                "model": self._model_name,
                "messages": [{"role": "user", "content": "test"}],
                "system": self._system_message["content"],
                "max_tokens": 1,
            }
            if self._temperature_explicit:
                request_params["temperature"] = self._temperature

            self.client.messages.create(**request_params)
            return True
        except Exception as e:
            logger.error(f"Model {self._model_name} is not available: {e}")
            return False
250+
129251class OpenAIPromptAdapter (AbstractLLMPromptAdapter ):
130252 """Prompt adapter for OpenAI
131253
132254 """
133255
134256 def __init__ (self , model : str , system_prompt : str = "You are a helpful assistant." , multi_turn : bool = True , temperature : float = None , output_format : BaseModel = None ):
257+ try :
258+ import openai
259+ except ImportError :
260+ raise ImportError (
261+ "OpenAI is not installed. Please install it with: pip install talkpipe[openai]"
262+ )
263+
135264 super ().__init__ (model , "openai" , system_prompt , multi_turn , temperature , output_format )
136265 self .client = openai .OpenAI ()
137266
@@ -140,11 +269,18 @@ def execute(self, prompt: str) -> str:
140269
141270 Handles its own multi-turn conversation state.
142271 """
272+ try :
273+ import openai
274+ except ImportError :
275+ raise ImportError (
276+ "OpenAI is not installed. Please install it with: pip install talkpipe[openai]"
277+ )
278+
143279 logger .debug (f"Adding user message to chat history: { prompt } " )
144280 self ._messages .append ({"role" : "user" , "content" : prompt })
145281
146282 logger .debug (f"Sending chat request to OpenAI model { self ._model_name } " )
147-
283+
148284 # Build request parameters, only including temperature if explicitly set
149285 request_params = {
150286 "model" : self ._model_name ,
0 commit comments