|
2 | 2 | import logging |
3 | 3 | from typing import Optional, Callable |
4 | 4 | from openai import OpenAI |
| 5 | +from anthropic import Anthropic |
| 6 | + |
5 | 7 |
|
6 | 8 | logger = logging.getLogger(__name__) |
7 | 9 |
|
@@ -102,6 +104,85 @@ async def generate_response_stream(self, prompt: str, callback: Callable[[str], |
102 | 104 | callback(error_msg) |
103 | 105 | raise RuntimeError(f"Error generating streaming response with OpenAI: {str(e)}") |
104 | 106 |
|
class AnthropicService(AIService):
    """AI service using Anthropic models.

    Wraps the Anthropic Messages API for single-shot and streaming
    completions, sharing a deployment-analysis system prompt.
    """

    # max_tokens is a *required* parameter of the Anthropic Messages API;
    # without it every create()/stream() call fails with a validation error.
    _MAX_TOKENS = 4096

    def __init__(self, model: str, api_key: str):
        """Initialize the Anthropic client.

        Args:
            model: Anthropic model identifier; must be non-empty.
            api_key: Anthropic API key; must be non-empty.
        """
        super().__init__(model)

        assert api_key is not None and api_key.strip() != "", "Anthropic API key cannot be empty"
        assert model is not None and model.strip() != "", "Model name cannot be empty"

        self.client = Anthropic(api_key=api_key)

    @staticmethod
    def _build_system_prompt(structured_output: bool = False) -> str:
        """Return the system prompt shared by both generation methods."""
        system_prompt = """You are an AI assistant specialized in application deployment.
        Your task is to analyze code and configurations to suggest deployment strategies.
        Be precise and provide detailed reasoning for your suggestions."""

        if structured_output:
            system_prompt += """ Include a 'reasoning' field in your JSON response that
            explains the rationale behind your suggestions in detail."""

        return system_prompt

    async def generate_response(self, prompt: str, structured_output: bool = False) -> str:
        """Generate a single (non-streaming) response.

        Args:
            prompt: User prompt to send to the model.
            structured_output: When True, ask the model to include a
                'reasoning' field in its JSON response.

        Returns:
            The generated text.

        Raises:
            RuntimeError: If the Anthropic API call fails.
        """
        assert prompt is not None, "Prompt cannot be None"

        try:
            # The Anthropic Messages API does not accept a 'system' role in
            # the messages list; the system prompt goes in the top-level
            # `system` parameter instead.
            response = self.client.messages.create(
                model=self.model,
                max_tokens=self._MAX_TOKENS,
                system=self._build_system_prompt(structured_output),
                messages=[{'role': 'user', 'content': prompt}],
            )

            # response.content is a list of content blocks, not a string;
            # concatenate the text blocks rather than str()-ing the list.
            return "".join(
                block.text for block in response.content if hasattr(block, "text")
            )
        except Exception as e:
            logger.error(f"Error generating response with Anthropic: {str(e)}")
            raise RuntimeError(f"Error generating response with Anthropic: {str(e)}")

    async def generate_response_stream(self, prompt: str, callback: Callable[[str], None]) -> str:
        """Generate a response, invoking `callback` with each text chunk.

        Args:
            prompt: User prompt to send to the model.
            callback: Called with each non-empty text chunk as it arrives;
                also receives the error message if the call fails.

        Returns:
            The full accumulated response text.

        Raises:
            RuntimeError: If the Anthropic API call fails.
        """
        assert prompt is not None, "Prompt cannot be None"
        assert callback is not None, "Callback function cannot be None"

        try:
            # With the sync client, text deltas come from the context-managed
            # messages.stream() helper; iterating create(stream=True) with
            # `async for ... .text_stream` fails at runtime (sync iterator,
            # and raw event streams have no text_stream attribute).
            collected: list[str] = []
            with self.client.messages.stream(
                model=self.model,
                max_tokens=self._MAX_TOKENS,
                system=self._build_system_prompt(),
                messages=[{'role': 'user', 'content': prompt}],
            ) as stream:
                for text_chunk in stream.text_stream:
                    if text_chunk:
                        collected.append(text_chunk)
                        callback(text_chunk)

            # The signature promises the full text; previously None was
            # returned implicitly.
            return "".join(collected)
        except Exception as e:
            error_msg = f"Error generating streaming response: {str(e)}"
            logger.error(error_msg)
            callback(error_msg)
            raise RuntimeError(f"Error generating streaming response with Anthropic: {str(e)}")
105 | 186 | class OllamaService(AIService): |
106 | 187 | """AI service using local Ollama models.""" |
107 | 188 |
|
|
0 commit comments