|
| 1 | +"""Basic agent adapter for testing and simple use cases.""" |
| 2 | + |
| 3 | +from typing import Any |
| 4 | + |
| 5 | +from codeoptix.adapters.base import AgentAdapter, AgentOutput |
| 6 | +from codeoptix.utils.llm import LLMClient |
| 7 | + |
| 8 | + |
class BasicAdapter(AgentAdapter):
    """
    Basic agent adapter that works with any LLM provider.

    This adapter doesn't require any external agent software and can be used
    for testing or simple evaluation scenarios. It uses the LLM directly
    with a simple coding assistant prompt.
    """

    def __init__(self, config: dict[str, Any]):
        """
        Initialize basic adapter.

        Args:
            config: Adapter configuration. Must contain an ``llm_config``
                dict; recognized keys are ``provider`` (defaults to
                ``"ollama"``), ``api_key``, and ``model`` (defaults to
                ``"llama3.2:3b"``). An optional top-level ``prompt`` key
                overrides the default system prompt.

        Raises:
            ValueError: If ``llm_config`` is missing or empty.
        """
        super().__init__(config)

        llm_config = config.get("llm_config", {})
        if not llm_config:
            raise ValueError("BasicAdapter requires 'llm_config' in configuration")

        # Imported lazily inside __init__ (mirrors original code) —
        # presumably to avoid an import cycle with utils.llm; confirm
        # before hoisting to module level.
        from codeoptix.utils.llm import LLMProvider, create_llm_client

        provider_name = llm_config.get("provider", "ollama")
        self.llm_client: LLMClient = create_llm_client(
            LLMProvider(provider_name), llm_config.get("api_key")
        )

        self.model = llm_config.get("model", "llama3.2:3b")

        # A caller-supplied prompt wins; empty/None falls back to the default.
        self._current_prompt = config.get("prompt") or self._get_default_prompt()

    def get_adapter_type(self) -> str:
        """Return the adapter type identifier."""
        return "basic"

    def _get_default_prompt(self) -> str:
        """Return the default basic coding-assistant system prompt."""
        return """You are a helpful coding assistant. Your task is to write clean, secure, and well-tested code.

Guidelines:
- Write secure code: validate inputs, avoid hardcoded secrets, use proper error handling
- Write comprehensive tests: cover edge cases, use meaningful assertions
- Follow coding best practices: clear variable names, proper structure, documentation
- Consider the user's requirements and context provided

When given a coding task, provide:
1. Well-structured, readable code
2. Appropriate tests for the code
3. Brief explanation of the implementation"""

    def get_prompt(self) -> str:
        """Return the current system prompt (default if none is set)."""
        return self._current_prompt or self._get_default_prompt()

    def update_prompt(self, new_prompt: str) -> None:
        """Replace the system prompt used for subsequent executions."""
        self._current_prompt = new_prompt

    def execute(self, prompt: str, context: dict[str, Any] | None = None) -> AgentOutput:
        """
        Execute a coding task using the LLM directly.

        Args:
            prompt: The coding task prompt.
            context: Optional context information; recognized keys are
                "plan", "existing_code", and "requirements".

        Returns:
            AgentOutput with generated code and tests.
        """
        # NOTE: AgentOutput is imported at module level; the previous
        # redundant function-local re-import was removed.
        context = context or {}

        full_prompt = self._build_full_prompt(prompt, context)

        messages = [
            {"role": "system", "content": self._current_prompt},
            {"role": "user", "content": full_prompt},
        ]

        response = self.llm_client.chat_completion(
            messages=messages, model=self.model, temperature=0.7, max_tokens=2048
        )

        # Split the free-form LLM reply into code and tests sections.
        code, tests = self._parse_response(response)

        return AgentOutput(
            code=code,
            tests=tests,
            prompt_used=self._current_prompt,
            metadata={"model": self.model, "adapter_type": "basic", "full_response": response},
        )

    def _build_full_prompt(self, prompt: str, context: dict[str, Any]) -> str:
        """Build the full user prompt: context, task, and output-format spec."""
        parts = []

        # Add context if provided
        if context.get("plan"):
            parts.append(f"Plan/Requirements: {context['plan']}")
        if context.get("existing_code"):
            parts.append(f"Existing Code:\n{context['existing_code']}")
        if context.get("requirements"):
            parts.append(f"Requirements: {context['requirements']}")

        # Add the main task
        parts.append(f"Task: {prompt}")

        # Add output format instructions
        parts.append("""
Please provide your response in the following format:

CODE:
```python
# Your code here
```

TESTS:
```python
# Your tests here
```

EXPLANATION:
Brief explanation of your implementation.
""")

        return "\n\n".join(parts)

    def _parse_response(self, response: str) -> tuple[str, str]:
        """
        Parse an LLM response into ``(code, tests)``.

        Recognizes "CODE:"/"TESTS:" section headers and strips markdown
        code fences. Falls back to classifying ``def`` lines when no
        headers are found, and finally to treating the whole response as
        code.

        Bug fix: fence lines (```) previously forced the active section
        back to "code", so the fence opening the TESTS block dumped all
        test lines into the code output. Fences are now skipped without
        changing the active section.
        """
        lines = response.split("\n")
        current_section: str | None = None
        code_lines: list[str] = []
        test_lines: list[str] = []

        for line in lines:
            stripped = line.strip()
            lowered = stripped.lower()
            if lowered.startswith("code:"):
                current_section = "code"
                continue
            if lowered.startswith(("tests:", "test:")):
                current_section = "tests"
                continue
            if lowered.startswith("explanation:"):
                # Prose explanation follows; stop collecting code/tests so
                # it does not leak into whichever section was last active.
                current_section = None
                continue
            if "```" in stripped:
                # Markdown fence: drop the marker, keep the current section.
                continue
            if current_section == "code" and stripped:
                code_lines.append(line)
            elif current_section == "tests" and stripped:
                test_lines.append(line)

        # If no clear sections found, classify by function definitions:
        # "def ...test..." starts the tests section, other defs start code.
        if not code_lines and not test_lines:
            current_section = None  # don't let first-pass state leak in
            for line in lines:
                if line.strip().startswith("def ") and "test" in line.lower():
                    current_section = "tests"
                    test_lines.append(line)
                elif line.strip().startswith("def ") and current_section != "tests":
                    current_section = "code"
                    code_lines.append(line)
                elif current_section == "code":
                    code_lines.append(line)
                elif current_section == "tests":
                    test_lines.append(line)

        code = "\n".join(code_lines).strip()
        tests = "\n".join(test_lines).strip()

        # Last resort: no structure at all — assume the whole reply is code.
        if not code and not tests:
            code = response.strip()

        return code, tests
0 commit comments