
Commit b63fc68

Update docs
1 parent 1c8cbdd commit b63fc68


8 files changed: +44 −18 lines


docs/guides/ollama-integration.md

Lines changed: 14 additions & 0 deletions

@@ -300,6 +300,20 @@ export OLLAMA_BASE_URL=http://localhost:11435
 - You need maximum speed
 - You're okay with API costs
 
+### ⚠️ Limitations
+
+While Ollama works great for evaluations, there are some limitations:
+
+#### Evolution Support
+- **Limited support for `codeoptix evolve`**: The evolution feature uses GEPA optimization, which requires processing very long prompts. Ollama may fail with 404 errors or timeouts on complex evolution tasks.
+- **Recommendation**: Use cloud providers (OpenAI, Anthropic, Google) for full evolution capabilities. For basic evolution testing, try smaller models like `llama3.1:8b` with minimal iterations.
+
+#### Performance
+- Large models (e.g., `gpt-oss:120b`) require significant RAM and may be slow on consumer hardware.
+- Evolution tasks are computationally intensive and may not complete reliably with Ollama.
+
+For advanced features like evolution, consider cloud providers or contact us for tailored enterprise solutions.
 ---
 
 ## 📚 Next Steps
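Since the new limitations section points readers at local model availability and the `OLLAMA_BASE_URL` override, a quick reachability check can rule out setup problems before blaming the model. A minimal sketch, assuming the stock Ollama HTTP API (`/api/tags`) and the `requests` package; nothing here is part of CodeOptiX itself:

```python
import os

import requests  # assumption: any HTTP client works; requests is used for brevity

# Respect the same override the guide documents; default to Ollama's standard port.
base_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")

# /api/tags lists locally pulled models. A connection error or missing model here
# is a common cause of the 404s/timeouts described in the limitations above.
resp = requests.get(f"{base_url}/api/tags", timeout=5)
resp.raise_for_status()
local_models = [m["name"] for m in resp.json().get("models", [])]
print("llama3.1:8b available:", "llama3.1:8b" in local_models)
```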

docs/index.md

Lines changed: 3 additions & 2 deletions

@@ -54,14 +54,15 @@ When AI coding agents dazzle with impressive code but leave you wondering about
 - **CI/CD Integration** - Automated quality gates and GitHub Actions support
 
 !!! tip "Ollama Support - No API Key Required!"
-**CodeOptiX now supports Ollama** - use local models without API keys:
+**CodeOptiX supports Ollama** for evaluations - use local models without API keys:
 
 - ✅ **Ollama integration** - Run evaluations with local models
 - ✅ **No API key needed** - Perfect for open-source users
 - ✅ **Privacy-friendly** - All processing happens locally
 - ✅ **Free to use** - No cloud costs
+- ⚠️ **Limited evolution support** - Use cloud providers for `codeoptix evolve`
 
-**See [Ollama Integration](guides/ollama-integration/) for setup instructions.**
+**See [Ollama Integration](guides/ollama-integration/) for setup and limitations.**
 
 **Cloud providers** (OpenAI, Anthropic, Google) still require API keys. See [Installation](getting-started/installation/#setting-up-llm-providers) for setup.

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 [project]
 name = "codeoptix"
-version = "0.1.1"
+version = "0.1.2"
 description = "Agentic Code Optimization & Deep Evaluation for Superior Coding Agent Experience. Built by Superagentic AI."
 readme = "README.md"
 requires-python = ">=3.12"

src/codeoptix/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -5,4 +5,4 @@
 Built by Superagentic AI - Advancing AI agent optimization and autonomous systems.
 """
 
-__version__ = "0.1.1"
+__version__ = "0.1.2"

src/codeoptix/adapters/basic.py

Lines changed: 1 addition & 1 deletion

@@ -29,7 +29,7 @@ def __init__(self, config: dict[str, Any]):
 
         provider_name = llm_config.get("provider", "ollama")
         self.llm_client: LLMClient = create_llm_client(
-            LLMProvider(provider_name), llm_config.get("api_key")
+            LLMProvider(provider_name), llm_config.get("api_key"), llm_config.get("model")
         )
 
         # Set model
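For context, the `llm_config` this adapter reads now carries the model through to the factory. A minimal sketch of the expected shape; only the key names (`provider`, `api_key`, `model`) come from the diff, the surrounding dict is illustrative:

```python
# Hypothetical adapter config; key names match the diff above, values are examples.
adapter_config = {
    "llm_config": {
        "provider": "ollama",   # falls back to "ollama" when omitted
        "api_key": None,        # ignored by the Ollama client
        "model": "llama3.1",    # now forwarded to create_llm_client by this change
    }
}
```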

src/codeoptix/cli.py

Lines changed: 14 additions & 5 deletions

@@ -147,7 +147,11 @@ def eval(agent, behaviors, output, config, llm_provider, llm_api_key, context, f
         sys.exit(1)
 
     # Normalize provider name and decide if we need an API key
-    llm_provider = (llm_provider or os.getenv("CODEOPTIX_LLM_PROVIDER", "openai")).lower()
+    llm_provider = (
+        eval_config.get("llm_provider")
+        or llm_provider
+        or os.getenv("CODEOPTIX_LLM_PROVIDER", "openai")
+    ).lower()
     is_ollama = llm_provider == "ollama"
 
     # Create adapter
@@ -222,7 +226,10 @@ def eval(agent, behaviors, output, config, llm_provider, llm_api_key, context, f
     click.echo("🧠 Using local Ollama provider.")
 
     try:
-        llm_client = create_llm_client(llm_provider_enum, api_key=api_key)
+        llm_config = adapter_config["llm_config"]
+        llm_client = create_llm_client(
+            LLMProvider(llm_config["provider"]), llm_config.get("api_key"), llm_config.get("model")
+        )
     except Exception as e:
         click.echo(f"❌ Error: Failed to create LLM client: {e}", err=True)
         if "api_key" in str(e).lower():
@@ -470,9 +477,11 @@ def evolve(input, reflection, output, iterations, config, llm_provider, llm_api_
     adapter = create_adapter(agent_type, adapter_config)
     click.echo(f"✅ Created adapter: {adapter.get_adapter_type()}")
 
-    # Create LLM client
-    llm_provider_enum = LLMProvider[llm_provider.upper()]
-    llm_client = create_llm_client(llm_provider_enum, api_key=llm_api_key)
+    # Create LLM client (use same config as adapter for consistency)
+    llm_config = adapter_config["llm_config"]
+    llm_client = create_llm_client(
+        LLMProvider(llm_config["provider"]), llm_config.get("api_key"), llm_config.get("model")
+    )
 
     # Create evaluation engine
     eval_engine_config = evolve_config.get("evaluation", {})
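To make the new precedence in the first hunk concrete, a standalone sketch with hypothetical values (the expression mirrors the diff; the sample inputs are not real CLI output):

```python
import os

# Config file value wins over the provider flag, which wins over the env var.
eval_config = {"llm_provider": "ollama"}  # hypothetical config file setting
flag_value = "openai"                     # hypothetical CLI flag value

resolved = (
    eval_config.get("llm_provider")
    or flag_value
    or os.getenv("CODEOPTIX_LLM_PROVIDER", "openai")
).lower()
assert resolved == "ollama"  # the config file setting takes precedence
```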

src/codeoptix/utils/llm.py

Lines changed: 9 additions & 7 deletions

@@ -214,7 +214,7 @@ def get_available_models(self) -> list[str]:
 class OllamaClient(LLMClient):
     """Ollama local model client (http://localhost:11434)."""
 
-    def __init__(self, api_key: str | None = None):
+    def __init__(self, api_key: str | None = None, model: str = "llama3.1", **kwargs: Any):
         """Initialize Ollama client.
 
         api_key is unused but kept for interface compatibility.
@@ -250,7 +250,7 @@ def chat_completion(
         """Generate a chat completion using a local Ollama model."""
         payload = {
             "model": model,
-            "prompt": messages[0]["content"],  # Use first message content as prompt
+            "prompt": messages[0]["content"],
             "stream": False,
             "options": {
                 "temperature": temperature,
@@ -315,14 +315,16 @@ def get_available_models(self) -> list[str]:
         return models
 
 
-def create_llm_client(provider: LLMProvider, api_key: str | None = None) -> LLMClient:
+def create_llm_client(
+    provider: LLMProvider, api_key: str | None = None, model: str | None = None
+) -> LLMClient:
     """Factory function to create an LLM client."""
     if provider == LLMProvider.ANTHROPIC:
-        return AnthropicClient(api_key=api_key)
+        return AnthropicClient(api_key=api_key, model=model or "claude-3-5-sonnet-20241022")
     if provider == LLMProvider.OPENAI:
-        return OpenAIClient(api_key=api_key)
+        return OpenAIClient(api_key=api_key, model=model or "gpt-4o")
     if provider == LLMProvider.GOOGLE:
-        return GoogleClient(api_key=api_key)
+        return GoogleClient(api_key=api_key, model=model or "gemini-1.5-pro")
     if provider == LLMProvider.OLLAMA:
-        return OllamaClient(api_key=api_key)
+        return OllamaClient(model=model or "llama3.1")
     raise ValueError(f"Unsupported provider: {provider}")
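For illustration, how the updated factory resolves models when the caller omits one. A sketch assuming only the signature and per-provider defaults visible in this diff, with the import path taken from the file above:

```python
from codeoptix.utils.llm import LLMProvider, create_llm_client

# Explicit model: passed through to the provider client unchanged.
local_client = create_llm_client(LLMProvider.OLLAMA, model="llama3.1:8b")

# No model: the factory falls back to its per-provider default,
# e.g. "gpt-4o" for OpenAI and "llama3.1" for Ollama.
cloud_client = create_llm_client(LLMProvider.OPENAI, api_key="sk-...")
```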

uv.lock

Lines changed: 1 addition & 1 deletion
(Generated lockfile; diff not rendered.)
