61 changes: 61 additions & 0 deletions examples/circle_packing/config_llm_feedback.yaml
@@ -0,0 +1,61 @@
# Configuration for circle packing constructor evolution (n=26)
max_iterations: 100  # Increased iterations
checkpoint_interval: 10
log_level: "INFO"

# LLM configuration
llm:
  primary_model: "google/gemini-2.0-flash-001"
  # primary_model: "llama3.1-8b"
  primary_model_weight: 0.8
  secondary_model: "anthropic/claude-3.7-sonnet"
  # secondary_model: "llama-4-scout-17b-16e-instruct"
  secondary_model_weight: 0.2
  api_base: "https://openrouter.ai/api/v1"
  # api_base: "https://api.cerebras.ai/v1"
  temperature: 0.7
  top_p: 0.95
  max_tokens: 8192
  timeout: 600

# Prompt configuration
prompt:
  system_message: |
    You are an expert mathematician specializing in circle packing problems and computational geometry. Your task is to improve a constructor function that directly produces a specific arrangement of 26 circles in a unit square, maximizing the sum of their radii. The AlphaEvolve paper achieved a sum of 2.635 for n=26.

    Key geometric insights:
    - Circle packings often follow hexagonal patterns in the densest regions
    - Maximum density for infinite circle packing is pi/(2*sqrt(3)) ≈ 0.9069
    - Edge effects make square container packing harder than infinite packing
    - Circles can be placed in layers or shells when confined to a square
    - Circles of similar radius often form regular patterns, while varied radii allow better space utilization
    - Perfect symmetry may not yield the optimal packing due to edge effects

    Focus on designing an explicit constructor that places each circle in a specific position, rather than an iterative search algorithm.
  num_top_programs: 3
  use_template_stochasticity: true

# Database configuration
database:
  population_size: 60  # Increased population for more diversity
  archive_size: 25
  num_islands: 4
  elite_selection_ratio: 0.3
  exploitation_ratio: 0.7

# Evaluator configuration
evaluator:
  timeout: 60
  cascade_evaluation: true
  cascade_thresholds: [0.5, 0.75]
  parallel_evaluations: 4
  use_llm_feedback: true
  system_message: |
    You are an expert mathematician specializing in circle packing problems and computational geometry. Your task is to act as an expert evaluator of a solution to the circle packing problem.
    You will be given a solution to the circle packing problem, and you will need to evaluate it against the following criteria:
    - The solution is a valid circle packing of 26 circles in a unit square.

# Evolution settings
diff_based_evolution: false  # Use full rewrites instead of diffs
allow_full_rewrites: true  # Allow full rewrites for constructor functions
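The `evaluator.system_message` key is the new piece this example exercises, and it can be read straight from the file with PyYAML. A minimal sketch for inspecting it (illustrative only, not part of this change; it assumes PyYAML is installed):

```python
import yaml  # PyYAML

# Load the example config and inspect the new evaluator-level system message.
with open("examples/circle_packing/config_llm_feedback.yaml") as fh:
    cfg = yaml.safe_load(fh)

evaluator_cfg = cfg["evaluator"]
print(evaluator_cfg["use_llm_feedback"])                 # True
print(evaluator_cfg["system_message"].splitlines()[0])   # evaluator-specific prompt
```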
1 change: 1 addition & 0 deletions openevolve/config.py
@@ -104,6 +104,7 @@ class EvaluatorConfig:
    # LLM-based feedback
    use_llm_feedback: bool = False
    llm_feedback_weight: float = 0.1
    system_message: Optional[str] = None


@dataclass
5 changes: 4 additions & 1 deletion openevolve/evaluator.py
@@ -293,7 +293,10 @@ async def _llm_evaluate(self, program_code: str) -> Dict[str, float]:
"""

# Get LLM response
response = await self.llm_ensemble.generate(prompt)
response = await self.llm_ensemble.generate_with_context(
system_message=self.config.system_message,
messages=[{"role": "user", "content": prompt}],
)

# Extract JSON from response
try:
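This hunk routes the evaluator's feedback call through `generate_with_context`, so the evaluator-specific system message from `EvaluatorConfig` reaches the model instead of the shared prompt-level one. A rough sketch of the resulting call shape, with illustrative names (`ensemble`, `evaluator_config`, and `llm_feedback` are not identifiers from this PR):

```python
import json
import re

async def llm_feedback(ensemble, evaluator_config, program_code: str) -> dict:
    """Score a candidate program via LLM feedback (illustrative sketch only)."""
    prompt = f"Evaluate this circle packing constructor:\n\n{program_code}"
    response = await ensemble.generate_with_context(
        system_message=evaluator_config.system_message,  # new EvaluatorConfig field
        messages=[{"role": "user", "content": prompt}],
    )
    # Tolerate prose around the JSON object in the model's reply.
    match = re.search(r"\{.*\}", response, re.DOTALL)
    return json.loads(match.group(0)) if match else {}
```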
8 changes: 4 additions & 4 deletions openevolve/llm/base.py
@@ -9,10 +9,10 @@
class LLMInterface(ABC):
    """Abstract base class for LLM interfaces"""

    @abstractmethod
    async def generate(self, prompt: str, **kwargs) -> str:
        """Generate text from a prompt"""
        pass
    # @abstractmethod
    # async def generate(self, prompt: str, **kwargs) -> str:
    #     """Generate text from a prompt"""
    #     pass

    @abstractmethod
    async def generate_with_context(
24 changes: 12 additions & 12 deletions openevolve/llm/ensemble.py
@@ -40,10 +40,10 @@ def __init__(self, config: LLMConfig):
f"{config.secondary_model} (weight: {self._weights[1]:.2f})"
)

async def generate(self, prompt: str, **kwargs) -> str:
"""Generate text using a randomly selected model based on weights"""
model = self._sample_model()
return await model.generate(prompt, **kwargs)
# async def generate(self, prompt: str, **kwargs) -> str:
# """Generate text using a randomly selected model based on weights"""
# model = self._sample_model()
# return await model.generate(prompt, **kwargs)

async def generate_with_context(
self, system_message: str, messages: List[Dict[str, str]], **kwargs
@@ -58,12 +58,12 @@ def _sample_model(self) -> LLMInterface:
        index = random.choices(range(len(models)), weights=self._weights, k=1)[0]
        return models[index]

    async def generate_multiple(self, prompt: str, n: int, **kwargs) -> List[str]:
        """Generate multiple texts in parallel"""
        tasks = [self.generate(prompt, **kwargs) for _ in range(n)]
        return await asyncio.gather(*tasks)
    # async def generate_multiple(self, prompt: str, n: int, **kwargs) -> List[str]:
    #     """Generate multiple texts in parallel"""
    #     tasks = [self.generate(prompt, **kwargs) for _ in range(n)]
    #     return await asyncio.gather(*tasks)

    async def parallel_generate(self, prompts: List[str], **kwargs) -> List[str]:
        """Generate responses for multiple prompts in parallel"""
        tasks = [self.generate(prompt, **kwargs) for prompt in prompts]
        return await asyncio.gather(*tasks)
    # async def parallel_generate(self, prompts: List[str], **kwargs) -> List[str]:
    #     """Generate responses for multiple prompts in parallel"""
    #     tasks = [self.generate(prompt, **kwargs) for prompt in prompts]
    #     return await asyncio.gather(*tasks)
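With `generate`, `generate_multiple`, and `parallel_generate` commented out, any caller that still needs batched generation can recover the same behaviour through `generate_with_context` plus `asyncio.gather`. A hedged sketch (the helper name and the way the system message is supplied are assumptions, not part of this diff):

```python
import asyncio
from typing import List

async def generate_batch(ensemble, system_message: str, prompts: List[str]) -> List[str]:
    """Stand-in for the removed parallel_generate helper (illustrative only)."""
    tasks = [
        ensemble.generate_with_context(
            system_message=system_message,
            messages=[{"role": "user", "content": prompt}],
        )
        for prompt in prompts
    ]
    return await asyncio.gather(*tasks)
```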
14 changes: 7 additions & 7 deletions openevolve/llm/openai.py
@@ -34,13 +34,13 @@ def __init__(

logger.info(f"Initialized OpenAI LLM with model: {self.model}")

async def generate(self, prompt: str, **kwargs) -> str:
"""Generate text from a prompt"""
return await self.generate_with_context(
system_message=self.config.system_message,
messages=[{"role": "user", "content": prompt}],
**kwargs,
)
# async def generate(self, prompt: str, **kwargs) -> str:
# """Generate text from a prompt"""
# return await self.generate_with_context(
# system_message=self.config.system_message,
# messages=[{"role": "user", "content": prompt}],
# **kwargs,
# )

async def generate_with_context(
self, system_message: str, messages: List[Dict[str, str]], **kwargs
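Because the prompt-only `generate` wrapper is also dropped from the OpenAI client, call sites that used it would migrate to the explicit two-argument form. A small sketch of that migration (the `ask` helper and the system prompt text are illustrative):

```python
from typing import Any

async def ask(llm: Any, question: str) -> str:
    """Migrate a former `await llm.generate(question)` call site (sketch)."""
    # Before: return await llm.generate(question)
    return await llm.generate_with_context(
        system_message="You are a helpful assistant.",  # illustrative system prompt
        messages=[{"role": "user", "content": question}],
    )
```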