Skip to content

Commit 2701f08

Browse files
authored
Merge pull request #56 from jvm123/bug-LLMConfig
Bug fix: config.yaml model ensemble syntax not correctly loaded into LLMConfig object
2 parents 4b099e3 + 5897987 commit 2701f08

File tree

2 files changed

+8
-5
lines changed

2 files changed

+8
-5
lines changed

openevolve/config.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,6 @@ class LLMConfig(LLMModelConfig):
4040

4141
# API configuration
4242
api_base: str = "https://api.openai.com/v1"
43-
name: str = "gpt-4o"
4443

4544
# Generation parameters
4645
system_message: Optional[str] = "system_message"
@@ -60,10 +59,10 @@ class LLMConfig(LLMModelConfig):
6059
evaluator_models: List[LLMModelConfig] = field(default_factory=lambda: [])
6160

6261
# Backwards compatibility with primary_model(_weight) options
63-
primary_model: str = "gemini-2.0-flash-lite"
64-
primary_model_weight: float = 0.8
65-
secondary_model: str = "gemini-2.0-flash"
66-
secondary_model_weight: float = 0.2
62+
primary_model: str = None
63+
primary_model_weight: float = None
64+
secondary_model: str = None
65+
secondary_model_weight: float = None
6766

6867
def __post_init__(self):
6968
"""Post-initialization to set up model configurations"""

openevolve/llm/openai.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,4 +107,8 @@ async def _call_api(self, params: Dict[str, Any]) -> str:
107107
response = await loop.run_in_executor(
108108
None, lambda: self.client.chat.completions.create(**params)
109109
)
110+
# Logging of system prompt, user message and response content
111+
logger = logging.getLogger(__name__)
112+
logger.debug(f"API parameters: {params}")
113+
logger.debug(f"API response: {response.choices[0].message.content}")
110114
return response.choices[0].message.content

0 commit comments

Comments
 (0)