
Commit 0b52621

comment out llm.generate method
1 parent b64befa commit 0b52621

3 files changed: +23 −23 lines changed


openevolve/llm/base.py

Lines changed: 4 additions & 4 deletions
@@ -9,10 +9,10 @@
 class LLMInterface(ABC):
     """Abstract base class for LLM interfaces"""
 
-    @abstractmethod
-    async def generate(self, prompt: str, **kwargs) -> str:
-        """Generate text from a prompt"""
-        pass
+    # @abstractmethod
+    # async def generate(self, prompt: str, **kwargs) -> str:
+    #     """Generate text from a prompt"""
+    #     pass
 
     @abstractmethod
     async def generate_with_context(
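
After this hunk, generate is no longer part of the abstract contract: the @abstractmethod decorator is commented out along with the method, so ABCMeta stops requiring subclasses to implement a bare-prompt entry point, and generate_with_context becomes the only method a concrete backend must provide. A minimal sketch of the interface as it stands after the commit (imports added so it runs stand-alone; the str return type on generate_with_context is an assumption, inferred from the removed generate wrapper in openai.py below):

from abc import ABC, abstractmethod
from typing import Dict, List


class LLMInterface(ABC):
    """Abstract base class for LLM interfaces"""

    # generate() is commented out by this commit, so subclasses are no
    # longer forced to implement a single-prompt entry point.

    @abstractmethod
    async def generate_with_context(
        self, system_message: str, messages: List[Dict[str, str]], **kwargs
    ) -> str:
        """Generate text from a system message plus chat-style messages"""
        ...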

openevolve/llm/ensemble.py

Lines changed: 12 additions & 12 deletions
@@ -40,10 +40,10 @@ def __init__(self, config: LLMConfig):
             f"{config.secondary_model} (weight: {self._weights[1]:.2f})"
         )
 
-    async def generate(self, prompt: str, **kwargs) -> str:
-        """Generate text using a randomly selected model based on weights"""
-        model = self._sample_model()
-        return await model.generate(prompt, **kwargs)
+    # async def generate(self, prompt: str, **kwargs) -> str:
+    #     """Generate text using a randomly selected model based on weights"""
+    #     model = self._sample_model()
+    #     return await model.generate(prompt, **kwargs)
 
     async def generate_with_context(
         self, system_message: str, messages: List[Dict[str, str]], **kwargs
@@ -58,12 +58,12 @@ def _sample_model(self) -> LLMInterface:
         index = random.choices(range(len(models)), weights=self._weights, k=1)[0]
         return models[index]
 
-    async def generate_multiple(self, prompt: str, n: int, **kwargs) -> List[str]:
-        """Generate multiple texts in parallel"""
-        tasks = [self.generate(prompt, **kwargs) for _ in range(n)]
-        return await asyncio.gather(*tasks)
+    # async def generate_multiple(self, prompt: str, n: int, **kwargs) -> List[str]:
+    #     """Generate multiple texts in parallel"""
+    #     tasks = [self.generate(prompt, **kwargs) for _ in range(n)]
+    #     return await asyncio.gather(*tasks)
 
-    async def parallel_generate(self, prompts: List[str], **kwargs) -> List[str]:
-        """Generate responses for multiple prompts in parallel"""
-        tasks = [self.generate(prompt, **kwargs) for prompt in prompts]
-        return await asyncio.gather(*tasks)
+    # async def parallel_generate(self, prompts: List[str], **kwargs) -> List[str]:
+    #     """Generate responses for multiple prompts in parallel"""
+    #     tasks = [self.generate(prompt, **kwargs) for prompt in prompts]
+    #     return await asyncio.gather(*tasks)
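
The context lines show how the ensemble still picks a model: random.choices over the model list, weighted by self._weights. The three commented-out methods all fanned calls out through generate(); the same behaviour can be recovered on top of generate_with_context. A minimal, self-contained sketch of that pattern (EchoModel, the weights, and the prompts are hypothetical stand-ins, not from this repository):

import asyncio
import random


class EchoModel:
    """Stand-in backend that echoes the last user message."""

    def __init__(self, name: str) -> None:
        self.name = name

    async def generate_with_context(self, system_message, messages, **kwargs):
        return f"{self.name}: {messages[-1]['content']}"


async def main() -> None:
    models = [EchoModel("primary"), EchoModel("secondary")]
    weights = [0.8, 0.2]  # illustrative; the real values come from LLMConfig

    # Same selection logic as _sample_model in the hunk context above
    index = random.choices(range(len(models)), weights=weights, k=1)[0]
    model = models[index]

    # Fan-out equivalent of the commented-out parallel_generate()
    prompts = ["hello", "world"]
    tasks = [
        model.generate_with_context("be brief", [{"role": "user", "content": p}])
        for p in prompts
    ]
    print(await asyncio.gather(*tasks))


asyncio.run(main())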

openevolve/llm/openai.py

Lines changed: 7 additions & 7 deletions
@@ -34,13 +34,13 @@ def __init__(
 
         logger.info(f"Initialized OpenAI LLM with model: {self.model}")
 
-    async def generate(self, prompt: str, **kwargs) -> str:
-        """Generate text from a prompt"""
-        return await self.generate_with_context(
-            system_message=self.config.system_message,
-            messages=[{"role": "user", "content": prompt}],
-            **kwargs,
-        )
+    # async def generate(self, prompt: str, **kwargs) -> str:
+    #     """Generate text from a prompt"""
+    #     return await self.generate_with_context(
+    #         system_message=self.config.system_message,
+    #         messages=[{"role": "user", "content": prompt}],
+    #         **kwargs,
+    #     )
 
     async def generate_with_context(
         self, system_message: str, messages: List[Dict[str, str]], **kwargs
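
The removed method shows exactly what the OpenAI backend's generate() did: wrap the raw prompt as a single user turn under the configured system message and delegate to generate_with_context. Call sites that relied on generate() now need that adaptation themselves; a minimal sketch, with the call shape copied from the removed lines (the free function name and the llm parameter are hypothetical):

async def generate_via_context(llm, prompt: str, **kwargs) -> str:
    # Equivalent of the commented-out OpenAILLM.generate(): one user
    # message, system message taken from the backend's config.
    return await llm.generate_with_context(
        system_message=llm.config.system_message,
        messages=[{"role": "user", "content": prompt}],
        **kwargs,
    )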
