@@ -40,10 +40,10 @@ def __init__(self, config: LLMConfig):
4040 f"{ config .secondary_model } (weight: { self ._weights [1 ]:.2f} )"
4141 )
4242
43- async def generate (self , prompt : str , ** kwargs ) -> str :
44- """Generate text using a randomly selected model based on weights"""
45- model = self ._sample_model ()
46- return await model .generate (prompt , ** kwargs )
43+ # async def generate(self, prompt: str, **kwargs) -> str:
44+ # """Generate text using a randomly selected model based on weights"""
45+ # model = self._sample_model()
46+ # return await model.generate(prompt, **kwargs)
4747
4848 async def generate_with_context (
4949 self , system_message : str , messages : List [Dict [str , str ]], ** kwargs
@@ -58,12 +58,12 @@ def _sample_model(self) -> LLMInterface:
         index = random.choices(range(len(models)), weights=self._weights, k=1)[0]
         return models[index]
 
-    async def generate_multiple(self, prompt: str, n: int, **kwargs) -> List[str]:
-        """Generate multiple texts in parallel"""
-        tasks = [self.generate(prompt, **kwargs) for _ in range(n)]
-        return await asyncio.gather(*tasks)
+    # async def generate_multiple(self, prompt: str, n: int, **kwargs) -> List[str]:
+    #     """Generate multiple texts in parallel"""
+    #     tasks = [self.generate(prompt, **kwargs) for _ in range(n)]
+    #     return await asyncio.gather(*tasks)
 
-    async def parallel_generate(self, prompts: List[str], **kwargs) -> List[str]:
-        """Generate responses for multiple prompts in parallel"""
-        tasks = [self.generate(prompt, **kwargs) for prompt in prompts]
-        return await asyncio.gather(*tasks)
+    # async def parallel_generate(self, prompts: List[str], **kwargs) -> List[str]:
+    #     """Generate responses for multiple prompts in parallel"""
+    #     tasks = [self.generate(prompt, **kwargs) for prompt in prompts]
+    #     return await asyncio.gather(*tasks)
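For context, the methods commented out here implemented two patterns that remain visible in the surviving `_sample_model` context lines: weighted selection between the primary and secondary model via `random.choices`, and concurrent fan-out via `asyncio.gather`. Below is a minimal standalone sketch of the same pattern under stated assumptions: `FakeModel`, the model names, and the weights are illustrative stand-ins for the repo's `LLMInterface` and config, not code from this commit.

```python
import asyncio
import random
from typing import List


class FakeModel:
    """Stand-in for the repo's LLMInterface; name and behavior are assumed."""

    def __init__(self, name: str):
        self.name = name

    async def generate(self, prompt: str) -> str:
        await asyncio.sleep(0)  # placeholder for real network latency
        return f"[{self.name}] {prompt}"


async def main() -> None:
    models = [FakeModel("primary"), FakeModel("secondary")]
    weights = [0.8, 0.2]  # illustrative weights, not the repo's defaults

    # Weighted sampling, as in _sample_model: pick an index by weight.
    index = random.choices(range(len(models)), weights=weights, k=1)[0]
    model = models[index]

    # Parallel fan-out, as in the removed generate_multiple/parallel_generate:
    # build one coroutine per request, then await them all together.
    results: List[str] = await asyncio.gather(
        *(model.generate(f"prompt {i}") for i in range(3))
    )
    print(results)


asyncio.run(main())
```

Note that `asyncio.gather` only overlaps the awaits; if the underlying client is synchronous, the calls still run one after another.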