@@ -80,8 +80,7 @@ def generate_text(
         temperature: float = 0.01,
         stop: t.Optional[t.List[str]] = None,
         callbacks: Callbacks = None,
-    ) -> LLMResult:
-        ...
+    ) -> LLMResult: ...

     @abstractmethod
     async def agenerate_text(
@@ -91,7 +90,11 @@ async def agenerate_text(
         temperature: t.Optional[float] = 0.01,
         stop: t.Optional[t.List[str]] = None,
         callbacks: Callbacks = None,
-    ) -> LLMResult:
+    ) -> LLMResult: ...
+
+    @abstractmethod
+    def is_finished(self, response: LLMResult) -> bool:
+        """Check if the LLM response is finished/complete."""
         ...

     async def generate(
@@ -335,7 +338,7 @@ def check_args(
     ) -> dict[str, t.Any]:
         if n != 1:
             logger.warning("n values greater than 1 not support for LlamaIndex LLMs")
-        if temperature != 1e-8:
+        if temperature != 0.01:
             logger.info("temperature kwarg passed to LlamaIndex LLM")
         if stop is not None:
             logger.info("stop kwarg passed to LlamaIndex LLM")
@@ -359,7 +362,7 @@ def generate_text(
         self,
         prompt: PromptValue,
         n: int = 1,
-        temperature: float = 1e-8,
+        temperature: float = 0.01,
         stop: t.Optional[t.List[str]] = None,
         callbacks: Callbacks = None,
     ) -> LLMResult:
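
The second hunk above adds an abstract is_finished hook to the base LLM interface, alongside the switch of the default temperature from 1e-8 to 0.01. Below is a minimal sketch of how a concrete wrapper might implement the new hook, assuming a LangChain-style LLMResult whose generations expose a "finish_reason" in generation_info; the wrapper class name and the finish-reason convention are illustrative assumptions, not part of this diff.

# Illustrative sketch only: assumes the LangChain-style LLMResult type, where each
# generation may carry a "finish_reason" entry in its generation_info dict.
from langchain_core.outputs import LLMResult


class ExampleLLMWrapper:  # hypothetical concrete wrapper, not defined in this diff
    def is_finished(self, response: LLMResult) -> bool:
        """Treat the response as complete only if every generation stopped normally."""
        for generations in response.generations:
            for gen in generations:
                info = gen.generation_info or {}
                # "stop" (OpenAI-style) means the model ended on its own;
                # anything else, e.g. "length", is treated as a truncated answer.
                if info.get("finish_reason", "stop") != "stop":
                    return False
        return True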