@@ -249,13 +249,13 @@ def stream_chat(
         model: Optional[str] = None,
         max_tokens: int = None,
         temperature: float = 1,
-        parallel_tool_calls: bool = False,
         token: Optional[str] = None,
         timeout=(5, 60),
         usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
         debug: bool = False,
         raw: bool = False,
+        parallel_tool_calls: bool = False,
         **kwargs,
     ):
         headers, data = self._process_input(
@@ -295,10 +295,12 @@ def chat(
         model: Optional[str] = None,
         max_tokens: int = None,
         temperature: float = 1,
-        parallel_tool_calls: bool = False,
         token: Optional[str] = None,
         usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
+        debug: bool = False,
+        timeout=(5, 60),
+        parallel_tool_calls: bool = False,
         **kwargs,
     ) -> Any:
         output = ""
@@ -313,6 +315,8 @@ def chat(
             token=token,
             extra_headers=extra_headers,
             raw=False,
+            debug=debug,
+            timeout=timeout,
             **kwargs,
         ):
             if isinstance(x, dict):
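The hunk above also forwards the new `debug` and `timeout` arguments from `chat()` into `stream_chat()`, so the non-streaming path no longer falls back to the hard-coded `(5, 60)` default. A minimal, self-contained sketch of that pass-through pattern (the class below is an illustrative stand-in, not the actual client in this repo):

```python
from typing import Any, Iterator, Optional, Tuple


class ChatClientSketch:
    """Illustrative stand-in for the debug/timeout pass-through in chat()."""

    def stream_chat(
        self,
        model: Optional[str] = None,
        timeout: Tuple[int, int] = (5, 60),
        debug: bool = False,
        **kwargs: Any,
    ) -> Iterator[str]:
        # The real method streams chunks from the API using `timeout` as the
        # (connect, read) timeout; here we just emit two canned chunks.
        if debug:
            print(f"stream_chat: model={model!r}, timeout={timeout}")
        yield "Hello, "
        yield "world"

    def chat(
        self,
        model: Optional[str] = None,
        timeout: Tuple[int, int] = (5, 60),
        debug: bool = False,
        **kwargs: Any,
    ) -> str:
        # chat() wraps stream_chat() and now forwards debug/timeout
        # instead of silently dropping them.
        output = ""
        for chunk in self.stream_chat(
            model=model,
            debug=debug,
            timeout=timeout,
            **kwargs,
        ):
            output += chunk
        return output


print(ChatClientSketch().chat(model="demo", timeout=(5, 30), debug=True))
```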
@@ -340,11 +344,11 @@ async def stream_chat_async(
         temperature: float = 1,
         parallel_tool_calls: bool = False,
         token: Optional[str] = None,
-        timeout=(5, 60),
         usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
         debug: bool = False,
         raw: bool = False,
+        timeout=(5, 60),
         **kwargs,
     ):
         headers, data = self._process_input(
@@ -391,6 +395,7 @@ async def chat_async(
         token: Optional[str] = None,
         usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
+        timeout=(5, 60),
         **kwargs,
     ) -> Any:
         output = ""
@@ -404,6 +409,7 @@ async def chat_async(
             token=token,
             extra_headers=extra_headers,
             raw=False,
+            timeout=timeout,
             **kwargs,
         ):
             if isinstance(x, dict):
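The async pair gets the same treatment: `chat_async()` now accepts a `timeout` and hands it to `stream_chat_async()`. A hedged sketch of the same pattern with stand-in classes (again, not the repo's actual classes):

```python
import asyncio
from typing import Any, AsyncIterator, Tuple


class AsyncChatSketch:
    """Illustrative stand-in for the timeout pass-through in chat_async()."""

    async def stream_chat_async(
        self, timeout: Tuple[int, int] = (5, 60), **kwargs: Any
    ) -> AsyncIterator[str]:
        # The real method would stream from the API with this timeout;
        # here we just yield two canned chunks.
        for chunk in ("async ", "hello"):
            yield chunk

    async def chat_async(
        self, timeout: Tuple[int, int] = (5, 60), **kwargs: Any
    ) -> str:
        output = ""
        # timeout is now forwarded rather than reset to the default.
        async for chunk in self.stream_chat_async(timeout=timeout, **kwargs):
            output += chunk
        return output


print(asyncio.run(AsyncChatSketch().chat_async(timeout=(5, 120))))
```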
@@ -1208,6 +1214,7 @@ def __init__(
         base_url: str = "https://api.mistral.ai/v1/chat/completions",
         extra_headers: Optional[Dict[str, str]] = None,
         api_token: Optional[str] = None,
+        **kwargs,
     ):
         super().__init__(
             id=id,
@@ -1247,6 +1254,7 @@ def __init__(
         base_url: str = "https://api.groq.com/openai/v1/chat/completions",
         extra_headers: Optional[Dict[str, str]] = None,
         api_token: Optional[str] = None,
+        **kwargs,
     ):
         super().__init__(
             id=id,
@@ -1287,6 +1295,7 @@ def __init__(
         org_id: Optional[str] = None,
         extra_headers: Optional[Dict[str, str]] = None,
         api_token: Optional[str] = None,
+        **kwargs,
     ):
         if extra_headers is None:
             extra_headers = {}
@@ -1348,6 +1357,7 @@ def __init__(
         self,
         id: str,
         base_url: str = "http://localhost:11434/v1/chat/completions",
+        **kwargs,
     ):
         super().__init__(
             id=id,
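Finally, the four provider constructors shown here (Mistral, Groq, the one taking an `org_id`, and the local Ollama-style endpoint) gain a trailing `**kwargs`, which lets callers pass a shared set of keyword options to any provider without tripping a `TypeError` for arguments a particular backend does not define. A self-contained sketch of that idea; the class names below are hypothetical stand-ins:

```python
from typing import Any, Optional


class BaseChatSketch:
    """Hypothetical base client; stands in for the repo's base class."""

    def __init__(self, id: str, base_url: str, api_token: Optional[str] = None):
        self.id = id
        self.base_url = base_url
        self.api_token = api_token


class LocalChatSketch(BaseChatSketch):
    def __init__(
        self,
        id: str,
        base_url: str = "http://localhost:11434/v1/chat/completions",
        **kwargs: Any,  # tolerate options meant for other providers
    ):
        super().__init__(id=id, base_url=base_url)


# Without **kwargs, passing api_token here would raise
# TypeError: __init__() got an unexpected keyword argument 'api_token'.
client = LocalChatSketch(id="llama3", api_token="unused")
print(client.base_url)
```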