4 files changed: +425 −370 lines changed

@@ -413,7 +413,7 @@ class ChatWatsonx(BaseChatModel):
 from ibm_watsonx_ai.foundation_models.schema import TextChatParameters

 parameters = TextChatParameters(
-    max_tokens=100,
+    max_completion_tokens=100,
     temperature=0.5,
     top_p=1,
 )
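For context, the updated docstring example corresponds to usage roughly like the sketch below. This is a minimal sketch, not official documentation: the model_id, url, and project_id values are placeholders, and credentials are assumed to be provided through the usual watsonx.ai mechanisms (for example, an API key in the environment).

```python
from ibm_watsonx_ai.foundation_models.schema import TextChatParameters
from langchain_ibm import ChatWatsonx

# max_completion_tokens replaces the deprecated max_tokens in TextChatParameters.
parameters = TextChatParameters(
    max_completion_tokens=100,
    temperature=0.5,
    top_p=1,
)

# Placeholder model_id, url, and project_id -- substitute your own values.
chat = ChatWatsonx(
    model_id="ibm/granite-3-8b-instruct",
    url="https://us-south.ml.cloud.ibm.com",
    project_id="YOUR_PROJECT_ID",
    params=parameters,
)
```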
@@ -513,6 +513,12 @@ class ChatWatsonx(BaseChatModel):
     max_tokens: Optional[int] = None
     """The maximum number of tokens that can be generated in the chat completion.
     The total length of input tokens and generated tokens is limited by the
+    model's context length.
+    This value is now deprecated in favor of 'max_completion_tokens' parameter."""
+
+    max_completion_tokens: Optional[int] = None
+    """The maximum number of tokens that can be generated in the chat completion.
+    The total length of input tokens and generated tokens is limited by the
     model's context length."""

     n: Optional[int] = None
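Since max_tokens and max_completion_tokens are both declared as ChatWatsonx fields, the limit can also be passed directly to the constructor. A minimal sketch under the same assumptions as above (placeholder connection values, credentials supplied via the environment, e.g. WATSONX_APIKEY); max_tokens still works for backward compatibility but is marked deprecated in the docstring.

```python
from langchain_ibm import ChatWatsonx

# Placeholder connection values; credentials are assumed to come from the
# environment (e.g. WATSONX_APIKEY).
chat = ChatWatsonx(
    model_id="ibm/granite-3-8b-instruct",
    url="https://us-south.ml.cloud.ibm.com",
    project_id="YOUR_PROJECT_ID",
    max_completion_tokens=100,  # preferred over the deprecated max_tokens
)
```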
@@ -1018,6 +1024,7 @@ def _get_supported_chat_params() -> list[str]:
         "logprobs",
         "top_logprobs",
         "max_tokens",
+        "max_completion_tokens",
         "n",
         "presence_penalty",
         "response_format",