Commit 99337b6
Added 'max_completion_tokens' params in ChatWatsonx (#96)
* Added 'max_completion_tokens' params
* poetry update
* Update unit test
1 parent 151fc30 commit 99337b6

File tree: 4 files changed (+425 −370 lines)


libs/ibm/langchain_ibm/chat_models.py

Lines changed: 8 additions & 1 deletion
@@ -413,7 +413,7 @@ class ChatWatsonx(BaseChatModel):
 
     from ibm_watsonx_ai.foundation_models.schema import TextChatParameters
 
     parameters = TextChatParameters(
-        max_tokens=100,
+        max_completion_tokens=100,
         temperature=0.5,
         top_p=1,
     )
@@ -513,6 +513,12 @@ class ChatWatsonx(BaseChatModel):
     max_tokens: Optional[int] = None
     """The maximum number of tokens that can be generated in the chat completion.
     The total length of input tokens and generated tokens is limited by the
+    model's context length.
+    This value is now deprecated in favor of 'max_completion_tokens' parameter."""
+
+    max_completion_tokens: Optional[int] = None
+    """The maximum number of tokens that can be generated in the chat completion.
+    The total length of input tokens and generated tokens is limited by the
     model's context length."""
 
     n: Optional[int] = None
@@ -1018,6 +1024,7 @@ def _get_supported_chat_params() -> list[str]:
         "logprobs",
         "top_logprobs",
         "max_tokens",
+        "max_completion_tokens",
         "n",
         "presence_penalty",
         "response_format",

0 commit comments