Skip to content

Commit e651b6e

Browse files
Authored commit e651b6e: Merge pull request #5 from chupark/pcw_openai_compatible (2 parents: b1fe889 + 791d29f).

File tree

2 files changed

+16
-3
lines changed

2 files changed

+16
-3
lines changed

python/dify_plugin/entities/model/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ class DefaultParameterName(Enum):
2222
PRESENCE_PENALTY = "presence_penalty"
2323
FREQUENCY_PENALTY = "frequency_penalty"
2424
MAX_TOKENS = "max_tokens"
25+
MAX_COMPLETION_TOKENS = "max_completion_tokens"
2526
RESPONSE_FORMAT = "response_format"
2627
JSON_SCHEMA = "json_schema"
2728

python/dify_plugin/interfaces/model/openai_compatible/llm.py

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -180,7 +180,11 @@ def validate_credentials(self, model: str, credentials: dict) -> None:
180180
endpoint_url += "/"
181181

182182
# prepare the payload for a simple ping to the model
183-
data = {"model": credentials.get("endpoint_model_name", model), "max_tokens": 5}
183+
if credentials.get("reasoning_thought_support") == "supported":
184+
# for reasoning thought support, they use max_completion_tokens
185+
data = {"model": credentials.get("endpoint_model_name", model), "max_completion_tokens": 5}
186+
else:
187+
data = {"model": credentials.get("endpoint_model_name", model), "max_tokens": 5}
184188

185189
completion_type = LLMMode.value_of(credentials["mode"])
186190

@@ -256,6 +260,14 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
256260
"""
257261
features = []
258262

263+
# for reasoning thought support, they use max_completion_tokens
264+
if credentials.get("reasoning_thought_support") == "supported":
265+
max_token_param_name = DefaultParameterName.MAX_COMPLETION_TOKENS.value
266+
max_token_param_label = "Max Completion Tokens"
267+
else:
268+
max_token_param_name = DefaultParameterName.MAX_TOKENS.value
269+
max_token_param_label = "Max Tokens"
270+
259271
function_calling_type = credentials.get("function_calling_type", "no_call")
260272
if function_calling_type == "function_call":
261273
features.append(ModelFeature.TOOL_CALL)
@@ -338,8 +350,8 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
338350
max=2,
339351
),
340352
ParameterRule(
341-
name=DefaultParameterName.MAX_TOKENS.value,
342-
label=I18nObject(en_US="Max Tokens", zh_Hans="最大标记"),
353+
name=max_token_param_name,
354+
label=I18nObject(en_US=max_token_param_label, zh_Hans="最大标记"),
343355
help=I18nObject(
344356
en_US="Maximum length of tokens for the model response.",
345357
zh_Hans="模型回答的tokens的最大长度。",

0 commit comments

Comments (0)