Skip to content

Commit 85c620a

Browse files
bsbds and tanbro authored
fix: revert incorrect base_url fallback logic that breaks task execution (#141)
fix: revert incorrect base_url fallback logic that breaks task execution (#141)

* fix: revert incorrect base_url fallback logic that breaks task execution

PR #138 (cee1e49) introduced a provider compatibility system that incorrectly overrode user-configured base URLs with default values. The problematic code used:

    base_url=model.base_url or default_base_url_for_provider(...)

This fails when base_url is an empty string ("") because Python's `or` operator treats empty strings as falsy, causing the fallback to activate even when the user explicitly configured a base URL.

For OpenAI-compatible providers like DashScope that store base_url="" in the database, this caused requests to be sent to api.openai.com instead of their configured endpoints. As a result, task execution failed to start completely — tasks showed blank responses with no backend execution activity.

This fix reverts to direct provider string comparison and removes the fallback logic, trusting the database configuration as-is.

Fixes the task execution regression introduced in cee1e49 (PR #138).

* fix: add coding plan providers

---------

Co-authored-by: tanbro <tanbro@163.com>
1 parent cee1e49 commit 85c620a

File tree

2 files changed

+36
-21
lines changed

2 files changed

+36
-21
lines changed

src/xagent/core/model/chat/basic/adapter.py

Lines changed: 20 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,6 @@
11
import os
22

33
from ....model import ChatModelConfig, ModelConfig
4-
from ....model.providers import (
5-
default_base_url_for_provider,
6-
provider_compatibility_for_provider,
7-
)
84
from ....retry import create_retry_wrapper
95
from ..error import retry_on
106
from .azure_openai import AzureOpenAILLM
@@ -23,25 +19,26 @@ def create_base_llm(model: ModelConfig) -> BaseLLM:
2319
if not isinstance(model, ChatModelConfig):
2420
raise TypeError(f"Invalid model type: {type(model).__name__}")
2521

26-
compatibility = provider_compatibility_for_provider(model.model_provider)
27-
28-
if compatibility == "openai_compatible":
22+
if model.model_provider == "openai":
2923
llm: BaseLLM = OpenAILLM(
3024
model_name=model.model_name,
3125
api_key=model.api_key,
32-
base_url=model.base_url
33-
or default_base_url_for_provider(model.model_provider),
26+
base_url=model.base_url,
3427
default_temperature=model.default_temperature,
3528
default_max_tokens=model.default_max_tokens,
3629
timeout=model.timeout,
3730
abilities=model.abilities,
3831
)
39-
elif compatibility == "claude_compatible":
40-
llm = ClaudeLLM(
32+
elif model.model_provider in (
33+
"alibaba-coding-plan",
34+
"alibaba-coding-plan-cn",
35+
"zai-coding-plan",
36+
"zhipuai-coding-plan",
37+
):
38+
llm = OpenAILLM(
4139
model_name=model.model_name,
4240
api_key=model.api_key,
43-
base_url=model.base_url
44-
or default_base_url_for_provider(model.model_provider),
41+
base_url=model.base_url,
4542
default_temperature=model.default_temperature,
4643
default_max_tokens=model.default_max_tokens,
4744
timeout=model.timeout,
@@ -78,6 +75,16 @@ def create_base_llm(model: ModelConfig) -> BaseLLM:
7875
timeout=model.timeout,
7976
abilities=model.abilities,
8077
)
78+
elif model.model_provider == "claude":
79+
llm = ClaudeLLM(
80+
model_name=model.model_name,
81+
api_key=model.api_key,
82+
base_url=model.base_url,
83+
default_temperature=model.default_temperature,
84+
default_max_tokens=model.default_max_tokens,
85+
timeout=model.timeout,
86+
abilities=model.abilities,
87+
)
8188
elif model.model_provider == "xinference":
8289
llm = XinferenceLLM(
8390
model_name=model.model_name,

src/xagent/core/model/chat/langchain.py

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -10,10 +10,6 @@
1010
from langchain_openai import AzureChatOpenAI, ChatOpenAI
1111

1212
from ...model import ChatModelConfig, ModelConfig
13-
from ...model.providers import (
14-
default_base_url_for_provider,
15-
provider_compatibility_for_provider,
16-
)
1713
from ...retry import ExponentialBackoff, RetryStrategy, create_retry_wrapper
1814
from .error import retry_on
1915

@@ -101,16 +97,28 @@ def create_base_chat_model(
10197
raise TypeError(f"Unsupported Chat model type: {type(model).__name__}")
10298

10399
temp = temperature if temperature is not None else model.default_temperature
104-
compatibility = provider_compatibility_for_provider(model.model_provider)
105100

106-
if compatibility == "openai_compatible":
101+
if model.model_provider == "openai":
107102
return ChatOpenAI(
108103
model=model.model_name,
109104
temperature=temp,
110105
max_tokens=model.default_max_tokens,
111106
api_key=model.api_key,
112-
base_url=model.base_url
113-
or default_base_url_for_provider(model.model_provider),
107+
base_url=model.base_url,
108+
timeout=model.timeout,
109+
)
110+
elif model.model_provider in (
111+
"alibaba-coding-plan",
112+
"alibaba-coding-plan-cn",
113+
"zai-coding-plan",
114+
"zhipuai-coding-plan",
115+
):
116+
return ChatOpenAI(
117+
model=model.model_name,
118+
temperature=temp,
119+
max_tokens=model.default_max_tokens,
120+
api_key=model.api_key,
121+
base_url=model.base_url,
114122
timeout=model.timeout,
115123
)
116124
elif model.model_provider == "zhipu":

0 commit comments

Comments (0)