Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions frontend/src/components/pages/models.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -110,6 +110,26 @@ const LOCAL_PROVIDER_CONFIGS: Record<string, Partial<ProviderConfig>> = {
category: ["llm", "embedding"],
defaultBaseUrl: "https://api.openai.com/v1"
},
"zai-coding-plan": {
icon: <img src="/zhipu.svg" alt="Z.AI" className="w-6 h-6" />,
category: ["llm"],
defaultBaseUrl: "https://api.z.ai/api/coding/paas/v4"
},
"zhipuai-coding-plan": {
icon: <img src="/zhipu.svg" alt="Zhipu" className="w-6 h-6" />,
category: ["llm"],
defaultBaseUrl: "https://open.bigmodel.cn/api/coding/paas/v4"
},
"alibaba-coding-plan": {
icon: <img src="/dashscope.png" alt="Alibaba Bailian" className="w-6 h-6" />,
category: ["llm"],
defaultBaseUrl: "https://coding-intl.dashscope.aliyuncs.com/v1"
},
"alibaba-coding-plan-cn": {
icon: <img src="/dashscope.png" alt="Alibaba Bailian" className="w-6 h-6" />,
category: ["llm"],
defaultBaseUrl: "https://coding.dashscope.aliyuncs.com/v1"
},
azure_openai: {
icon: <Zap className="w-6 h-6 text-blue-500" />,
category: ["llm"]
Expand Down
12 changes: 12 additions & 0 deletions frontend/src/i18n/locales/en.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1242,6 +1242,18 @@ Build when you need.`
openai: {
description: "Access GPT-4o, GPT-4 Turbo, and DALL-E 3 models. Industry standard for reasoning and creativity.",
},
"zai-coding-plan": {
description: "GLM Coding Plan endpoints via Z.AI.",
},
"zhipuai-coding-plan": {
description: "GLM Coding Plan endpoints via Zhipu AI.",
},
"alibaba-coding-plan": {
description: "Alibaba Bailian Coding Plan endpoint.",
},
"alibaba-coding-plan-cn": {
description: "Alibaba Bailian Coding Plan endpoint (China).",
},
azure_openai: {
description: "Enterprise-grade OpenAI models hosted on Azure.",
},
Expand Down
12 changes: 12 additions & 0 deletions frontend/src/i18n/locales/zh.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1242,6 +1242,18 @@ Build when you need.`
openai: {
description: "访问 GPT-4o、GPT-4 Turbo 和 DALL-E 3 模型。推理和创造力的行业标准。",
},
"zai-coding-plan": {
description: "Z.AI GLM Coding Plan 端点。",
},
"zhipuai-coding-plan": {
description: "智谱 GLM Coding Plan 端点(中国区)。",
},
"alibaba-coding-plan": {
description: "阿里云百炼 Coding Plan 端点。",
},
"alibaba-coding-plan-cn": {
description: "阿里云百炼 Coding Plan 端点(中国区)。",
},
azure_openai: {
description: "Azure 托管的企业级 OpenAI 模型。",
},
Expand Down
32 changes: 20 additions & 12 deletions src/xagent/core/model/chat/basic/adapter.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
import os

from ....model import ChatModelConfig, ModelConfig
from ....model.providers import (
default_base_url_for_provider,
provider_compatibility_for_provider,
)
from ....retry import create_retry_wrapper
from ..error import retry_on
from .azure_openai import AzureOpenAILLM
Expand All @@ -19,11 +23,25 @@ def create_base_llm(model: ModelConfig) -> BaseLLM:
if not isinstance(model, ChatModelConfig):
raise TypeError(f"Invalid model type: {type(model).__name__}")

if model.model_provider == "openai":
compatibility = provider_compatibility_for_provider(model.model_provider)

if compatibility == "openai_compatible":
llm: BaseLLM = OpenAILLM(
model_name=model.model_name,
api_key=model.api_key,
base_url=model.base_url,
base_url=model.base_url
or default_base_url_for_provider(model.model_provider),
default_temperature=model.default_temperature,
default_max_tokens=model.default_max_tokens,
timeout=model.timeout,
abilities=model.abilities,
)
elif compatibility == "claude_compatible":
llm = ClaudeLLM(
model_name=model.model_name,
api_key=model.api_key,
base_url=model.base_url
or default_base_url_for_provider(model.model_provider),
default_temperature=model.default_temperature,
default_max_tokens=model.default_max_tokens,
timeout=model.timeout,
Expand Down Expand Up @@ -60,16 +78,6 @@ def create_base_llm(model: ModelConfig) -> BaseLLM:
timeout=model.timeout,
abilities=model.abilities,
)
elif model.model_provider == "claude":
llm = ClaudeLLM(
model_name=model.model_name,
api_key=model.api_key,
base_url=model.base_url,
default_temperature=model.default_temperature,
default_max_tokens=model.default_max_tokens,
timeout=model.timeout,
abilities=model.abilities,
)
elif model.model_provider == "xinference":
llm = XinferenceLLM(
model_name=model.model_name,
Expand Down
10 changes: 8 additions & 2 deletions src/xagent/core/model/chat/langchain.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,10 @@
from langchain_openai import AzureChatOpenAI, ChatOpenAI

from ...model import ChatModelConfig, ModelConfig
from ...model.providers import (
default_base_url_for_provider,
provider_compatibility_for_provider,
)
from ...retry import ExponentialBackoff, RetryStrategy, create_retry_wrapper
from .error import retry_on

Expand Down Expand Up @@ -97,14 +101,16 @@ def create_base_chat_model(
raise TypeError(f"Unsupported Chat model type: {type(model).__name__}")

temp = temperature if temperature is not None else model.default_temperature
compatibility = provider_compatibility_for_provider(model.model_provider)

if model.model_provider == "openai":
if compatibility == "openai_compatible":
return ChatOpenAI(
model=model.model_name,
temperature=temp,
max_tokens=model.default_max_tokens,
api_key=model.api_key,
base_url=model.base_url,
base_url=model.base_url
or default_base_url_for_provider(model.model_provider),
timeout=model.timeout,
)
elif model.model_provider == "zhipu":
Expand Down
146 changes: 146 additions & 0 deletions src/xagent/core/model/providers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
from typing import Any, Optional

# Maps legacy/underscore provider spellings onto the canonical hyphenated ids
# used throughout this module's lookup tables.
_PROVIDER_ALIASES: dict[str, str] = {
    "zai_coding_plan": "zai-coding-plan",
    "zhipuai_coding_plan": "zhipuai-coding-plan",
    "alibaba_coding_plan": "alibaba-coding-plan",
    "alibaba_coding_plan_cn": "alibaba-coding-plan-cn",
}

# Provider default base URLs used when callers omit an explicit base URL.
# Keys are canonical provider ids (see _PROVIDER_ALIASES).
_DEFAULT_BASE_URL_BY_PROVIDER: dict[str, str] = {
    "openai": "https://api.openai.com/v1",
    "dashscope": "https://dashscope.aliyuncs.com/compatible-mode/v1",
    "zhipu": "https://open.bigmodel.cn/api/paas/v4",
    # Opencode / models.dev naming
    "zai-coding-plan": "https://api.z.ai/api/coding/paas/v4",
    "zhipuai-coding-plan": "https://open.bigmodel.cn/api/coding/paas/v4",
    # Alibaba Bailian (Model Studio) coding plan
    "alibaba-coding-plan": "https://coding-intl.dashscope.aliyuncs.com/v1",
    "alibaba-coding-plan-cn": "https://coding.dashscope.aliyuncs.com/v1",
}

# Hand-picked model names surfaced for providers that do not expose a model
# listing endpoint. NOTE(review): the intl and cn Alibaba plans currently share
# one list — presumably intentional; confirm if the catalogs ever diverge.
_CURATED_MODELS_BY_PROVIDER: dict[str, tuple[str, ...]] = {
    "alibaba-coding-plan": (
        "glm-4.7",
        "glm-5",
        "qwen3-coder-next",
        "qwen3-coder-plus",
        "qwen3-max-2026-01-23",
        "qwen3.5-plus",
    ),
    "alibaba-coding-plan-cn": (
        "glm-4.7",
        "glm-5",
        "qwen3-coder-next",
        "qwen3-coder-plus",
        "qwen3-max-2026-01-23",
        "qwen3.5-plus",
    ),
}

# Static catalog of supported providers, in the order they are surfaced to
# callers of get_supported_provider_metadata(). The "compatibility" key is
# optional: when present it names the API family ("openai_compatible" /
# "claude_compatible") a generic adapter can use; when absent, callers are
# expected to handle the provider by id.
_SUPPORTED_PROVIDER_METADATA: tuple[dict[str, Any], ...] = (
    {
        "id": "openai",
        "name": "OpenAI",
        "description": "OpenAI API compatible models",
        "requires_base_url": False,
        "compatibility": "openai_compatible",
    },
    {
        "id": "claude",
        "name": "Anthropic Claude",
        "description": "Anthropic's Claude models",
        "requires_base_url": False,
        "compatibility": "claude_compatible",
    },
    {
        "id": "gemini",
        "name": "Google Gemini",
        "description": "Google's Gemini models",
        "requires_base_url": False,
    },
    {
        "id": "xinference",
        "name": "Xinference",
        "description": "Xinference models for local inference",
        "requires_base_url": True,
    },
    {
        "id": "dashscope",
        "name": "DashScope",
        "description": "Alibaba Cloud's DashScope models",
        "requires_base_url": False,
    },
    {
        "id": "alibaba-coding-plan",
        "name": "Alibaba Coding Plan",
        "description": "Alibaba Bailian (Model Studio) coding plan",
        "requires_base_url": False,
        "compatibility": "openai_compatible",
    },
    {
        "id": "alibaba-coding-plan-cn",
        "name": "Alibaba Coding Plan (China)",
        "description": "Alibaba Bailian (Model Studio) coding plan (China)",
        "requires_base_url": False,
        "compatibility": "openai_compatible",
    },
    {
        "id": "zhipu",
        "name": "Zhipu AI",
        "description": "Zhipu AI models (GLM series) using zai SDK",
        "requires_base_url": False,
    },
    {
        "id": "zai-coding-plan",
        "name": "Z.AI Coding Plan",
        "description": "GLM coding plan via Z.AI",
        "requires_base_url": False,
        "compatibility": "openai_compatible",
    },
    {
        "id": "zhipuai-coding-plan",
        "name": "Zhipu AI Coding Plan",
        "description": "GLM coding plan via Zhipu AI",
        "requires_base_url": False,
        "compatibility": "openai_compatible",
    },
)


def _normalize_provider(provider: str) -> str:
return provider.lower().strip()


def canonical_provider_name(provider: str) -> str:
    """Resolve *provider* to its canonical id.

    Normalizes case/whitespace first, then maps known underscore aliases to
    their hyphenated canonical spelling; unknown names pass through unchanged.
    """
    key = _normalize_provider(provider)
    if key in _PROVIDER_ALIASES:
        return _PROVIDER_ALIASES[key]
    return key


def default_base_url_for_provider(provider: str) -> Optional[str]:
    """Return the default base URL for *provider*, or None when none is known."""
    canonical = canonical_provider_name(provider)
    return _DEFAULT_BASE_URL_BY_PROVIDER.get(canonical)


def curated_models_for_provider(provider: str) -> tuple[str, ...]:
    """Return the curated model names for *provider* (empty tuple if none)."""
    canonical = canonical_provider_name(provider)
    return _CURATED_MODELS_BY_PROVIDER.get(canonical, ())


def provider_compatibility_for_provider(provider: str) -> Optional[str]:
    """Return the API-compatibility family declared for *provider*.

    Looks the canonical provider id up in the supported-provider catalog and
    returns its "compatibility" value as a string, or None when the provider
    is unknown or declares no compatibility family.
    """
    target = canonical_provider_name(provider)
    entry = next(
        (info for info in _SUPPORTED_PROVIDER_METADATA if info["id"] == target),
        None,
    )
    if entry is None:
        return None
    compatibility = entry.get("compatibility")
    if compatibility is None:
        return None
    return str(compatibility)


def get_supported_provider_metadata() -> list[dict[str, Any]]:
    """Return the supported-provider catalog, one copy per provider.

    Each record is a shallow copy of the module-level metadata (so callers
    cannot mutate the catalog), augmented with a "default_base_url" key when
    a default base URL is configured for that provider.
    """
    enriched: list[dict[str, Any]] = []
    for entry in _SUPPORTED_PROVIDER_METADATA:
        record = dict(entry)  # copy so callers can't mutate the module table
        base_url = default_base_url_for_provider(record["id"])
        if base_url is not None:
            record["default_base_url"] = base_url
        enriched.append(record)
    return enriched
9 changes: 6 additions & 3 deletions src/xagent/web/api/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
ImageModelConfig,
ModelConfig,
)
from xagent.core.model.providers import default_base_url_for_provider
from xagent.core.utils.security import redact_sensitive_text

from ..auth_dependencies import get_current_user
Expand Down Expand Up @@ -59,12 +60,14 @@ async def create_model(
detail="Only administrators can share models with all users",
)

base_url = model.base_url or default_base_url_for_provider(model.model_provider)

if model.category == "llm":
config: ModelConfig = ChatModelConfig(
id=model.model_id,
model_name=model.model_name,
model_provider=model.model_provider,
base_url=model.base_url,
base_url=base_url,
api_key=model.api_key,
default_temperature=model.temperature,
timeout=180.0,
Expand All @@ -76,7 +79,7 @@ async def create_model(
id=model.model_id,
model_name=model.model_name,
model_provider=model.model_provider,
base_url=model.base_url,
base_url=base_url,
api_key=model.api_key,
timeout=180.0,
abilities=model.abilities,
Expand All @@ -88,7 +91,7 @@ async def create_model(
id=model.model_id,
model_name=model.model_name,
model_provider=model.model_provider,
base_url=model.base_url,
base_url=base_url,
api_key=model.api_key,
default_temperature=model.temperature,
timeout=180.0,
Expand Down
2 changes: 2 additions & 0 deletions src/xagent/web/schemas/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -334,6 +334,8 @@ class ProviderInfo(BaseModel):
name: str
description: str
requires_base_url: bool
default_base_url: Optional[str] = None
compatibility: Optional[str] = None


class SupportedProvidersResponse(BaseModel):
Expand Down
Loading
Loading