Skip to content

Commit 469d07e

Browse files
committed
✨ feat: implement SiliconFlow integration for v0.2
- Add SiliconFlow model provider with regional endpoint support
- Create modular model architecture (qwen, siliconflow modules)
- Implement ChatOpenAI workaround for function calling limitations
- Add comprehensive E2E test suite for SiliconFlow models
- Update tests to use context parameter and shared TEST_MODEL constant
- Add regional endpoint support with cn/prc and en/international aliases
- Include region normalization utilities for consistent handling
- Update all imports and dependencies for modular structure

SiliconFlow integration uses the OpenAI-compatible API with proper regional routing to avoid API quota issues in testing while maintaining full compatibility with the ReAct agent pattern.
1 parent f5129ba commit 469d07e

File tree

14 files changed

+772
-102
lines changed

14 files changed

+772
-102
lines changed

.env.example

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,9 @@ OPENAI_API_KEY=sk-...
88
# DashScope (Qwen models)
99
DASHSCOPE_API_KEY=sk-...
1010

11+
# SiliconFlow
12+
SILICONFLOW_API_KEY=sk-...
13+
1114
# LangSmith (tracing)
1215
LANGCHAIN_TRACING_V2=true
1316
LANGCHAIN_PROJECT=langgraph-up-react

src/common/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,14 @@
22

33
from . import prompts
44
from .context import Context
5-
from .models import create_qwen_model, get_supported_qwen_models
5+
from .models import create_qwen_model, create_siliconflow_model
66
from .tools import web_search
77
from .utils import load_chat_model
88

99
__all__ = [
1010
"Context",
1111
"create_qwen_model",
12-
"get_supported_qwen_models",
12+
"create_siliconflow_model",
1313
"web_search",
1414
"load_chat_model",
1515
"prompts",

src/common/models/__init__.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
"""Model integrations for the ReAct agent."""
2+
3+
from .qwen import create_qwen_model
4+
from .siliconflow import create_siliconflow_model
5+
6+
__all__ = [
7+
"create_qwen_model",
8+
"create_siliconflow_model",
9+
]

src/common/models.py renamed to src/common/models/qwen.py

Lines changed: 9 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,12 @@
1-
"""Custom model integrations for ReAct agent."""
1+
"""Qwen model integrations for ReAct agent."""
22

33
import os
4-
from typing import Any, List, Optional, Union
4+
from typing import Any, Optional, Union
55

66
from langchain_qwq import ChatQwen, ChatQwQ
77

8+
from ..utils import normalize_region
9+
810

911
def create_qwen_model(
1012
model_name: str,
@@ -19,7 +21,7 @@ def create_qwen_model(
1921
model_name: The model name (e.g., 'qwq-32b-preview', 'qwen-plus')
2022
api_key: DashScope API key (defaults to env var DASHSCOPE_API_KEY)
2123
base_url: Custom base URL for API (optional)
22-
region: Region setting ('prc' for China, 'international' for global)
24+
region: Region setting ('prc'/'cn' for China, 'international'/'en' for global)
2325
Defaults to env var REGION
2426
**kwargs: Additional model parameters
2527
@@ -36,10 +38,12 @@ def create_qwen_model(
3638

3739
# Set base URL based on region if not explicitly provided
3840
if base_url is None and region:
39-
if region.lower() == "prc":
41+
# Normalize region aliases
42+
normalized_region = normalize_region(region)
43+
if normalized_region == "prc":
4044
# China mainland endpoint
4145
base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
42-
elif region.lower() == "international":
46+
elif normalized_region == "international":
4347
# International endpoint
4448
base_url = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
4549

@@ -55,15 +59,3 @@ def create_qwen_model(
5559
return ChatQwQ(**config)
5660
else:
5761
return ChatQwen(**config)
58-
59-
60-
def get_supported_qwen_models() -> List[str]:
61-
"""Get list of supported Qwen models."""
62-
return [
63-
"qwen-plus",
64-
"qwen-turbo",
65-
"qwen-max",
66-
"qwq-32b-preview",
67-
"qvq-72b-preview",
68-
# Add more Qwen models as they become available
69-
]

src/common/models/siliconflow.py

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,67 @@
1+
"""SiliconFlow model integrations for ReAct agent."""
2+
3+
import os
4+
from typing import Any, Optional
5+
6+
# NOTE: Using ChatOpenAI instead of ChatSiliconFlow because langchain-siliconflow v0.1.1
7+
# does not support function calling (bind_tools raises NotImplementedError).
8+
# We'll switch back to ChatSiliconFlow once they add function calling support.
9+
from langchain_openai import ChatOpenAI
10+
11+
from ..utils import normalize_region
12+
13+
14+
def create_siliconflow_model(
15+
model_name: str,
16+
api_key: Optional[str] = None,
17+
base_url: Optional[str] = None,
18+
region: Optional[str] = None,
19+
**kwargs: Any,
20+
) -> ChatOpenAI:
21+
"""Create a SiliconFlow model using ChatOpenAI (OpenAI-compatible API).
22+
23+
NOTE: Using ChatOpenAI instead of ChatSiliconFlow because langchain-siliconflow v0.1.1
24+
does not support function calling (bind_tools raises NotImplementedError).
25+
SiliconFlow provides OpenAI-compatible API endpoints, so we use ChatOpenAI directly.
26+
27+
Args:
28+
model_name: The model name (e.g., 'Qwen/Qwen3-8B', 'THUDM/GLM-4.1V-9B-Thinking')
29+
api_key: SiliconFlow API key (defaults to env var SILICONFLOW_API_KEY)
30+
base_url: Custom base URL for API (optional)
31+
region: Region setting ('prc'/'cn' for China, 'international'/'en' for global)
32+
Defaults to env var REGION
33+
**kwargs: Additional model parameters
34+
35+
Returns:
36+
Configured ChatOpenAI instance pointing to SiliconFlow API
37+
"""
38+
# Get API key from env if not provided
39+
if api_key is None:
40+
api_key = os.getenv("SILICONFLOW_API_KEY")
41+
42+
# Get region from env if not provided
43+
if region is None:
44+
region = os.getenv("REGION")
45+
46+
# Set base URL based on region if not explicitly provided
47+
if base_url is None and region:
48+
# Normalize region aliases
49+
normalized_region = normalize_region(region)
50+
if normalized_region == "prc":
51+
base_url = "https://api.siliconflow.cn/v1"
52+
elif normalized_region == "international":
53+
base_url = "https://api.siliconflow.com/v1"
54+
55+
# Default to PRC endpoint if no region specified
56+
if base_url is None:
57+
base_url = "https://api.siliconflow.cn/v1"
58+
59+
# Create ChatOpenAI configuration for SiliconFlow
60+
config = {
61+
"model": model_name,
62+
"api_key": api_key,
63+
"base_url": base_url,
64+
**kwargs
65+
}
66+
67+
return ChatOpenAI(**config)

src/common/utils.py

Lines changed: 28 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,31 @@
11
"""Utility & helper functions."""
22

3-
from typing import Union
3+
from typing import Optional, Union
44

55
from langchain.chat_models import init_chat_model
66
from langchain_core.language_models import BaseChatModel
77
from langchain_core.messages import BaseMessage
88
from langchain_qwq import ChatQwen, ChatQwQ
99

10-
from .models import create_qwen_model
10+
11+
def normalize_region(region: str) -> Optional[str]:
12+
"""Normalize region aliases to standard values.
13+
14+
Args:
15+
region: Region string to normalize
16+
17+
Returns:
18+
Normalized region ('prc' or 'international') or None if invalid
19+
"""
20+
if not region:
21+
return None
22+
23+
region_lower = region.lower()
24+
if region_lower in ("prc", "cn"):
25+
return "prc"
26+
elif region_lower in ("international", "en"):
27+
return "international"
28+
return None
1129

1230

1331
def get_message_text(msg: BaseMessage) -> str:
@@ -31,10 +49,17 @@ def load_chat_model(
3149
fully_specified_name (str): String in the format 'provider:model'.
3250
"""
3351
provider, model = fully_specified_name.split(":", maxsplit=1)
52+
provider_lower = provider.lower()
3453

3554
# Handle Qwen models specially with dashscope integration
36-
if provider.lower() == "qwen":
55+
if provider_lower == "qwen":
56+
from .models import create_qwen_model
3757
return create_qwen_model(model)
3858

59+
# Handle SiliconFlow models
60+
if provider_lower == "siliconflow":
61+
from .models import create_siliconflow_model
62+
return create_siliconflow_model(model)
63+
3964
# Use standard langchain initialization for other providers
4065
return init_chat_model(model, model_provider=provider)

tests/conftest.py

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,9 @@
88
from langchain_core.messages import HumanMessage
99
from langgraph_sdk import get_client
1010

11+
# Default test model - use SiliconFlow to avoid API quota issues
12+
TEST_MODEL = "siliconflow:Qwen/Qwen3-8B"
13+
1114

1215
@pytest.fixture(scope="session", autouse=True)
1316
def load_env():
@@ -21,7 +24,7 @@ def load_env():
2124

2225
# Ensure required environment variables are available for tests
2326
# You can add fallback values or skip tests if keys are missing
24-
required_keys = ["DASHSCOPE_API_KEY", "TAVILY_API_KEY"]
27+
required_keys = ["DASHSCOPE_API_KEY", "TAVILY_API_KEY", "SILICONFLOW_API_KEY"]
2528
missing_keys = [key for key in required_keys if not os.getenv(key)]
2629

2730
if missing_keys:
@@ -36,11 +39,22 @@ async def langgraph_client():
3639

3740
@pytest.fixture
3841
async def assistant_id(langgraph_client):
39-
"""Get the first available assistant ID for testing."""
40-
assistants = await langgraph_client.assistants.search()
41-
if not assistants:
42-
pytest.skip("No assistants found for e2e testing")
43-
return assistants[0]["assistant_id"]
42+
"""Create an assistant with SiliconFlow Qwen3-8B model for testing."""
43+
assistant = await langgraph_client.assistants.create(
44+
graph_id="agent",
45+
context={
46+
"model": TEST_MODEL,
47+
},
48+
)
49+
assistant_id = assistant["assistant_id"]
50+
51+
yield assistant_id
52+
53+
# Cleanup
54+
try:
55+
await langgraph_client.assistants.delete(assistant_id)
56+
except Exception:
57+
pass # Ignore cleanup errors
4458

4559

4660
class TestHelpers:

tests/e2e_tests/test_deepwiki.py

Lines changed: 12 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -2,35 +2,33 @@
22

33
import pytest
44

5+
from ..conftest import TEST_MODEL
6+
57

68
@pytest.fixture
79
async def assistant_deepwiki_disabled(langgraph_client):
810
"""Create an assistant with deepwiki explicitly disabled."""
9-
config = {
10-
"configurable": {
11-
"enable_deepwiki": False,
12-
"system_prompt": "You are a helpful AI assistant.",
13-
}
14-
}
1511
assistant = await langgraph_client.assistants.create(
1612
graph_id="agent",
17-
config=config,
13+
context={
14+
"model": TEST_MODEL,
15+
"enable_deepwiki": False,
16+
"system_prompt": "You are a helpful AI assistant.",
17+
},
1818
)
1919
return assistant["assistant_id"]
2020

2121

2222
@pytest.fixture
2323
async def assistant_deepwiki_enabled(langgraph_client):
2424
"""Create an assistant with deepwiki explicitly enabled."""
25-
config = {
26-
"configurable": {
27-
"enable_deepwiki": True,
28-
"system_prompt": "You are a helpful AI assistant with access to deepwiki tools. When asked to use deepwiki tools, you must use them to get current documentation.",
29-
}
30-
}
3125
assistant = await langgraph_client.assistants.create(
3226
graph_id="agent",
33-
config=config,
27+
context={
28+
"model": TEST_MODEL,
29+
"enable_deepwiki": True,
30+
"system_prompt": "You are a helpful AI assistant with access to deepwiki tools. When asked to use deepwiki tools, you must use them to get current documentation.",
31+
},
3432
)
3533
return assistant["assistant_id"]
3634

@@ -312,11 +310,6 @@ async def test_deepwiki_configuration_persistence_e2e(
312310
# Test with deepwiki enabled configuration
313311
input_data = {
314312
"messages": [{"role": "human", "content": "Hello, please keep it brief"}],
315-
"configurable": {
316-
"enable_deepwiki": True,
317-
"max_search_results": 3,
318-
"system_prompt": "You are a helpful AI assistant with deepwiki access.",
319-
},
320313
}
321314

322315
# This should execute without errors even if deepwiki tools aren't used

0 commit comments

Comments (0)