Skip to content

Commit 6df0e63

Browse files
committed
Merge branch 'main' into dafult_broswer_and_model
2 parents 05d4191 + 82b1133 commit 6df0e63

File tree

4 files changed

+48
-2
lines changed

4 files changed

+48
-2
lines changed

requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
browser-use==0.1.47
1+
browser-use==0.1.48
22
pyperclip==1.9.0
33
gradio==5.27.0
44
json-repair

src/utils/config.py

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,5 +59,32 @@
5959
"Pro/THUDM/glm-4-9b-chat",
6060
],
6161
"ibm": ["ibm/granite-vision-3.1-2b-preview", "meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
62-
"meta-llama/llama-3-2-90b-vision-instruct"]
62+
"meta-llama/llama-3-2-90b-vision-instruct"],
63+
"modelscope":[
64+
"Qwen/Qwen2.5-Coder-32B-Instruct",
65+
"Qwen/Qwen2.5-Coder-14B-Instruct",
66+
"Qwen/Qwen2.5-Coder-7B-Instruct",
67+
"Qwen/Qwen2.5-72B-Instruct",
68+
"Qwen/Qwen2.5-32B-Instruct",
69+
"Qwen/Qwen2.5-14B-Instruct",
70+
"Qwen/Qwen2.5-7B-Instruct",
71+
"Qwen/QwQ-32B-Preview",
72+
"Qwen/Qwen2.5-VL-3B-Instruct",
73+
"Qwen/Qwen2.5-VL-7B-Instruct",
74+
"Qwen/Qwen2.5-VL-32B-Instruct",
75+
"Qwen/Qwen2.5-VL-72B-Instruct",
76+
"deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
77+
"deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
78+
"deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
79+
"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
80+
"deepseek-ai/DeepSeek-R1",
81+
"deepseek-ai/DeepSeek-V3",
82+
"Qwen/Qwen3-1.7B",
83+
"Qwen/Qwen3-4B",
84+
"Qwen/Qwen3-8B",
85+
"Qwen/Qwen3-14B",
86+
"Qwen/Qwen3-30B-A3B",
87+
"Qwen/Qwen3-32B",
88+
"Qwen/Qwen3-235B-A22B",
89+
],
6390
}

src/utils/llm_provider.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -323,5 +323,20 @@ def get_llm_model(provider: str, **kwargs):
323323
model_name=kwargs.get("model_name", "Qwen/QwQ-32B"),
324324
temperature=kwargs.get("temperature", 0.0),
325325
)
326+
elif provider == "modelscope":
327+
if not kwargs.get("api_key", ""):
328+
api_key = os.getenv("MODELSCOPE_API_KEY", "")
329+
else:
330+
api_key = kwargs.get("api_key")
331+
if not kwargs.get("base_url", ""):
332+
base_url = os.getenv("MODELSCOPE_ENDPOINT", "")
333+
else:
334+
base_url = kwargs.get("base_url")
335+
return ChatOpenAI(
336+
api_key=api_key,
337+
base_url=base_url,
338+
model_name=kwargs.get("model_name", "Qwen/QwQ-32B"),
339+
temperature=kwargs.get("temperature", 0.0),
340+
)
326341
else:
327342
raise ValueError(f"Unsupported provider: {provider}")

src/webui/webui_manager.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
from typing import Optional, Dict, List
88
import uuid
99
import asyncio
10+
import time
1011

1112
from gradio.components import Component
1213
from browser_use.browser.browser import Browser
@@ -108,6 +109,9 @@ def load_config(self, config_path: str):
108109
update_components[comp] = comp.__class__(value=comp_val, type="messages")
109110
else:
110111
update_components[comp] = comp.__class__(value=comp_val)
112+
if comp_id == "agent_settings.planner_llm_provider":
113+
yield update_components # yield provider, let callback run
114+
time.sleep(0.1) # wait for Gradio UI callback
111115

112116
config_status = self.id_to_component["load_save_config.config_status"]
113117
update_components.update(

0 commit comments

Comments (0)