Skip to content

Commit 5226fd7

Browse files
authored
Merge pull request #8 from warmshao/dev
Dev
2 parents a65ca85 + 3287b6e commit 5226fd7

File tree

6 files changed

+31
-9
lines changed

6 files changed

+31
-9
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,7 @@ This project builds upon the foundation of the [browser-use](https://github.com/
66

77
1. **A Brand New WebUI:** We offer a comprehensive web interface that supports a wide range of `browser-use` functionalities. This UI is designed to be user-friendly and enables easy interaction with the browser agent.
88

9-
2. **Expanded LLM Support:** We've integrated support for various Large Language Models (LLMs), including: Gemini, OpenAI, Azure OpenAI, Anthropic, DeepSeek etc. And we plan to add support for even more models in the future.
9+
2. **Expanded LLM Support:** We've integrated support for various Large Language Models (LLMs), including: Gemini, OpenAI, Azure OpenAI, Anthropic, DeepSeek, Ollama etc. And we plan to add support for even more models in the future.
1010

1111
3. **Custom Browser Support:** You can use your own browser with our tool, eliminating the need to re-login to sites or deal with other authentication challenges. This feature also supports high-definition screen recording.
1212

requirements.txt

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,5 @@
11
browser-use
22
langchain-google-genai
33
pyperclip
4-
gradio
4+
gradio
5+
langchain-ollama

src/utils/utils.py

Lines changed: 8 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -11,6 +11,7 @@
1111
from langchain_openai import ChatOpenAI, AzureChatOpenAI
1212
from langchain_anthropic import ChatAnthropic
1313
from langchain_google_genai import ChatGoogleGenerativeAI
14+
from langchain_ollama import ChatOllama
1415

1516

1617
def get_llm_model(provider: str, **kwargs):
@@ -39,7 +40,7 @@ def get_llm_model(provider: str, **kwargs):
3940
)
4041
elif provider == 'openai':
4142
if not kwargs.get("base_url", ""):
42-
base_url = "https://api.openai.com/v1"
43+
base_url = os.getenv("OPENAI_ENDPOINT", "https://api.openai.com/v1")
4344
else:
4445
base_url = kwargs.get("base_url")
4546

@@ -66,7 +67,7 @@ def get_llm_model(provider: str, **kwargs):
6667
api_key = kwargs.get("api_key")
6768

6869
return ChatOpenAI(
69-
model=kwargs.get("model_name", 'gpt-4o'),
70+
model=kwargs.get("model_name", 'deepseek-chat'),
7071
temperature=kwargs.get("temperature", 0.0),
7172
base_url=base_url,
7273
api_key=api_key
@@ -81,6 +82,11 @@ def get_llm_model(provider: str, **kwargs):
8182
temperature=kwargs.get("temperature", 0.0),
8283
google_api_key=api_key,
8384
)
85+
elif provider == 'ollama':
86+
return ChatOllama(
87+
model=kwargs.get("model_name", 'qwen2.5:7b'),
88+
temperature=kwargs.get("temperature", 0.0),
89+
)
8490
elif provider == "azure_openai":
8591
if not kwargs.get("base_url", ""):
8692
base_url = os.getenv("AZURE_OPENAI_ENDPOINT", "")

tests/test_browser_use.py

Lines changed: 8 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -105,9 +105,15 @@ async def test_browser_use_custom():
105105
# api_key=os.getenv("GOOGLE_API_KEY", "")
106106
# )
107107

108+
# llm = utils.get_llm_model(
109+
# provider="deepseek",
110+
# model_name="deepseek-chat",
111+
# temperature=0.8
112+
# )
113+
108114
llm = utils.get_llm_model(
109-
provider="deepseek",
110-
model_name="deepseek-chat",
115+
provider="ollama",
116+
model_name="qwen2.5:7b",
111117
temperature=0.8
112118
)
113119

tests/test_llm_api.py

Lines changed: 10 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -106,7 +106,6 @@ def test_deepseek_model():
106106
base_url=os.getenv("DEEPSEEK_ENDPOINT", ""),
107107
api_key=os.getenv("DEEPSEEK_API_KEY", "")
108108
)
109-
pdb.set_trace()
110109
message = HumanMessage(
111110
content=[
112111
{"type": "text", "text": "who are you?"}
@@ -116,8 +115,17 @@ def test_deepseek_model():
116115
print(ai_msg.content)
117116

118117

118+
def test_ollama_model():
119+
from langchain_ollama import ChatOllama
120+
121+
llm = ChatOllama(model="qwen2.5:7b")
122+
ai_msg = llm.invoke("Sing a ballad of LangChain.")
123+
print(ai_msg.content)
124+
125+
119126
if __name__ == '__main__':
120127
# test_openai_model()
121128
# test_gemini_model()
122129
# test_azure_openai_model()
123-
test_deepseek_model()
130+
# test_deepseek_model()
131+
test_ollama_model()

webui.py

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -255,7 +255,8 @@ def main():
255255
use_vision = gr.Checkbox(label="use vision", value=True)
256256
with gr.Row():
257257
llm_provider = gr.Dropdown(
258-
["anthropic", "openai", "gemini", "azure_openai", "deepseek"], label="LLM Provider", value="gemini"
258+
["anthropic", "openai", "gemini", "azure_openai", "deepseek", "ollama"], label="LLM Provider",
259+
value="gemini"
259260
)
260261
llm_model_name = gr.Textbox(label="LLM Model Name", value="gemini-2.0-flash-exp")
261262
llm_temperature = gr.Number(label="LLM Temperature", value=1.0)

0 commit comments

Comments (0)