
Commit 75ab505

fix deepseek-r1 ollama
1 parent: 7c89496

5 files changed: +33 −27 lines

src/agent/custom_agent.py

Lines changed: 8 additions & 5 deletions
@@ -242,17 +242,17 @@ async def step(self, step_info: Optional[CustomAgentStepInfo] = None) -> None:
             logger.info(f"🧠 All Memory: \n{step_info.memory}")
             self._save_conversation(input_messages, model_output)
             if self.model_name != "deepseek-reasoner":
-                # remove pre-prev message
-                self.message_manager._remove_last_state_message()
+                # remove prev message
+                self.message_manager._remove_state_message_by_index(-1)
         except Exception as e:
             # model call failed, remove last state message from history
-            self.message_manager._remove_last_state_message()
+            self.message_manager._remove_state_message_by_index(-1)
             raise e
 
+        actions: list[ActionModel] = model_output.action
         result: list[ActionResult] = await self.controller.multi_act(
-            model_output.action, self.browser_context
+            actions, self.browser_context
         )
-        actions: list[ActionModel] = model_output.action
         if len(result) != len(actions):
             # I think something changes, such information should let LLM know
             for ri in range(len(result), len(actions)):
@@ -261,6 +261,9 @@ async def step(self, step_info: Optional[CustomAgentStepInfo] = None) -> None:
                     error=f"{actions[ri].model_dump_json(exclude_unset=True)} is Failed to execute. \
                         Something new appeared after action {actions[len(result) - 1].model_dump_json(exclude_unset=True)}",
                     is_done=False))
+        if len(actions) == 0:
+            # TODO: fix no action case
+            result = [ActionResult(is_done=True, extracted_content=step_info.memory, include_in_memory=True)]
         self._last_result = result
         self._last_actions = actions
         if len(result) > 0 and result[-1].is_done:
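
Besides hoisting `actions` above the `multi_act` call and adding an explicit fallback for an empty action list, the step now removes state messages through `_remove_state_message_by_index(-1)`. A minimal sketch of why that matters, under the assumption that upstream's `_remove_last_state_message` simply pops the newest history entry, which with deepseek-r1 may be a trailing reasoning turn rather than the browser-state message:

# Minimal sketch, assuming upstream's _remove_last_state_message simply pops
# the newest history entry; with deepseek-r1 the newest entry may be a
# reasoning turn rather than the browser-state HumanMessage, so a targeted
# backwards search (what _remove_state_message_by_index does) is safer.
from langchain_core.messages import AIMessage, HumanMessage

history = [
    HumanMessage(content="browser state"),          # the message we want to drop
    AIMessage(content="<think>reasoning</think>"),  # trailing turn deepseek-r1 can leave
]

# Old behaviour: history.pop() would delete the reasoning turn, not the state.
# New behaviour: walk backwards until an actual HumanMessage is found.
for i in range(len(history) - 1, -1, -1):
    if isinstance(history[i], HumanMessage):
        del history[i]
        break

assert len(history) == 1 and isinstance(history[0], AIMessage)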

src/agent/custom_massage_manager.py

Lines changed: 12 additions & 12 deletions
@@ -70,18 +70,6 @@ def cut_messages(self):
         while diff > 0 and len(self.history.messages) > min_message_len:
             self.history.remove_message(min_message_len)  # alway remove the oldest message
             diff = self.history.total_tokens - self.max_input_tokens
-
-    def _remove_state_message_by_index(self, remove_ind=-1) -> None:
-        """Remove last state message from history"""
-        i = 0
-        remove_cnt = 0
-        while len(self.history.messages) and i <= len(self.history.messages):
-            i += 1
-            if isinstance(self.history.messages[-i].message, HumanMessage):
-                remove_cnt += 1
-            if remove_cnt == abs(remove_ind):
-                self.history.remove_message(-i)
-                break
 
     def add_state_message(
         self,
@@ -115,3 +103,15 @@ def _count_text_tokens(self, text: str) -> int:
             len(text) // self.estimated_characters_per_token
         )  # Rough estimate if no tokenizer available
         return tokens
+
+    def _remove_state_message_by_index(self, remove_ind=-1) -> None:
+        """Remove last state message from history"""
+        i = len(self.history.messages) - 1
+        remove_cnt = 0
+        while i >= 0:
+            if isinstance(self.history.messages[i].message, HumanMessage):
+                remove_cnt += 1
+            if remove_cnt == abs(remove_ind):
+                self.history.remove_message(i)
+                break
+            i -= 1
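
Relative to the deleted version, the re-added method walks a plain index from the tail toward zero, so it can no longer index past either end of the list the way the old `while ... i <= len(...)` / `messages[-i]` loop could when too few `HumanMessage`s exist. A standalone sketch of the same traversal; the flat message list standing in for the manager's history wrapper is an assumption of the example:

# Standalone sketch of the fixed traversal; a flat list stands in for the
# manager's MessageHistory wrapper, which is an assumption of this example.
from langchain_core.messages import AIMessage, HumanMessage

def remove_state_message_by_index(messages: list, remove_ind: int = -1) -> None:
    """Remove the abs(remove_ind)-th HumanMessage counting back from the end."""
    i = len(messages) - 1
    remove_cnt = 0
    while i >= 0:  # bounded scan: never steps past index 0
        if isinstance(messages[i], HumanMessage):
            remove_cnt += 1
        if remove_cnt == abs(remove_ind):
            del messages[i]
            break
        i -= 1

msgs = [HumanMessage(content="state 1"),
        AIMessage(content="action 1"),
        HumanMessage(content="state 2"),
        AIMessage(content="action 2")]
remove_state_message_by_index(msgs, -1)  # drops "state 2", the newest state
assert [m.content for m in msgs] == ["state 1", "action 1", "action 2"]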

src/agent/custom_prompts.py

Lines changed: 1 addition & 1 deletion
@@ -183,7 +183,7 @@ def get_user_message(self) -> HumanMessage:
 
         state_description = f"""
 {step_info_description}
-1. Task: {self.step_info.task}
+1. Task: {self.step_info.task}.
 2. Hints(Optional):
 {self.step_info.add_infos}
 3. Memory:
src/utils/utils.py

Lines changed: 2 additions & 3 deletions
@@ -94,12 +94,11 @@ def get_llm_model(provider: str, **kwargs):
         else:
             base_url = kwargs.get("base_url")
 
-        if kwargs.get("model_name", "qwen2.5:7b").startswith("deepseek-r1"):
+        if "deepseek-r1" in kwargs.get("model_name", "qwen2.5:7b"):
             return DeepSeekR1ChatOllama(
-                model=kwargs.get("model_name", "deepseek-r1:7b"),
+                model=kwargs.get("model_name", "deepseek-r1:14b"),
                 temperature=kwargs.get("temperature", 0.0),
                 num_ctx=kwargs.get("num_ctx", 32000),
-                num_predict=kwargs.get("num_predict", 1024),
                 base_url=kwargs.get("base_url", base_url),
             )
         else:
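
Two changes land here: the model-name match widens from a prefix test to a substring test, and the `num_predict` cap is dropped. The latter is plausibly the real fix, since a 1024-token generation limit can cut off DeepSeek-R1's long reasoning block before the final answer appears. A small self-contained check of what the widened match accepts; the tag list is illustrative, not from the commit:

# Illustrative tags only; the commit itself does not enumerate these.
tags = ["deepseek-r1:14b", "deepseek-r1:7b", "myrepo/deepseek-r1:8b", "qwen2.5:7b"]

old_match = [t for t in tags if t.startswith("deepseek-r1")]  # prefix test
new_match = [t for t in tags if "deepseek-r1" in t]           # substring test

assert old_match == ["deepseek-r1:14b", "deepseek-r1:7b"]
assert new_match == ["deepseek-r1:14b", "deepseek-r1:7b", "myrepo/deepseek-r1:8b"]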

tests/test_browser_use.py

Lines changed: 10 additions & 6 deletions
@@ -32,10 +32,14 @@ async def test_browser_use_org():
     #     api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
     # )
 
+    # llm = utils.get_llm_model(
+    #     provider="deepseek",
+    #     model_name="deepseek-chat",
+    #     temperature=0.8
+    # )
+
     llm = utils.get_llm_model(
-        provider="deepseek",
-        model_name="deepseek-chat",
-        temperature=0.8
+        provider="ollama", model_name="deepseek-r1:14b", temperature=0.5
     )
 
     window_w, window_h = 1920, 1080
@@ -152,9 +156,9 @@ async def test_browser_use_custom():
     controller = CustomController()
     use_own_browser = True
     disable_security = True
-    use_vision = True  # Set to False when using DeepSeek
+    use_vision = False  # Set to False when using DeepSeek
 
-    max_actions_per_step = 10
+    max_actions_per_step = 1
     playwright = None
     browser = None
     browser_context = None
@@ -189,7 +193,7 @@ async def test_browser_use_custom():
         )
     )
     agent = CustomAgent(
-        task="go to google.com and type 'OpenAI' click search and give me the first url",
+        task="Search 'Nvidia' and give me the first url",
         add_infos="",  # some hints for llm to complete the task
         llm=llm,
         browser=browser,
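
Taken together, the test now drives a local deepseek-r1:14b through ollama with vision off and a single action per step. A hedged sketch of that configuration; the imports follow the repository layout and the exact `CustomAgent` keyword arguments are assumed rather than confirmed by this diff:

# Hedged sketch of the configuration the updated test exercises; imports
# follow the repository layout and the CustomAgent kwargs are assumed.
import asyncio

from src.agent.custom_agent import CustomAgent
from src.utils import utils

async def main() -> None:
    llm = utils.get_llm_model(
        provider="ollama", model_name="deepseek-r1:14b", temperature=0.5
    )
    agent = CustomAgent(
        task="Search 'Nvidia' and give me the first url",
        add_infos="",            # hints for the llm, empty here
        llm=llm,
        use_vision=False,        # deepseek-r1 has no image input
        max_actions_per_step=1,  # keep the action schema simple for a reasoning model
    )
    await agent.run()

if __name__ == "__main__":
    asyncio.run(main())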
