 from __future__ import annotations

 import logging
-from datetime import datetime
 from typing import List, Optional, Type

-from langchain_anthropic import ChatAnthropic
+from browser_use.agent.message_manager.service import MessageManager
+from browser_use.agent.message_manager.views import MessageHistory
+from browser_use.agent.prompts import SystemPrompt
+from browser_use.agent.views import ActionResult, AgentStepInfo
+from browser_use.browser.views import BrowserState
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import (
-    AIMessage,
-    BaseMessage,
     HumanMessage,
+    AIMessage
 )
-from langchain_openai import ChatOpenAI
-
-from browser_use.agent.message_manager.views import MessageHistory, MessageMetadata
-from browser_use.agent.prompts import AgentMessagePrompt, SystemPrompt
-from browser_use.agent.views import ActionResult, AgentOutput, AgentStepInfo
-from browser_use.browser.views import BrowserState
-from browser_use.agent.message_manager.service import MessageManager

 from .custom_prompts import CustomAgentMessagePrompt

@@ -43,14 +38,53 @@ def __init__(
         include_attributes: list[str] = [],
         max_error_length: int = 400,
         max_actions_per_step: int = 10,
+        tool_call_in_content: bool = False,
     ):
-        super().__init__(llm, task, action_descriptions, system_prompt_class, max_input_tokens,
-                         estimated_tokens_per_character, image_tokens, include_attributes, max_error_length,
-                         max_actions_per_step)
+        super().__init__(
+            llm=llm,
+            task=task,
+            action_descriptions=action_descriptions,
+            system_prompt_class=system_prompt_class,
+            max_input_tokens=max_input_tokens,
+            estimated_tokens_per_character=estimated_tokens_per_character,
+            image_tokens=image_tokens,
+            include_attributes=include_attributes,
+            max_error_length=max_error_length,
+            max_actions_per_step=max_actions_per_step,
+            tool_call_in_content=tool_call_in_content,
+        )

-        # Move Task info to state_message
+        # Custom: Move Task info to state_message
         self.history = MessageHistory()
         self._add_message_with_tokens(self.system_prompt)
+        tool_calls = [
+            {
+                'name': 'AgentOutput',
+                'args': {
+                    'current_state': {
+                        'evaluation_previous_goal': 'Unknown - No previous actions to evaluate.',
+                        'memory': '',
+                        'next_goal': 'Obtain task from user',
+                    },
+                    'action': [],
+                },
+                'id': '',
+                'type': 'tool_call',
+            }
+        ]
+        if self.tool_call_in_content:
+            # openai throws error if tool_calls are not responded -> move to content
+            example_tool_call = AIMessage(
+                content=f'{tool_calls}',
+                tool_calls=[],
+            )
+        else:
+            example_tool_call = AIMessage(
+                content=f'',
+                tool_calls=tool_calls,
+            )
+
+        self._add_message_with_tokens(example_tool_call)

     def add_state_message(
         self,
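
The branches above seed the conversation with an example AgentOutput call; which shape is used depends on the new tool_call_in_content flag. A minimal sketch (not part of the diff) of what each branch produces, assuming only langchain_core and using illustrative values:

from langchain_core.messages import AIMessage

# illustrative stand-in for the seeded AgentOutput example from the diff
tool_calls = [
    {
        'name': 'AgentOutput',
        'args': {'current_state': {'memory': '', 'next_goal': 'Obtain task from user'}, 'action': []},
        'id': '',
        'type': 'tool_call',
    }
]

# tool_call_in_content=True: the call is serialized into plain message text, since some
# providers (e.g. OpenAI) reject histories containing tool calls that were never answered
in_content = AIMessage(content=f'{tool_calls}', tool_calls=[])

# tool_call_in_content=False: the call is attached as a structured tool call
as_tool_call = AIMessage(content='', tool_calls=tool_calls)
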
@@ -68,7 +102,9 @@ def add_state_message(
                         msg = HumanMessage(content=str(r.extracted_content))
                         self._add_message_with_tokens(msg)
                     if r.error:
-                        msg = HumanMessage(content=str(r.error)[-self.max_error_length:])
+                        msg = HumanMessage(
+                            content=str(r.error)[-self.max_error_length:]
+                        )
                         self._add_message_with_tokens(msg)
                     result = None  # if result in history, we dont want to add it again

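The second hunk only rewraps the error handling; behaviour is unchanged, and only the last max_error_length characters of an error string are kept. A quick illustration of that slicing, with a hypothetical error text:

error = 'retrying click...\n' * 100            # ~1800 characters of hypothetical error output
max_error_length = 400
truncated = str(error)[-max_error_length:]     # negative slice keeps only the tail
assert len(truncated) == max_error_length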