@@ -69,6 +69,31 @@ async def stop_agent():
             gr.update(value="Stop", interactive=True),
             gr.update(interactive=True)
         )
+
+async def stop_research_agent():
+    """Request the agent to stop and update UI with enhanced feedback"""
+    global _global_agent_state, _global_browser_context, _global_browser
+
+    try:
+        # Request stop
+        _global_agent_state.request_stop()
+
+        # Update UI immediately
+        message = "Stop requested - the agent will halt at the next safe point"
+        logger.info(f"🛑 {message}")
+
+        # Return UI updates
+        return (  # errors_output
+            gr.update(value="Stopping...", interactive=False),  # stop_button
+            gr.update(interactive=False),  # run_button
+        )
+    except Exception as e:
+        error_msg = f"Error during stop: {str(e)}"
+        logger.error(error_msg)
+        return (
+            gr.update(value="Stop", interactive=True),
+            gr.update(interactive=True)
+        )
 
 async def run_browser_agent(
     agent_type,
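Note: `stop_research_agent()` relies on a module-level `_global_agent_state` object that exposes `request_stop()` (and `clear_stop()`, used further down in `run_deep_search`). That class is not part of this diff; a minimal sketch of the kind of cooperative stop flag those calls imply could look like the following — the event-based internals and the `is_stop_requested()` name are assumptions, not the project's actual implementation:

```python
import asyncio

class AgentState:
    """Hypothetical cooperative stop flag matching the calls seen in this diff."""

    def __init__(self):
        self._stop_requested = asyncio.Event()

    def request_stop(self):
        # Set by the UI stop handlers; the running agent checks this flag.
        self._stop_requested.set()

    def clear_stop(self):
        # Called at the start of a new run so a stale stop request is not inherited.
        self._stop_requested.clear()

    def is_stop_requested(self) -> bool:
        return self._stop_requested.is_set()


_global_agent_state = AgentState()
```

The handler itself only sets the flag and flips the buttons; actually halting is left to the running task, which is why the message says the agent "will halt at the next safe point".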
@@ -598,8 +623,12 @@ async def close_global_browser():
         await _global_browser.close()
         _global_browser = None
 
-async def run_deep_search(research_task, max_search_iteration_input, max_query_per_iter_input, llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key, use_vision, headless):
+async def run_deep_search(research_task, max_search_iteration_input, max_query_per_iter_input, llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key, use_vision, use_own_browser, headless):
     from src.utils.deep_research import deep_research
+    global _global_agent_state
+
+    # Clear any previous stop request
+    _global_agent_state.clear_stop()
 
     llm = utils.get_llm_model(
         provider=llm_provider,
@@ -608,12 +637,15 @@ async def run_deep_search(research_task, max_search_iteration_input, max_query_p
         base_url=llm_base_url,
         api_key=llm_api_key,
     )
-    markdown_content, file_path = await deep_research(research_task, llm,
+    markdown_content, file_path = await deep_research(research_task, llm, _global_agent_state,
                                                        max_search_iterations=max_search_iteration_input,
                                                        max_query_num=max_query_per_iter_input,
                                                        use_vision=use_vision,
-                                                       headless=headless)
-    return markdown_content, file_path
+                                                       headless=headless,
+                                                       use_own_browser=use_own_browser
+                                                       )
+
+    return markdown_content, file_path, gr.update(value="Stop", interactive=True), gr.update(interactive=True)
 
 
 def create_ui(config, theme_name="Ocean"):
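Passing `_global_agent_state` into `deep_research` only has an effect if the research loop checks it. `src/utils/deep_research.py` is not shown in this diff; the sketch below illustrates the kind of cooperative check the new argument implies — the loop structure, placeholder strings, and parameter handling are assumptions, not the project's code:

```python
# Hypothetical skeleton of a stop-aware deep_research; not the real implementation.
async def deep_research(task, llm, agent_state, max_search_iterations=10,
                        max_query_num=3, use_vision=False, headless=True,
                        use_own_browser=False):
    # llm, use_vision, headless and use_own_browser are unused in this skeleton.
    findings = []
    for i in range(max_search_iterations):
        # Cooperative cancellation: check the flag between iterations, i.e. at a
        # "safe point", rather than interrupting a browser action mid-flight.
        if agent_state and agent_state.is_stop_requested():
            break
        findings.append(f"iteration {i}: up to {max_query_num} queries executed")
    markdown_content = "# Research Report\n\n" + "\n".join(findings)
    file_path = "report.md"  # placeholder; the real code persists the report to disk
    return markdown_content, file_path
```

Note that `run_deep_search` itself now appends two `gr.update(...)` values to its return tuple so that, once the coroutine finishes (or is stopped), the Stop button is reset and the Run button re-enabled.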
@@ -815,57 +847,17 @@ def create_ui(config, theme_name="Ocean"):
                     label="Live Browser View",
                 )
 
-        with gr.TabItem("🧐 Deep Research"):
-            with gr.Group():
-                research_task_input = gr.Textbox(label="Research Task", lines=5, value="Compose a report on the use of Reinforcement Learning for training Large Language Models, encompassing its origins, current advancements, and future prospects, substantiated with examples of relevant models and techniques. The report should reflect original insights and analysis, moving beyond mere summarization of existing literature.")
-                with gr.Row():
-                    max_search_iteration_input = gr.Number(label="Max Search Iteration", value=20, precision=0)  # precision=0 ensures an integer
-                    max_query_per_iter_input = gr.Number(label="Max Query per Iteration", value=5, precision=0)  # precision=0 ensures an integer
-                research_button = gr.Button("Run Deep Research")
-                markdown_output_display = gr.Markdown(label="Research Report")
-                markdown_download = gr.File(label="Download Research Report")
-
-
-        with gr.TabItem("📁 Configuration", id=5):
-            with gr.Group():
-                config_file_input = gr.File(
-                    label="Load Config File",
-                    file_types=[".pkl"],
-                    interactive=True
-                )
-
-                load_config_button = gr.Button("Load Existing Config From File", variant="primary")
-                save_config_button = gr.Button("Save Current Config", variant="primary")
-
-                config_status = gr.Textbox(
-                    label="Status",
-                    lines=2,
-                    interactive=False
-                )
-
-            load_config_button.click(
-                fn=update_ui_from_config,
-                inputs=[config_file_input],
-                outputs=[
-                    agent_type, max_steps, max_actions_per_step, use_vision, tool_calling_method,
-                    llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key,
-                    use_own_browser, keep_browser_open, headless, disable_security, enable_recording,
-                    window_w, window_h, save_recording_path, save_trace_path, save_agent_history_path,
-                    task, config_status
-                ]
-            )
+        with gr.TabItem("🧐 Deep Research", id=5):
+            research_task_input = gr.Textbox(label="Research Task", lines=5, value="Compose a report on the use of Reinforcement Learning for training Large Language Models, encompassing its origins, current advancements, and future prospects, substantiated with examples of relevant models and techniques. The report should reflect original insights and analysis, moving beyond mere summarization of existing literature.")
+            with gr.Row():
+                max_search_iteration_input = gr.Number(label="Max Search Iteration", value=20, precision=0)  # precision=0 ensures an integer
+                max_query_per_iter_input = gr.Number(label="Max Query per Iteration", value=5, precision=0)  # precision=0 ensures an integer
+            with gr.Row():
+                research_button = gr.Button("▶️ Run Deep Research", variant="primary", scale=2)
+                stop_research_button = gr.Button("⏹️ Stop", variant="stop", scale=1)
+            markdown_output_display = gr.Markdown(label="Research Report")
+            markdown_download = gr.File(label="Download Research Report")
 
-            save_config_button.click(
-                fn=save_current_config,
-                inputs=[
-                    agent_type, max_steps, max_actions_per_step, use_vision, tool_calling_method,
-                    llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key,
-                    use_own_browser, keep_browser_open, headless, disable_security,
-                    enable_recording, window_w, window_h, save_recording_path, save_trace_path,
-                    save_agent_history_path, task,
-                ],
-                outputs=[config_status]
-            )
 
         with gr.TabItem("📊 Results", id=6):
             with gr.Group():
@@ -929,9 +921,15 @@ def create_ui(config, theme_name="Ocean"):
         # Run Deep Research
         research_button.click(
             fn=run_deep_search,
-            inputs=[research_task_input, max_search_iteration_input, max_query_per_iter_input, llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key, use_vision, headless],
-            outputs=[markdown_output_display, markdown_download]
-        )
+            inputs=[research_task_input, max_search_iteration_input, max_query_per_iter_input, llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key, use_vision, use_own_browser, headless],
+            outputs=[markdown_output_display, markdown_download, stop_research_button, research_button]
+        )
+        # Bind the stop button click event after errors_output is defined
+        stop_research_button.click(
+            fn=stop_research_agent,
+            inputs=[],
+            outputs=[stop_research_button, research_button],
+        )
 
         with gr.TabItem("🎥 Recordings", id=7):
             def list_recordings(save_recording_path):
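The `outputs` lists are what close the loop between the two handlers: Gradio maps `run_deep_search`'s four return values onto `markdown_output_display`, `markdown_download`, `stop_research_button`, and `research_button` in order, and `stop_research_agent`'s two return values onto the two buttons. A stripped-down, self-contained sketch of this run/stop wiring follows; component and handler names are illustrative, not taken from webui.py:

```python
# Minimal sketch of the run/stop button pattern used above. It assumes Gradio
# can service the stop handler while the run handler is still awaiting, which
# is the same assumption webui.py makes for its own stop buttons.
import asyncio
import gradio as gr

stop_flag = asyncio.Event()

async def run_job(task):
    stop_flag.clear()
    log = []
    for i in range(5):
        if stop_flag.is_set():
            log.append("stopped early")
            break
        await asyncio.sleep(1)
        log.append(f"step {i} done for: {task}")
    # Reset the buttons when the job ends, mirroring run_deep_search's extra outputs.
    return "\n".join(log), gr.update(value="Stop", interactive=True), gr.update(interactive=True)

async def stop_job():
    stop_flag.set()
    return gr.update(value="Stopping...", interactive=False), gr.update(interactive=False)

with gr.Blocks() as demo:
    task_box = gr.Textbox(label="Task")
    with gr.Row():
        run_btn = gr.Button("Run", variant="primary")
        stop_btn = gr.Button("Stop", variant="stop")
    output = gr.Textbox(label="Output")
    run_btn.click(fn=run_job, inputs=[task_box], outputs=[output, stop_btn, run_btn])
    stop_btn.click(fn=stop_job, inputs=[], outputs=[stop_btn, run_btn])

if __name__ == "__main__":
    demo.launch()
```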
@@ -966,6 +964,48 @@ def list_recordings(save_recording_path):
                 inputs=save_recording_path,
                 outputs=recordings_gallery
             )
+
+        with gr.TabItem("📁 Configuration", id=8):
+            with gr.Group():
+                config_file_input = gr.File(
+                    label="Load Config File",
+                    file_types=[".pkl"],
+                    interactive=True
+                )
+
+                load_config_button = gr.Button("Load Existing Config From File", variant="primary")
+                save_config_button = gr.Button("Save Current Config", variant="primary")
+
+                config_status = gr.Textbox(
+                    label="Status",
+                    lines=2,
+                    interactive=False
+                )
+
+            load_config_button.click(
+                fn=update_ui_from_config,
+                inputs=[config_file_input],
+                outputs=[
+                    agent_type, max_steps, max_actions_per_step, use_vision, tool_calling_method,
+                    llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key,
+                    use_own_browser, keep_browser_open, headless, disable_security, enable_recording,
+                    window_w, window_h, save_recording_path, save_trace_path, save_agent_history_path,
+                    task, config_status
+                ]
+            )
+
+            save_config_button.click(
+                fn=save_current_config,
+                inputs=[
+                    agent_type, max_steps, max_actions_per_step, use_vision, tool_calling_method,
+                    llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key,
+                    use_own_browser, keep_browser_open, headless, disable_security,
+                    enable_recording, window_w, window_h, save_recording_path, save_trace_path,
+                    save_agent_history_path, task,
+                ],
+                outputs=[config_status]
+            )
+
 
         # Attach the callback to the LLM provider dropdown
         llm_provider.change(