@@ -143,7 +143,7 @@ def get_model_name_with_subgraph_tag(model_path):
     return f"{fields[-2]}_{fields[-1]}" if re.match(pattern, fields[-1]) else fields[-1]
 
 
-def run_decomposer(
+def run_naive_decomposer(
     framework: str,
     model_path: str,
     output_dir: str,
@@ -170,8 +170,8 @@ def run_decomposer(
         json.dumps(decorator_config).encode()
     ).decode()
 
-    print(f"[Decomposing] model_path: {model_path}")
-    print(f"[Decomposing] split_positions: {split_positions}")
+    print(f"[Decomposition] model_path: {model_path}")
+    print(f"[Decomposition] split_positions: {split_positions}")
 
     cmd = [
         sys.executable,
@@ -215,8 +215,8 @@ def run_evaluation(
         for item in (f"--{key}", str(value))
     ]
 
-    print(f"[Batch Testing] Logging to: {log_path}")
-    print(f"[Command] {' '.join(cmd)}")
+    print(f"[Evaluation] Logging to: {log_path}")
+    print(f"[Evaluation] command: {' '.join(cmd)}")
 
     os.makedirs(os.path.dirname(log_path), exist_ok=True)
     with open(log_path, "w") as f:
@@ -286,7 +286,7 @@ def generate_refined_tasks(base_output_dir, current_pass_id):
     prev_max_subgraph_size = prev_config.get("max_subgraph_size")
     max_subgraph_size = prev_max_subgraph_size // 2
 
-    if not prev_incorrect_subgraphs or prev_max_subgraph_size <= 1:
+    if not prev_incorrect_subgraphs:
         return {}, {}, max_subgraph_size
 
     print("[Analysis] Refining splits based on previous incorrect models...")
@@ -338,7 +338,7 @@ def execute_decomposition_phase(max_subgraph_size, tasks_map, framework, pass_wo
     print(f"decomposed_samples_dir: {decomposed_samples_dir}")
 
     for model_name, task_info in tasks_map.items():
-        print(f"[Decomposing] max_subgraph_size: {max_subgraph_size}")
+        print(f"[Decomposition] max_subgraph_size: {max_subgraph_size}")
         original_path = task_info["original_path"]
         split_positions = calculate_split_positions_for_subgraph(
             task_info["subgraph_size"], max_subgraph_size
@@ -350,15 +350,15 @@ def execute_decomposition_phase(max_subgraph_size, tasks_map, framework, pass_wo
             rectified_model_path
         ), f"{rectified_model_path} does not exist."
 
-        success = run_decomposer(
+        success = run_naive_decomposer(
             framework, rectified_model_path, decomposed_samples_dir, split_positions
         )
         if not success:
             failed_decomposition.append(rectified_model_path)
 
     num_decomposed_samples = count_samples(decomposed_samples_dir)
     print(
-        f"[Decomposing] number of graphs: {len(tasks_map)} -> {num_decomposed_samples}",
+        f"[Decomposition] number of graphs: {len(tasks_map)} -> {num_decomposed_samples}",
         flush=True,
     )
     if (
@@ -455,15 +455,15 @@ def main(args):
         # --- Step 4: Testing ---
         pass_log_path = os.path.join(pass_work_dir, "batch_test_result.log")
         if task_controller.task_scheduler["run_evaluation"]:
-            print("\n--- Phase 2: Batch Testing ---")
+            print("\n--- Phase 2: Evaluation ---")
             run_evaluation(args.framework, args.test_config, pass_work_dir, pass_log_path)
 
         # --- Step 5: Analysis ---
         next_round_models = set()
         if task_controller.task_scheduler["post_analysis"]:
             print("\n--- Phase 3: Analysis ---")
             next_round_models = get_incorrect_models(args.tolerance, pass_log_path)
-            print(f"[Result] Found {len(next_round_models)} incorrect subgraphs.")
+            print(f"[Analysis] Found {len(next_round_models)} incorrect subgraphs.\n")
 
         # --- Step 6: Save State ---
         save_decompose_config(
0 commit comments