Skip to content

Commit 5249675

Browse files
committed
Move the log line with the "[Processing]" hint earlier.
1 parent 231824f commit 5249675

File tree

4 files changed

+34
-36
lines changed

4 files changed

+34
-36
lines changed

graph_net/paddle/test_compiler.py

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -4,10 +4,6 @@
44
from pathlib import Path
55
import sys
66
import os
7-
from dataclasses import dataclass
8-
from contextlib import contextmanager
9-
import time
10-
import math
117
import numpy as np
128
import random
139
import platform
@@ -62,7 +58,7 @@ def get_hardward_name(args):
6258
)
6359
)
6460
)
65-
except Exception as e:
61+
except Exception:
6662
pass
6763
elif args.device == "cpu":
6864
hardware = platform.processor()
@@ -128,7 +124,7 @@ def get_static_model(args, model):
128124
backend=None,
129125
)
130126
static_model.eval()
131-
program = static_model.forward.concrete_program.main_program
127+
program = static_model.forward.concrete_program.main_program # noqa
132128
return static_model
133129

134130

@@ -308,11 +304,16 @@ def check_and_print_gpu_utilization(compiler):
308304

309305

310306
def test_single_model(args):
307+
model_path = os.path.normpath(args.model_path)
308+
test_compiler_util.print_with_log_prompt(
309+
"[Processing]", model_path, args.log_prompt
310+
)
311+
311312
compiler = get_compiler_backend(args)
312313
check_and_print_gpu_utilization(compiler)
313314

314-
input_dict = get_input_dict(args.model_path)
315-
model = get_model(args.model_path)
315+
input_dict = get_input_dict(model_path)
316+
model = get_model(model_path)
316317
model.eval()
317318

318319
test_compiler_util.print_basic_config(
@@ -341,7 +342,7 @@ def test_single_model(args):
341342
compiled_time_stats = {}
342343
try:
343344
print("Run model in compiled mode.", file=sys.stderr, flush=True)
344-
input_spec = get_input_spec(args.model_path)
345+
input_spec = get_input_spec(model_path)
345346
compiled_model = compiler(model, input_spec)
346347
compiled_out, compiled_time_stats = measure_performance(
347348
lambda: compiled_model(**input_dict), args, compiler, profile=False

graph_net/paddle/test_reference_device.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,9 @@
11
import argparse
2-
import importlib.util
32
import paddle
4-
import time
5-
import numpy as np
6-
import random
73
import os
84
from pathlib import Path
95
from contextlib import redirect_stdout, redirect_stderr
106
import json
11-
import re
127
import sys
138
import traceback
149

@@ -28,18 +23,23 @@ def get_reference_output_path(reference_dir, model_path):
2823

2924

3025
def test_single_model(args):
31-
ref_log = get_reference_log_path(args.reference_dir, args.model_path)
32-
ref_dump = get_reference_output_path(args.reference_dir, args.model_path)
26+
model_path = os.path.normpath(args.model_path)
27+
ref_log = get_reference_log_path(args.reference_dir, model_path)
28+
ref_dump = get_reference_output_path(args.reference_dir, model_path)
3329
print(f"Reference log path: {ref_log}", file=sys.stderr, flush=True)
3430
print(f"Reference outputs path: {ref_dump}", file=sys.stderr, flush=True)
3531

3632
with open(ref_log, "w", encoding="utf-8") as log_f:
3733
with redirect_stdout(log_f), redirect_stderr(log_f):
34+
test_compiler_util.print_with_log_prompt(
35+
"[Processing]", model_path, args.log_prompt
36+
)
37+
3838
compiler = test_compiler.get_compiler_backend(args)
3939
test_compiler.check_and_print_gpu_utilization(compiler)
4040

41-
input_dict = test_compiler.get_input_dict(args.model_path)
42-
model = test_compiler.get_model(args.model_path)
41+
input_dict = test_compiler.get_input_dict(model_path)
42+
model = test_compiler.get_model(model_path)
4343
model.eval()
4444

4545
test_compiler_util.print_with_log_prompt(
@@ -55,7 +55,7 @@ def test_single_model(args):
5555
success = False
5656
time_stats = {}
5757
try:
58-
input_spec = test_compiler.get_input_spec(args.model_path)
58+
input_spec = test_compiler.get_input_spec(model_path)
5959
compiled_model = compiler(model, input_spec)
6060
outputs, time_stats = test_compiler.measure_performance(
6161
lambda: compiled_model(**input_dict),

graph_net/paddle/test_target_device.py

Lines changed: 14 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,6 @@
11
import argparse
2-
import importlib.util
3-
import time
4-
import numpy as np
5-
import random
62
import os
7-
from pathlib import Path
83
import json
9-
import re
104
import sys
115
import traceback
126

@@ -64,11 +58,17 @@ def update_args_and_set_seed(args, model_path):
6458

6559

6660
def test_single_model(args):
61+
model_path = os.path.normpath(args.model_path)
62+
test_compiler_util.print_with_log_prompt(
63+
"[Processing]", model_path, args.log_prompt
64+
)
65+
args = update_args_and_set_seed(args, model_path)
66+
6767
compiler = test_compiler.get_compiler_backend(args)
6868
test_compiler.check_and_print_gpu_utilization(compiler)
6969

70-
input_dict = test_compiler.get_input_dict(args.model_path)
71-
model = test_compiler.get_model(args.model_path)
70+
input_dict = test_compiler.get_input_dict(model_path)
71+
model = test_compiler.get_model(model_path)
7272
model.eval()
7373

7474
test_compiler_util.print_basic_config(
@@ -80,7 +80,7 @@ def test_single_model(args):
8080
success = False
8181
time_stats = {}
8282
try:
83-
input_spec = test_compiler.get_input_spec(args.model_path)
83+
input_spec = test_compiler.get_input_spec(model_path)
8484
compiled_model = compiler(model, input_spec)
8585
outputs, time_stats = test_compiler.measure_performance(
8686
lambda: compiled_model(**input_dict), args, compiler, profile=False
@@ -95,17 +95,17 @@ def test_single_model(args):
9595

9696
test_compiler_util.print_running_status(args, success)
9797

98-
model_name = test_compiler_util.get_model_name(args.model_path)
99-
if test_compiler_util.get_subgraph_tag(args.model_path):
100-
model_name += "_" + test_compiler_util.get_subgraph_tag(args.model_path)
98+
model_name = test_compiler_util.get_model_name(model_path)
99+
if test_compiler_util.get_subgraph_tag(model_path):
100+
model_name += "_" + test_compiler_util.get_subgraph_tag(model_path)
101101

102102
ref_dump = test_reference_device.get_reference_output_path(
103-
args.reference_dir, args.model_path
103+
args.reference_dir, model_path
104104
)
105105
ref_out = paddle.load(str(ref_dump))
106106

107107
ref_log = test_reference_device.get_reference_log_path(
108-
args.reference_dir, args.model_path
108+
args.reference_dir, model_path
109109
)
110110
ref_time_stats = parse_time_stats_from_reference_log(ref_log)
111111

@@ -170,7 +170,6 @@ def main(args):
170170
test_compiler.init_env(args)
171171

172172
if path_utils.is_single_model_dir(args.model_path):
173-
args = update_args_and_set_seed(args, args.model_path)
174173
test_single_model(args)
175174
else:
176175
test_multi_models(args)

graph_net/test_compiler_util.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -143,8 +143,6 @@ def print_with_log_prompt(key, value, log_prompt):
143143

144144
def print_basic_config(args, hardware_name, compile_framework_version):
145145
model_path = os.path.normpath(args.model_path)
146-
print_with_log_prompt("[Processing]", model_path, args.log_prompt)
147-
148146
model_name = get_model_name(model_path)
149147
print_with_log_prompt("[Config] model:", model_name, args.log_prompt)
150148

0 commit comments

Comments (0)