Commit de0a106

revert file
1 parent fa3021e · commit de0a106

2 files changed: +5 -10 lines

graph_net/paddle/test_compiler.py

Lines changed: 1 addition & 4 deletions
@@ -112,15 +112,12 @@ def test_single_model(args):
     print("-- Run with compiled mode")
     build_strategy = paddle.static.BuildStrategy()
     # build_strategy.build_cinn_pass = True
-    compilation_start_time = time.time()
     compiled_model = paddle.jit.to_static(
         model_dy,
         input_spec=input_spec,
         build_strategy=build_strategy,
         full_graph=True,
     )
-    compilation_end_time = time.time()
-    compilation_duration = compilation_end_time - compilation_start_time
     compiled_model.eval()
     for _ in range(args.warmup if args.warmup > 0 else 0):
         compiled_model(**input_dict)
@@ -170,7 +167,7 @@ def print_cmp(key, func, **kwargs):
     print_cmp("cmp.diff_count_atol2_rtol1", get_cmp_diff_count, atol=1e-2, rtol=1e-1)
 
     print(
-        f"{args.log_prompt} duration model_path:{args.model_path} eager:{eager_duration_box.value} compiled:{compiled_duration_box.value} compilation_time:{compilation_duration}",
+        f"{args.log_prompt} duration model_path:{args.model_path} eager:{eager_duration_box.value} compiled:{compiled_duration_box.value}",
         file=sys.stderr,
     )
 
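
The revert removes the compilation_duration measurement; the eager and compiled durations that remain in the log line are presumably wall-clock timings of the forward pass captured elsewhere in test_single_model. A minimal sketch of how such a comparison is typically taken around a warmup loop like the one shown above (timed_run and its parameters are illustrative, not code from test_compiler.py):

import time


def timed_run(model, input_dict, warmup=3, repeats=10):
    # Discard warmup iterations so one-time costs (tracing, kernel caching)
    # do not skew the measurement; mirrors the warmup loop above.
    for _ in range(max(warmup, 0)):
        model(**input_dict)
    start = time.time()
    for _ in range(repeats):
        model(**input_dict)
    return (time.time() - start) / repeats


# Hypothetical usage with the objects built in test_single_model:
#   eager_duration = timed_run(model_dy, input_dict)
#   compiled_duration = timed_run(compiled_model, input_dict)
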
graph_net/torch/utils.py

Lines changed: 4 additions & 6 deletions
@@ -10,8 +10,6 @@
 import inspect
 import math
 
-kLiteralTensorSize = 64
-
 
 def apply_templates(forward_code: str) -> str:
     tab = " "
@@ -54,7 +52,7 @@ def process_tensor(tensor):
 
     info = tensor_info(tensor)
     if tensor.dtype in [torch.int8, torch.int16, torch.int32, torch.int64]:
-        if tensor.numel() < kLiteralTensorSize:
+        if tensor.numel() < 64:
             return {
                 "type": "small_int_tensor",
                 "data": tensor.clone(),
@@ -67,7 +65,7 @@ def process_tensor(tensor):
             "max_val": tensor.max().item(),
             "info": info,
         }
-    elif tensor.numel() < kLiteralTensorSize:
+    elif tensor.numel() < 64:
        return {"type": "small_tensor", "data": tensor.clone(), "info": info}
    else:
        return {"type": "random_tensor", "info": info}
@@ -82,7 +80,7 @@ def process_tensor(tensor):
 def handle_named_tensors(tensor):
     info = tensor_info(tensor)
     if tensor.dtype in [torch.int8, torch.int16, torch.int32, torch.int64]:
-        if tensor.numel() < kLiteralTensorSize:
+        if tensor.numel() < 64:
             return {
                 "info": info,
                 "data": tensor.clone(),
@@ -95,7 +93,7 @@ def handle_named_tensors(tensor):
             "max_val": tensor.max().item(),
             "type": "big_int_tensor_by_range",
         }
-    if tensor.numel() < kLiteralTensorSize:
+    if tensor.numel() < 64:
         return {"info": info, "data": tensor.clone(), "type": "small_tensor"}
     else:
         return {"info": info, "data": None, "type": "random_tensor"}
