
Commit a4280d4

test/deprecated/ir/inference modify use_mkldnn [fluid_ops] (#74504)
* Fix
* Fix
1 parent 5fe5131 commit a4280d4

4 files changed: +13 -13 lines changed


test/deprecated/ir/inference/inference_pass_test.py

Lines changed: 3 additions & 3 deletions
@@ -37,7 +37,7 @@ def __init__(self, methodName='runTest'):
        self.feeds = None
        self.fetch_list = None

-        self.enable_mkldnn = False
+        self.enable_onednn = False
        self.enable_onednn_bfloat16 = False
        self.enable_trt = False
        self.enable_tensorrt_varseqlen = False
@@ -284,10 +284,10 @@ def check_output_with_option(
        )

        # Check whether the onednn results and the CPU results are the same.
-        if (not use_gpu) and self.enable_mkldnn:
+        if (not use_gpu) and self.enable_onednn:
            onednn_outputs = self._get_inference_outs(
                self._get_analysis_config(
-                    use_gpu=use_gpu, use_onednn=self.enable_mkldnn
+                    use_gpu=use_gpu, use_onednn=self.enable_onednn
                )
            )
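For reference, the renamed `enable_onednn` flag is what the test forwards to `_get_analysis_config` as `use_onednn`. Below is a minimal sketch of the kind of CPU-vs-oneDNN config such a helper builds with the Paddle inference API; the `run_model` name and the model/params paths are illustrative, not part of this test.

from paddle.inference import Config, create_predictor


def run_model(prog_file, params_file, feed, use_onednn=False):
    # CPU-only config; enable_onednn() is the switch this PR standardizes on
    # (the tests previously exposed it through an enable_mkldnn flag).
    config = Config(prog_file, params_file)
    config.disable_gpu()
    if use_onednn:
        config.enable_onednn()
    predictor = create_predictor(config)
    # Copy the feed dict into the input handles and run one inference pass.
    for name in predictor.get_input_names():
        predictor.get_input_handle(name).copy_from_cpu(feed[name])
    predictor.run()
    return [
        predictor.get_output_handle(name).copy_to_cpu()
        for name in predictor.get_output_names()
    ]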

test/deprecated/ir/inference/quant_dequant_test.py

Lines changed: 3 additions & 3 deletions
@@ -46,7 +46,7 @@ def __init__(self, methodName='runTest'):
        self.test_startup_program = paddle.static.Program()
        self.feeds = None
        self.fetch_list = None
-        self.enable_mkldnn = False
+        self.enable_onednn = False
        self.enable_onednn_bfloat16 = False
        self.enable_trt = False
        self.enable_tensorrt_varseqlen = True
@@ -387,10 +387,10 @@ def check_output_with_option(
        )

        # Check whether the onednn results and the CPU results are the same.
-        if (not use_gpu) and self.enable_mkldnn:
+        if (not use_gpu) and self.enable_onednn:
            onednn_outputs = self._get_inference_outs(
                self._get_analysis_config(
-                    use_gpu=use_gpu, use_onednn=self.enable_mkldnn
+                    use_gpu=use_gpu, use_onednn=self.enable_onednn
                )
            )
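The comment above ("Check whether the onednn results and the CPU results are the same") boils down to an element-wise comparison of the two output lists. A small sketch of that check, with placeholder tolerances rather than the ones the test actually uses:

import numpy as np


def check_outputs_close(cpu_outputs, onednn_outputs, rtol=1e-5, atol=1e-5):
    # Compare CPU and oneDNN results pairwise; tolerances are placeholders.
    assert len(cpu_outputs) == len(onednn_outputs)
    for cpu_out, onednn_out in zip(cpu_outputs, onednn_outputs):
        np.testing.assert_allclose(cpu_out, onednn_out, rtol=rtol, atol=atol)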

test/deprecated/ir/inference/test_trt_inference_predictor_deprecated.py

Lines changed: 3 additions & 3 deletions
@@ -96,14 +96,14 @@ def load(self, config_arg, inputs=None, outputs=None):
        elif self.args.precision == 'int8':
            precision_mode = paddle_infer.PrecisionType.Int8

-        if self.args.enable_mkldnn and not self.args.enable_gpu:
+        if self.args.enable_onednn and not self.args.enable_gpu:
            config.disable_gpu()
            config.enable_onednn()
            if self.args.precision == 'int8':
                config.enable_onednn_int8(
                    {"conv2d", "depthwise_conv2d", "transpose2", "pool2d"}
                )
-        if not self.args.enable_mkldnn and not self.args.enable_gpu:
+        if not self.args.enable_onednn and not self.args.enable_gpu:
            config.disable_gpu()
            # config.enable_onednn()
        if self.args.enable_profile:
@@ -251,7 +251,7 @@ def parse_args():
    parser.add_argument(
        '--paddle_params_file', type=str, default="model.pdiparams"
    )
-    parser.add_argument('--enable_mkldnn', type=str2bool, default=False)
+    parser.add_argument('--enable_onednn', type=str2bool, default=False)
    parser.add_argument('--enable_gpu', type=str2bool, default=True)
    parser.add_argument('--enable_trt', type=str2bool, default=True)
    parser.add_argument('--enable_dynamic_shape', type=str2bool, default=True)
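The renamed `--enable_onednn` flag is parsed through a `str2bool` converter defined elsewhere in this test. A typical implementation of such a converter, shown here as an assumption since the real helper sits outside this diff:

import argparse


def str2bool(v):
    # Assumed boolean-string converter; the test's own helper is not shown here.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')


parser = argparse.ArgumentParser()
parser.add_argument('--enable_onednn', type=str2bool, default=False)
parser.add_argument('--enable_gpu', type=str2bool, default=True)
args = parser.parse_args(['--enable_onednn', 'true', '--enable_gpu', 'false'])
assert args.enable_onednn is True and args.enable_gpu is False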

test/ir/inference/test_onednn_batch_norm_act_fuse_pass.py

Lines changed: 4 additions & 4 deletions
@@ -33,9 +33,9 @@ def sample_program_config(self, draw):
        momentum = draw(st.floats(min_value=0.0, max_value=5))
        trainable_statistics = False
        use_global_stats = draw(st.booleans())
-        use_mkldnn1 = draw(st.sampled_from([True]))
+        use_onednn1 = draw(st.sampled_from([True]))
        use_cudnn = draw(st.booleans())
-        use_mkldnn2 = draw(st.sampled_from([True]))
+        use_onednn2 = draw(st.sampled_from([True]))
        batch_size = draw(st.integers(min_value=1, max_value=4))
        channel = draw(st.integers(min_value=1, max_value=64))
        input_dim1 = draw(st.integers(min_value=1, max_value=512))
@@ -78,15 +78,15 @@ def generate_weight():
                'momentum': momentum,
                'trainable_statistics': trainable_statistics,
                'use_global_stats': use_global_stats,
-                'use_mkldnn': use_mkldnn1,
+                'use_mkldnn': use_onednn1,
            },
        )

        relu_op = OpConfig(
            type='relu',
            inputs={'X': ['norm_output']},
            outputs={'Out': ['relu_output']},
-            attrs={'use_cudnn': use_cudnn, 'use_mkldnn': use_mkldnn2},
+            attrs={'use_cudnn': use_cudnn, 'use_mkldnn': use_onednn2},
        )

        model_net = [batch_norm_op, relu_op]
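Note that only the Python variable names change in this file; the operator attribute key itself stays 'use_mkldnn' in the diff. The renamed values come from Hypothesis draws, and a standalone sketch of that draw pattern (independent of Paddle's OpConfig machinery) might look like:

import hypothesis.strategies as st
from hypothesis import given


@st.composite
def bn_relu_attrs(draw):
    # Mirrors the draw style of sample_program_config; the dict is illustrative.
    return {
        'use_global_stats': draw(st.booleans()),
        'use_mkldnn': draw(st.sampled_from([True])),  # attribute key unchanged
        'use_cudnn': draw(st.booleans()),
        'batch_size': draw(st.integers(min_value=1, max_value=4)),
    }


@given(bn_relu_attrs())
def test_drawn_attrs_are_well_formed(attrs):
    assert attrs['use_mkldnn'] is True
    assert 1 <= attrs['batch_size'] <= 4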
