diff --git a/test/deprecated/ir/inference/inference_pass_test.py b/test/deprecated/ir/inference/inference_pass_test.py
index acf9b68aefa45..2ee848a9a4dcf 100644
--- a/test/deprecated/ir/inference/inference_pass_test.py
+++ b/test/deprecated/ir/inference/inference_pass_test.py
@@ -37,7 +37,7 @@ def __init__(self, methodName='runTest'):
         self.feeds = None
         self.fetch_list = None
 
-        self.enable_mkldnn = False
+        self.enable_onednn = False
         self.enable_onednn_bfloat16 = False
         self.enable_trt = False
         self.enable_tensorrt_varseqlen = False
@@ -284,10 +284,10 @@ def check_output_with_option(
             )
 
         # Check whether the onednn results and the CPU results are the same.
-        if (not use_gpu) and self.enable_mkldnn:
+        if (not use_gpu) and self.enable_onednn:
             onednn_outputs = self._get_inference_outs(
                 self._get_analysis_config(
-                    use_gpu=use_gpu, use_onednn=self.enable_mkldnn
+                    use_gpu=use_gpu, use_onednn=self.enable_onednn
                 )
             )
 
diff --git a/test/deprecated/ir/inference/quant_dequant_test.py b/test/deprecated/ir/inference/quant_dequant_test.py
index cb3ddc06b76f1..416384fca581c 100644
--- a/test/deprecated/ir/inference/quant_dequant_test.py
+++ b/test/deprecated/ir/inference/quant_dequant_test.py
@@ -46,7 +46,7 @@ def __init__(self, methodName='runTest'):
         self.test_startup_program = paddle.static.Program()
         self.feeds = None
         self.fetch_list = None
-        self.enable_mkldnn = False
+        self.enable_onednn = False
         self.enable_onednn_bfloat16 = False
         self.enable_trt = False
         self.enable_tensorrt_varseqlen = True
@@ -387,10 +387,10 @@ def check_output_with_option(
             )
 
         # Check whether the onednn results and the CPU results are the same.
-        if (not use_gpu) and self.enable_mkldnn:
+        if (not use_gpu) and self.enable_onednn:
             onednn_outputs = self._get_inference_outs(
                 self._get_analysis_config(
-                    use_gpu=use_gpu, use_onednn=self.enable_mkldnn
+                    use_gpu=use_gpu, use_onednn=self.enable_onednn
                 )
             )
 
diff --git a/test/deprecated/ir/inference/test_trt_inference_predictor_deprecated.py b/test/deprecated/ir/inference/test_trt_inference_predictor_deprecated.py
index 5f2f954479678..9a5a0ec8fb7e2 100644
--- a/test/deprecated/ir/inference/test_trt_inference_predictor_deprecated.py
+++ b/test/deprecated/ir/inference/test_trt_inference_predictor_deprecated.py
@@ -96,14 +96,14 @@ def load(self, config_arg, inputs=None, outputs=None):
         elif self.args.precision == 'int8':
             precision_mode = paddle_infer.PrecisionType.Int8
 
-        if self.args.enable_mkldnn and not self.args.enable_gpu:
+        if self.args.enable_onednn and not self.args.enable_gpu:
             config.disable_gpu()
             config.enable_onednn()
             if self.args.precision == 'int8':
                 config.enable_onednn_int8(
                     {"conv2d", "depthwise_conv2d", "transpose2", "pool2d"}
                 )
-        if not self.args.enable_mkldnn and not self.args.enable_gpu:
+        if not self.args.enable_onednn and not self.args.enable_gpu:
             config.disable_gpu()
             # config.enable_onednn()
         if self.args.enable_profile:
@@ -251,7 +251,7 @@ def parse_args():
     parser.add_argument(
         '--paddle_params_file', type=str, default="model.pdiparams"
     )
-    parser.add_argument('--enable_mkldnn', type=str2bool, default=False)
+    parser.add_argument('--enable_onednn', type=str2bool, default=False)
     parser.add_argument('--enable_gpu', type=str2bool, default=True)
     parser.add_argument('--enable_trt', type=str2bool, default=True)
     parser.add_argument('--enable_dynamic_shape', type=str2bool, default=True)
diff --git a/test/ir/inference/test_onednn_batch_norm_act_fuse_pass.py b/test/ir/inference/test_onednn_batch_norm_act_fuse_pass.py
index 5c7c091ca4f44..a807bee4a9992 100644
--- a/test/ir/inference/test_onednn_batch_norm_act_fuse_pass.py
+++ b/test/ir/inference/test_onednn_batch_norm_act_fuse_pass.py
@@ -33,9 +33,9 @@ def sample_program_config(self, draw):
         momentum = draw(st.floats(min_value=0.0, max_value=5))
         trainable_statistics = False
         use_global_stats = draw(st.booleans())
-        use_mkldnn1 = draw(st.sampled_from([True]))
+        use_onednn1 = draw(st.sampled_from([True]))
         use_cudnn = draw(st.booleans())
-        use_mkldnn2 = draw(st.sampled_from([True]))
+        use_onednn2 = draw(st.sampled_from([True]))
         batch_size = draw(st.integers(min_value=1, max_value=4))
         channel = draw(st.integers(min_value=1, max_value=64))
         input_dim1 = draw(st.integers(min_value=1, max_value=512))
@@ -78,7 +78,7 @@ def generate_weight():
                 'momentum': momentum,
                 'trainable_statistics': trainable_statistics,
                 'use_global_stats': use_global_stats,
-                'use_mkldnn': use_mkldnn1,
+                'use_mkldnn': use_onednn1,
             },
         )
 
@@ -86,7 +86,7 @@ def generate_weight():
             type='relu',
             inputs={'X': ['norm_output']},
             outputs={'Out': ['relu_output']},
-            attrs={'use_cudnn': use_cudnn, 'use_mkldnn': use_mkldnn2},
+            attrs={'use_cudnn': use_cudnn, 'use_mkldnn': use_onednn2},
         )
 
         model_net = [batch_norm_op, relu_op]
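For reference, the renamed predictor options can also be exercised outside the test harness. A minimal standalone sketch follows; the model file paths are placeholders, and only the Config calls (disable_gpu, enable_onednn, enable_onednn_int8) are taken from the hunks above.

    import paddle.inference as paddle_infer

    # Placeholder model files; substitute a real saved inference model.
    config = paddle_infer.Config("model.pdmodel", "model.pdiparams")
    config.disable_gpu()
    config.enable_onednn()  # enables oneDNN (formerly MKL-DNN) CPU kernels
    # Optional int8 path, using the same op set as the predictor test above.
    config.enable_onednn_int8(
        {"conv2d", "depthwise_conv2d", "transpose2", "pool2d"}
    )
    predictor = paddle_infer.create_predictor(config)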