From f049578765deb13340ff3fc3a1ab85e9a6bbe07d Mon Sep 17 00:00:00 2001
From: co63oc
Date: Sat, 9 Aug 2025 13:53:09 +0800
Subject: [PATCH 1/2] Rename mkldnn to onednn in deprecated inference tests

---
 test/deprecated/ir/inference/CMakeLists.txt         |  2 +-
 test/deprecated/ir/inference/auto_scan_test.py      |  8 ++++----
 test/deprecated/ir/inference/inference_pass_test.py | 10 +++++-----
 test/deprecated/ir/inference/quant_dequant_test.py  | 10 +++++-----
 .../test_trt_inference_predictor_deprecated.py      |  6 +++---
 5 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/test/deprecated/ir/inference/CMakeLists.txt b/test/deprecated/ir/inference/CMakeLists.txt
index 86f03ba89d9850..7fcff5451e2d2c 100755
--- a/test/deprecated/ir/inference/CMakeLists.txt
+++ b/test/deprecated/ir/inference/CMakeLists.txt
@@ -56,7 +56,7 @@ if(WIN32)
   endif()
 endif()
 
-if(NOT WITH_MKLDNN
+if(NOT WITH_ONEDNN
    AND NOT TENSORRT_FOUND
    AND NOT WITH_GPU)
   foreach(target ${TEST_INFERENCE_CPU_UT})
diff --git a/test/deprecated/ir/inference/auto_scan_test.py b/test/deprecated/ir/inference/auto_scan_test.py
index 752b5f32d011ba..16a8dbf24c8f30 100755
--- a/test/deprecated/ir/inference/auto_scan_test.py
+++ b/test/deprecated/ir/inference/auto_scan_test.py
@@ -226,7 +226,7 @@ def create_inference_config(
         self,
         passes: list[str] | None = None,
         use_gpu: bool = False,
-        use_mkldnn: bool = False,
+        use_onednn: bool = False,
         use_xpu: bool = False,
         ir_optim: bool | None = None,
     ):
@@ -238,7 +238,7 @@ def create_inference_config(
             config.switch_ir_optim(ir_optim)
         if use_gpu:
             config.enable_use_gpu(100, 0)
-        if not use_mkldnn:
+        if not use_onednn:
             config.disable_onednn()
         if use_xpu:
             config.enable_xpu()
@@ -337,7 +337,7 @@ def run_test(self, quant=False, *args, **kwargs):
     def inference_config_str(self, config) -> str:
         dic = {}
         enable_onednn = config.onednn_enabled()
-        dic["use_mkldnn"] = enable_onednn
+        dic["use_onednn"] = enable_onednn
         enable_gpu = config.use_gpu()
         dic["use_gpu"] = enable_gpu
         return str(dic)
@@ -573,7 +573,7 @@ def run_test(self, quant=False, prog_configs=None):
     def inference_config_str(self, config) -> str:
         dic = {}
         enable_onednn = config.onednn_enabled()
-        dic["use_mkldnn"] = enable_onednn
+        dic["use_onednn"] = enable_onednn
         enable_gpu = config.use_gpu()
         dic['use_gpu'] = enable_gpu
         enable_xpu = config.use_xpu()
diff --git a/test/deprecated/ir/inference/inference_pass_test.py b/test/deprecated/ir/inference/inference_pass_test.py
index 739716382f50bd..582fd57b1de8cd 100644
--- a/test/deprecated/ir/inference/inference_pass_test.py
+++ b/test/deprecated/ir/inference/inference_pass_test.py
@@ -37,7 +37,7 @@ def __init__(self, methodName='runTest'):
         self.feeds = None
         self.fetch_list = None
 
-        self.enable_mkldnn = False
+        self.enable_onednn = False
         self.enable_onednn_bfloat16 = False
         self.enable_trt = False
         self.enable_tensorrt_varseqlen = False
@@ -129,7 +129,7 @@ def _get_inference_outs(self, config):
         return outs
 
     def _get_analysis_config(
-        self, use_gpu=False, use_trt=False, use_mkldnn=False
+        self, use_gpu=False, use_trt=False, use_onednn=False
     ):
         '''
         Return a new object of AnalysisConfig.
@@ -177,7 +177,7 @@ def _get_analysis_config(
             if self.enable_tensorrt_varseqlen:
                 config.enable_tensorrt_varseqlen()
 
-        elif use_mkldnn:
+        elif use_onednn:
             config.enable_onednn()
             if self.enable_onednn_bfloat16:
                 config.enable_onednn_bfloat16()
@@ -284,10 +284,10 @@ def check_output_with_option(
             )
 
         # Check whether the onednn results and the CPU results are the same.
-        if (not use_gpu) and self.enable_mkldnn:
+        if (not use_gpu) and self.enable_onednn:
             onednn_outputs = self._get_inference_outs(
                 self._get_analysis_config(
-                    use_gpu=use_gpu, use_mkldnn=self.enable_mkldnn
+                    use_gpu=use_gpu, use_onednn=self.enable_onednn
                 )
             )
 
diff --git a/test/deprecated/ir/inference/quant_dequant_test.py b/test/deprecated/ir/inference/quant_dequant_test.py
index 69f2ddfaaa4fda..b13f1020e9ec26 100644
--- a/test/deprecated/ir/inference/quant_dequant_test.py
+++ b/test/deprecated/ir/inference/quant_dequant_test.py
@@ -46,7 +46,7 @@ def __init__(self, methodName='runTest'):
         self.test_startup_program = paddle.static.Program()
         self.feeds = None
         self.fetch_list = None
-        self.enable_mkldnn = False
+        self.enable_onednn = False
         self.enable_onednn_bfloat16 = False
         self.enable_trt = False
         self.enable_tensorrt_varseqlen = True
@@ -190,7 +190,7 @@ def _get_inference_outs(self, config):
         return outs
 
     def _get_analysis_config(
-        self, use_gpu=False, use_trt=False, use_mkldnn=False
+        self, use_gpu=False, use_trt=False, use_onednn=False
     ):
         '''
         Return a new object of AnalysisConfig.
@@ -230,7 +230,7 @@ def _get_analysis_config(
             if self.enable_tensorrt_varseqlen:
                 config.enable_tensorrt_varseqlen()
 
-        elif use_mkldnn:
+        elif use_onednn:
             config.enable_onednn()
             if self.enable_onednn_bfloat16:
                 config.enable_onednn_bfloat16()
@@ -387,10 +387,10 @@ def check_output_with_option(
             )
 
         # Check whether the onednn results and the CPU results are the same.
-        if (not use_gpu) and self.enable_mkldnn:
+        if (not use_gpu) and self.enable_onednn:
             onednn_outputs = self._get_inference_outs(
                 self._get_analysis_config(
-                    use_gpu=use_gpu, use_mkldnn=self.enable_mkldnn
+                    use_gpu=use_gpu, use_onednn=self.enable_onednn
                 )
             )
diff --git a/test/deprecated/ir/inference/test_trt_inference_predictor_deprecated.py b/test/deprecated/ir/inference/test_trt_inference_predictor_deprecated.py
index 5f2f954479678a..9a5a0ec8fb7e26 100644
--- a/test/deprecated/ir/inference/test_trt_inference_predictor_deprecated.py
+++ b/test/deprecated/ir/inference/test_trt_inference_predictor_deprecated.py
@@ -96,14 +96,14 @@ def load(self, config_arg, inputs=None, outputs=None):
         elif self.args.precision == 'int8':
             precision_mode = paddle_infer.PrecisionType.Int8
 
-        if self.args.enable_mkldnn and not self.args.enable_gpu:
+        if self.args.enable_onednn and not self.args.enable_gpu:
             config.disable_gpu()
             config.enable_onednn()
             if self.args.precision == 'int8':
                 config.enable_onednn_int8(
                     {"conv2d", "depthwise_conv2d", "transpose2", "pool2d"}
                 )
-        if not self.args.enable_mkldnn and not self.args.enable_gpu:
+        if not self.args.enable_onednn and not self.args.enable_gpu:
             config.disable_gpu()
             # config.enable_onednn()
         if self.args.enable_profile:
@@ -251,7 +251,7 @@ def parse_args():
     parser.add_argument(
         '--paddle_params_file', type=str, default="model.pdiparams"
     )
-    parser.add_argument('--enable_mkldnn', type=str2bool, default=False)
+    parser.add_argument('--enable_onednn', type=str2bool, default=False)
     parser.add_argument('--enable_gpu', type=str2bool, default=True)
    parser.add_argument('--enable_trt', type=str2bool, default=True)
     parser.add_argument('--enable_dynamic_shape', type=str2bool, default=True)

From 0be0d8c6c5242e2f836709ba58205391b40aace1 Mon Sep 17 00:00:00 2001
From: co63oc
Date: Mon, 11 Aug 2025 13:04:03 +0800
Subject: [PATCH 2/2] Rename use_mkldnn variables to use_onednn in
 batch_norm_act fuse pass test

---
 test/ir/inference/test_onednn_batch_norm_act_fuse_pass.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/test/ir/inference/test_onednn_batch_norm_act_fuse_pass.py b/test/ir/inference/test_onednn_batch_norm_act_fuse_pass.py
index 5c7c091ca4f445..a807bee4a9992e 100644
--- a/test/ir/inference/test_onednn_batch_norm_act_fuse_pass.py
+++ b/test/ir/inference/test_onednn_batch_norm_act_fuse_pass.py
@@ -33,9 +33,9 @@ def sample_program_config(self, draw):
         momentum = draw(st.floats(min_value=0.0, max_value=5))
         trainable_statistics = False
         use_global_stats = draw(st.booleans())
-        use_mkldnn1 = draw(st.sampled_from([True]))
+        use_onednn1 = draw(st.sampled_from([True]))
         use_cudnn = draw(st.booleans())
-        use_mkldnn2 = draw(st.sampled_from([True]))
+        use_onednn2 = draw(st.sampled_from([True]))
         batch_size = draw(st.integers(min_value=1, max_value=4))
         channel = draw(st.integers(min_value=1, max_value=64))
         input_dim1 = draw(st.integers(min_value=1, max_value=512))
@@ -78,7 +78,7 @@ def generate_weight():
                 'momentum': momentum,
                 'trainable_statistics': trainable_statistics,
                 'use_global_stats': use_global_stats,
-                'use_mkldnn': use_mkldnn1,
+                'use_mkldnn': use_onednn1,
             },
         )
 
@@ -86,7 +86,7 @@ def generate_weight():
             type='relu',
             inputs={'X': ['norm_output']},
             outputs={'Out': ['relu_output']},
-            attrs={'use_cudnn': use_cudnn, 'use_mkldnn': use_mkldnn2},
+            attrs={'use_cudnn': use_cudnn, 'use_mkldnn': use_onednn2},
         )
 
         model_net = [batch_norm_op, relu_op]
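
Usage note (a minimal sketch, not part of the patches above): the series only renames test-side flag names; the Config methods they drive are the ones already visible in the diffs (enable_onednn, disable_onednn, onednn_enabled). The sketch below mirrors the renamed use_onednn switch as the updated _get_analysis_config() helper uses it; the model/params paths are placeholder assumptions.

    # Hedged sketch of the renamed use_onednn switch, assuming a current
    # Paddle build that exposes the onednn-named Config methods seen above.
    from paddle.inference import Config, create_predictor

    def make_config(use_onednn=False):
        # Placeholder model files; any exported inference model would do.
        config = Config("model.pdmodel", "model.pdiparams")
        config.disable_gpu()
        if use_onednn:
            config.enable_onednn()  # previously gated by the use_mkldnn flag
        return config

    config = make_config(use_onednn=True)
    assert config.onednn_enabled()  # the accessor inference_config_str() reads
    predictor = create_predictor(config)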