Skip to content

Commit 5affe1a

Browse files
co63oc and maxiaolong001
authored and committed
test/ directory modify use_mkldnn [fluid_ops] - part (PaddlePaddle#74487)
* Fix * Fix * Fix
1 parent 52352c9 commit 5affe1a

File tree

115 files changed

+305
-285
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

115 files changed

+305
-285
lines changed

paddle/fluid/framework/ir/onednn/activation_onednn_fuse_pass.h

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,12 +59,21 @@ inline std::unordered_map<std::string, std::string> GetAttributeMap(
5959
inline void SetActivationAttrs(paddle::framework::OpDesc* fused_op,
6060
paddle::framework::OpDesc* act_op,
6161
const std::string& act_type) {
62-
if (fused_op->HasAttr("use_mkldnn")) {
62+
bool use_mkldnn = false;
63+
if (fused_op->HasAttr("use_mkldnn") && !fused_op->HasAttr("use_onednn")) {
6364
PADDLE_ENFORCE(PADDLE_GET_CONST(bool, fused_op->GetAttr("use_mkldnn")),
6465
common::errors::PreconditionNotMet(
65-
"oneDNN activation fuses require use_mkldnn=True"));
66+
"oneDNN activation fuses require use_onednn=True"));
67+
}
68+
if (fused_op->HasAttr("use_mkldnn")) {
69+
use_mkldnn = PADDLE_GET_CONST(bool, fused_op->GetAttr("use_mkldnn"));
70+
}
71+
if (!use_mkldnn && fused_op->HasAttr("use_onednn")) {
72+
PADDLE_ENFORCE(PADDLE_GET_CONST(bool, fused_op->GetAttr("use_onednn")),
73+
common::errors::PreconditionNotMet(
74+
"oneDNN activation fuses require use_onednn=True"));
6675
}
67-
fused_op->SetAttr("use_mkldnn", true);
76+
fused_op->SetAttr("use_onednn", true);
6877

6978
auto attr_map = GetAttributeMap(act_type);
7079
for (const auto& attr : attr_map) {

python/paddle/static/quantization/quant_int8_onednn_pass.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -177,7 +177,7 @@ def _transform_to_conv_onednn(self, graph, op_node):
177177
conv_op_node.set_attr("Scale_weights", scale_w)
178178
conv_op_node.set_attr("Scale_in", scale_in)
179179
conv_op_node.set_attr("Scale_out", 1.0)
180-
conv_op_node.set_attr("use_mkldnn", 1)
180+
conv_op_node.set_attr("use_onednn", 1)
181181
conv_op_node.set_attr("force_fp32_output", 1)
182182
graph.link_to(input_var_node, conv_op_node)
183183
graph.link_to(weight_var_node, conv_op_node)
@@ -223,7 +223,7 @@ def _transform_to_mul_onednn(self, graph, op_node):
223223
mul_op_node.set_attr("scale_y", scale_w)
224224
mul_op_node.set_attr("scale_x", scale_in)
225225
mul_op_node.set_attr("scale_out", 1.0)
226-
mul_op_node.set_attr("use_mkldnn", 1)
226+
mul_op_node.set_attr("use_onednn", 1)
227227
mul_op_node.set_attr("force_fp32_output", 1)
228228
graph.link_to(input_var_node, mul_op_node)
229229
graph.link_to(weight_var_node, mul_op_node)
@@ -248,7 +248,7 @@ def _transform_to_quantize_onednn(self, graph, op_node):
248248
op_type='quantize',
249249
attrs={
250250
'data_format': 'ONEDNNLAYOUT',
251-
'use_mkldnn': 1,
251+
'use_onednn': 1,
252252
'Scale': scale_in,
253253
'is_negative_input': 1,
254254
},

test/deprecated/ir/inference/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ if(WIN32)
5656

5757
endif()
5858

59-
if(NOT WITH_MKLDNN
59+
if(NOT WITH_ONEDNN
6060
AND NOT TENSORRT_FOUND
6161
AND NOT WITH_GPU)
6262
foreach(target ${TEST_INFERENCE_CPU_UT})

test/deprecated/ir/inference/auto_scan_test.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -226,7 +226,7 @@ def create_inference_config(
226226
self,
227227
passes: list[str] | None = None,
228228
use_gpu: bool = False,
229-
use_mkldnn: bool = False,
229+
use_onednn: bool = False,
230230
use_xpu: bool = False,
231231
ir_optim: bool | None = None,
232232
):
@@ -238,7 +238,7 @@ def create_inference_config(
238238
config.switch_ir_optim(ir_optim)
239239
if use_gpu:
240240
config.enable_use_gpu(100, 0)
241-
if not use_mkldnn:
241+
if not use_onednn:
242242
config.disable_onednn()
243243
if use_xpu:
244244
config.enable_xpu()
@@ -337,7 +337,7 @@ def run_test(self, quant=False, *args, **kwargs):
337337
def inference_config_str(self, config) -> str:
338338
dic = {}
339339
enable_onednn = config.onednn_enabled()
340-
dic["use_mkldnn"] = enable_onednn
340+
dic["use_onednn"] = enable_onednn
341341
enable_gpu = config.use_gpu()
342342
dic["use_gpu"] = enable_gpu
343343
return str(dic)
@@ -573,7 +573,7 @@ def run_test(self, quant=False, prog_configs=None):
573573
def inference_config_str(self, config) -> str:
574574
dic = {}
575575
enable_onednn = config.onednn_enabled()
576-
dic["use_mkldnn"] = enable_onednn
576+
dic["use_onednn"] = enable_onednn
577577
enable_gpu = config.use_gpu()
578578
dic['use_gpu'] = enable_gpu
579579
enable_xpu = config.use_xpu()

test/deprecated/ir/inference/inference_pass_test.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -129,7 +129,7 @@ def _get_inference_outs(self, config):
129129
return outs
130130

131131
def _get_analysis_config(
132-
self, use_gpu=False, use_trt=False, use_mkldnn=False
132+
self, use_gpu=False, use_trt=False, use_onednn=False
133133
):
134134
'''
135135
Return a new object of AnalysisConfig.
@@ -177,7 +177,7 @@ def _get_analysis_config(
177177
if self.enable_tensorrt_varseqlen:
178178
config.enable_tensorrt_varseqlen()
179179

180-
elif use_mkldnn:
180+
elif use_onednn:
181181
config.enable_onednn()
182182
if self.enable_onednn_bfloat16:
183183
config.enable_onednn_bfloat16()
@@ -186,7 +186,7 @@ def _get_analysis_config(
186186
def check_output(self, atol=1e-3):
187187
'''
188188
Check whether calculating on CPU and GPU, enable TensorRT
189-
or disable TensorRT, enable MKLDNN or disable MKLDNN
189+
or disable TensorRT, enable ONEDNN or disable ONEDNN
190190
are all the same.
191191
'''
192192
self.assertFalse(
@@ -201,7 +201,7 @@ def check_output_with_option(
201201
):
202202
'''
203203
Check whether calculating on CPU and GPU, enable TensorRT
204-
or disable TensorRT, enable MKLDNN or disable MKLDNN
204+
or disable TensorRT, enable ONEDNN or disable ONEDNN
205205
are all the same.
206206
'''
207207
place = base.CUDAPlace(0) if use_gpu else base.CPUPlace()
@@ -287,13 +287,13 @@ def check_output_with_option(
287287
if (not use_gpu) and self.enable_mkldnn:
288288
onednn_outputs = self._get_inference_outs(
289289
self._get_analysis_config(
290-
use_gpu=use_gpu, use_mkldnn=self.enable_mkldnn
290+
use_gpu=use_gpu, use_onednn=self.enable_mkldnn
291291
)
292292
)
293293

294294
self.assertTrue(
295295
len(paddle_outs) == len(onednn_outputs),
296-
"The number of outputs is different between CPU and MKLDNN. ",
296+
"The number of outputs is different between CPU and ONEDNN. ",
297297
)
298298

299299
if self.enable_onednn_bfloat16:
@@ -304,7 +304,7 @@ def check_output_with_option(
304304
onednn_output,
305305
rtol=1e-05,
306306
atol=atol,
307-
err_msg='Output has diff between CPU and MKLDNN. ',
307+
err_msg='Output has diff between CPU and ONEDNN. ',
308308
)
309309

310310
class TensorRTParam:

test/deprecated/ir/inference/quant_dequant_test.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -190,7 +190,7 @@ def _get_inference_outs(self, config):
190190
return outs
191191

192192
def _get_analysis_config(
193-
self, use_gpu=False, use_trt=False, use_mkldnn=False
193+
self, use_gpu=False, use_trt=False, use_onednn=False
194194
):
195195
'''
196196
Return a new object of AnalysisConfig.
@@ -230,7 +230,7 @@ def _get_analysis_config(
230230
if self.enable_tensorrt_varseqlen:
231231
config.enable_tensorrt_varseqlen()
232232

233-
elif use_mkldnn:
233+
elif use_onednn:
234234
config.enable_onednn()
235235
if self.enable_onednn_bfloat16:
236236
config.enable_onednn_bfloat16()
@@ -241,7 +241,7 @@ def check_output_with_option(
241241
):
242242
'''
243243
Check whether calculating on CPU and GPU, enable TensorRT
244-
or disable TensorRT, enable MKLDNN or disable MKLDNN
244+
or disable TensorRT, enable ONEDNN or disable ONEDNN
245245
are all the same.
246246
'''
247247
place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
@@ -390,13 +390,13 @@ def check_output_with_option(
390390
if (not use_gpu) and self.enable_mkldnn:
391391
onednn_outputs = self._get_inference_outs(
392392
self._get_analysis_config(
393-
use_gpu=use_gpu, use_mkldnn=self.enable_mkldnn
393+
use_gpu=use_gpu, use_onednn=self.enable_mkldnn
394394
)
395395
)
396396

397397
self.assertTrue(
398398
len(paddle_outs) == len(onednn_outputs),
399-
"The number of outputs is different between CPU and MKLDNN. ",
399+
"The number of outputs is different between CPU and ONEDNN. ",
400400
)
401401

402402
if self.enable_onednn_bfloat16:
@@ -407,7 +407,7 @@ def check_output_with_option(
407407
onednn_output,
408408
rtol=1e-05,
409409
atol=atol,
410-
err_msg='Output has diff between CPU and MKLDNN. ',
410+
err_msg='Output has diff between CPU and ONEDNN. ',
411411
)
412412

413413
class TensorRTParam:

test/deprecated/legacy_test/test_batch_norm_op_deprecated.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -303,7 +303,7 @@ def test_with_place(place, data_layout, shape):
303303
"epsilon": epsilon,
304304
"is_test": False,
305305
"data_layout": data_layout,
306-
"use_mkldnn": self.use_onednn,
306+
"use_onednn": self.use_onednn,
307307
"fuse_with_relu": self.fuse_with_relu,
308308
"use_global_stats": self.use_global_stats,
309309
}

test/deprecated/legacy_test/test_layer_norm_op_deprecated.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -142,10 +142,10 @@ def check_forward_backward(
142142
has_scale=True,
143143
has_bias=True,
144144
y_grad_scale=1.0,
145-
use_mkldnn=False,
145+
use_onednn=False,
146146
):
147147
def test_with_place(
148-
place, shape, begin_norm_axis, use_mkldnn=use_mkldnn
148+
place, shape, begin_norm_axis, use_onednn=use_onednn
149149
):
150150
# attr
151151
epsilon = 0.00001
@@ -221,7 +221,7 @@ def test_with_place(
221221
attrs={
222222
"epsilon": epsilon,
223223
"begin_norm_axis": begin_norm_axis,
224-
"use_mkldnn": use_mkldnn,
224+
"use_onednn": use_onednn,
225225
},
226226
)
227227
# generate backward op_desc

test/deprecated/legacy_test/test_program_deprecated.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ class TestProgramProto(unittest.TestCase):
153153
def test_update_op(self):
154154
program = build_program()
155155
a = program.desc.serialize_to_string()
156-
program.current_block().ops[0]._set_attr('use_mkldnn', True)
156+
program.current_block().ops[0]._set_attr('use_onednn', True)
157157
self.assertTrue(program.desc.need_update())
158158
b = program.desc.serialize_to_string()
159159
self.assertFalse(a == b)
@@ -230,7 +230,7 @@ def test_program_update(self):
230230
hash1 = program.desc.cached_hash_str()
231231
id1 = id(program)
232232
# change mul's attr
233-
program.current_block().ops[0]._set_attr('use_mkldnn', True)
233+
program.current_block().ops[0]._set_attr('use_onednn', True)
234234
program.current_block().ops[0]._set_attr('scale_x', 2.0)
235235
hash2 = program.desc.cached_hash_str()
236236
id2 = id(program)

test/deprecated/mkldnn/CMakeLists.txt

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,12 @@
11
file(
2-
GLOB TEST_MKLDNN_LISTS
2+
GLOB TEST_ONEDNN_LISTS
33
RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
44
"test_*.py")
5-
string(REPLACE ".py" "" TEST_MKLDNN_LISTS "${TEST_MKLDNN_LISTS}")
5+
string(REPLACE ".py" "" TEST_ONEDNN_LISTS "${TEST_ONEDNN_LISTS}")
66
if(WIN32)
77
message(STATUS "Skip tests unrelated to onednn/mkldnn")
88
elseif(WITH_ONEDNN)
9-
foreach(target ${TEST_MKLDNN_LISTS})
9+
foreach(target ${TEST_ONEDNN_LISTS})
1010
py_test_modules(${target} MODULES ${target})
1111
set_tests_properties(${target} PROPERTIES LABELS "RUN_TYPE=INFER" TIMEOUT
1212
120)

0 commit comments

Comments (0)