
Commit 0e4d8df

add power condition in blas.cpp
1 parent 0feb31e

File tree

1 file changed (+18, -15)


aten/src/ATen/native/Blas.cpp

Lines changed: 18 additions & 15 deletions
@@ -297,22 +297,25 @@ _scaled_mm_out_cpu(const Tensor& mat1, const Tensor& mat2,
     bool use_fast_accum,
     Tensor& out) {
 #if AT_MKLDNN_ENABLED()
-  if (at::globalContext().userEnabledMkldnn()) {
-    bool mixed_dtype = mat1.scalar_type() != mat2.scalar_type();
-    if ((!mixed_dtype && cpuinfo_has_x86_amx_int8()) ||
-        (mixed_dtype && cpuinfo_has_x86_amx_fp16())) {
-      return mkldnn_scaled_mm(
-          mat1,
-          mat2,
-          scale_a,
-          scale_b,
-          bias,
-          scale_result,
-          out_dtype,
-          use_fast_accum,
-          out);
+#ifndef __powerpc__
+  if (at::globalContext().userEnabledMkldnn()) {
+    bool mixed_dtype = mat1.scalar_type() != mat2.scalar_type();
+    if ((!mixed_dtype && cpuinfo_has_x86_amx_int8()) ||
+        (mixed_dtype && cpuinfo_has_x86_amx_fp16())) {
+      return mkldnn_scaled_mm(
+          mat1,
+          mat2,
+          scale_a,
+          scale_b,
+          bias,
+          scale_result,
+          out_dtype,
+          use_fast_accum,
+          out);
+    }
+
     }
-  }
+#endif
 #endif
   {
     return _scaled_mm_out_cpu_emulated(mat1, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum, out);
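
The net effect of the hunk is that the oneDNN fast path in _scaled_mm_out_cpu is compiled out entirely on PowerPC, so ppc64le builds always fall through to _scaled_mm_out_cpu_emulated. Below is a minimal standalone sketch of that guard pattern, not PyTorch code: the names run_scaled_mm, fast_scaled_mm and emulated_scaled_mm are hypothetical stand-ins for mkldnn_scaled_mm and _scaled_mm_out_cpu_emulated, and the boolean parameters stand in for the userEnabledMkldnn() and cpuinfo AMX checks. Only the "#ifndef __powerpc__" structure mirrors the commit.

// Minimal standalone sketch of the compile-time guard added in this commit.
// All identifiers are hypothetical; only the preprocessor structure is real.
#include <iostream>

namespace {

// Portable fallback, always compiled on every architecture
// (stand-in for _scaled_mm_out_cpu_emulated).
int emulated_scaled_mm() {
  std::cout << "emulated path\n";
  return 0;
}

#if !defined(__powerpc__)
// Fast path (stand-in for mkldnn_scaled_mm); not compiled on PowerPC at all.
int fast_scaled_mm() {
  std::cout << "mkldnn/AMX fast path\n";
  return 0;
}
#endif

// Mirrors the post-commit control flow: the runtime checks still run on
// non-PowerPC builds, but the whole branch disappears on ppc64le.
int run_scaled_mm(bool mkldnn_enabled, bool amx_supported) {
#if !defined(__powerpc__)
  if (mkldnn_enabled && amx_supported) {
    return fast_scaled_mm();
  }
#else
  (void)mkldnn_enabled;  // parameters are only consulted on non-PowerPC builds
  (void)amx_supported;
#endif
  return emulated_scaled_mm();
}

}  // namespace

int main() {
  // Prints "mkldnn/AMX fast path" on non-PowerPC builds with both flags true;
  // always prints "emulated path" when built for ppc64le.
  return run_scaled_mm(/*mkldnn_enabled=*/true, /*amx_supported=*/true);
}

Guarding with the preprocessor rather than a runtime check means the x86-specific AMX probes never have to compile or run on PowerPC, which appears to be the "power condition" the commit message refers to.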

0 commit comments