@@ -2239,28 +2239,26 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
     if (VT.getVectorElementType() == MVT::i64) {
       setPartialReduceMLAAction(MLAOps, VT,
                                 MVT::getVectorVT(MVT::i8, NumElts * 8), Custom);
-      setPartialReduceMLAAction(
-          MLAOps, VT, MVT::getVectorVT(MVT::i16, NumElts * 4), Custom);
-      setPartialReduceMLAAction(
-          MLAOps, VT, MVT::getVectorVT(MVT::i32, NumElts * 2), Custom);
+      setPartialReduceMLAAction(MLAOps, VT,
+                                MVT::getVectorVT(MVT::i16, NumElts * 4), Custom);
+      setPartialReduceMLAAction(MLAOps, VT,
+                                MVT::getVectorVT(MVT::i32, NumElts * 2), Custom);
     } else if (VT.getVectorElementType() == MVT::i32) {
       setPartialReduceMLAAction(MLAOps, VT,
                                 MVT::getVectorVT(MVT::i8, NumElts * 4), Custom);
-      setPartialReduceMLAAction(
-          MLAOps, VT, MVT::getVectorVT(MVT::i16, NumElts * 2), Custom);
+      setPartialReduceMLAAction(MLAOps, VT,
+                                MVT::getVectorVT(MVT::i16, NumElts * 2), Custom);
     } else if (VT.getVectorElementType() == MVT::i16) {
       setPartialReduceMLAAction(MLAOps, VT,
                                 MVT::getVectorVT(MVT::i8, NumElts * 2), Custom);
     }
-    if(Subtarget->hasMatMulInt8()) {
+    if (Subtarget->hasMatMulInt8()) {
       if (VT.getVectorElementType() == MVT::i32)
-        setPartialReduceMLAAction(ISD::PARTIAL_REDUCE_SUMLA, VT,
-                                  MVT::getVectorVT(MVT::i8, NumElts * 4),
-                                  Custom);
+        setPartialReduceMLAAction(ISD::PARTIAL_REDUCE_SUMLA, VT,
+                                  MVT::getVectorVT(MVT::i8, NumElts * 4), Custom);
       else if (VT.getVectorElementType() == MVT::i64)
-        setPartialReduceMLAAction(ISD::PARTIAL_REDUCE_SUMLA, VT,
-                                  MVT::getVectorVT(MVT::i8, NumElts * 8),
-                                  Custom);
+        setPartialReduceMLAAction(ISD::PARTIAL_REDUCE_SUMLA, VT,
+                                  MVT::getVectorVT(MVT::i8, NumElts * 8), Custom);
     }
 
     // Lower fixed length vector operations to scalable equivalents.
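
For context: the hunk above keeps the PARTIAL_REDUCE_SMLA/UMLA opcodes (and, when the subtarget reports hasMatMulInt8(), PARTIAL_REDUCE_SUMLA) marked as Custom for fixed-length SVE types, keyed on the accumulator type plus a wider-lane input type; only the wrapping of the setPartialReduceMLAAction calls changes. The standalone sketch below is not LLVM's TargetLowering API; the ActionTable class and the string-named value types are invented purely to illustrate the (opcode, accumulator VT, input VT) -> action registration pattern the diff exercises.

```cpp
// Standalone sketch, not LLVM code: ActionTable and the string VT names are
// hypothetical. It mirrors the pattern above of recording a lowering action
// per (opcode, accumulator type, input type) triple.
#include <cstdio>
#include <map>
#include <string>
#include <tuple>
#include <vector>

enum LegalizeAction { Expand, Custom };

class ActionTable {
  std::map<std::tuple<std::string, std::string, std::string>, LegalizeAction>
      Actions;

public:
  // Register one action per opcode, like the MLAOps array in the diff, where
  // the signed and unsigned MLA opcodes share a single setting.
  void setPartialReduceMLAAction(const std::vector<std::string> &Opcodes,
                                 const std::string &AccVT,
                                 const std::string &InVT,
                                 LegalizeAction Action) {
    for (const auto &Opc : Opcodes)
      Actions[{Opc, AccVT, InVT}] = Action;
  }

  LegalizeAction getAction(const std::string &Opc, const std::string &AccVT,
                           const std::string &InVT) const {
    auto It = Actions.find({Opc, AccVT, InVT});
    return It == Actions.end() ? Expand : It->second;
  }
};

int main() {
  ActionTable TLI;
  bool HasMatMulInt8 = true; // stands in for Subtarget->hasMatMulInt8()

  // A v2i64 accumulator takes i8 inputs with 8x the lanes, i16 with 4x,
  // and i32 with 2x, matching the NumElts multipliers in the diff.
  TLI.setPartialReduceMLAAction({"SMLA", "UMLA"}, "v2i64", "v16i8", Custom);
  TLI.setPartialReduceMLAAction({"SMLA", "UMLA"}, "v2i64", "v8i16", Custom);
  TLI.setPartialReduceMLAAction({"SMLA", "UMLA"}, "v2i64", "v4i32", Custom);
  if (HasMatMulInt8) // mixed-signedness variant is gated on the i8mm feature
    TLI.setPartialReduceMLAAction({"SUMLA"}, "v2i64", "v16i8", Custom);

  std::printf("SUMLA v2i64 <- v16i8: %s\n",
              TLI.getAction("SUMLA", "v2i64", "v16i8") == Custom ? "Custom"
                                                                 : "Expand");
  return 0;
}
```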