@@ -187,6 +187,8 @@ static bool isMergePassthruOpcode(unsigned Opc) {
   case AArch64ISD::CTLZ_MERGE_PASSTHRU:
   case AArch64ISD::CTPOP_MERGE_PASSTHRU:
   case AArch64ISD::DUP_MERGE_PASSTHRU:
+  case AArch64ISD::ABS_MERGE_PASSTHRU:
+  case AArch64ISD::NEG_MERGE_PASSTHRU:
   case AArch64ISD::FNEG_MERGE_PASSTHRU:
   case AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU:
   case AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU:
@@ -1097,6 +1099,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::SHL, VT, Custom);
       setOperationAction(ISD::SRL, VT, Custom);
       setOperationAction(ISD::SRA, VT, Custom);
+      setOperationAction(ISD::ABS, VT, Custom);
       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
@@ -1345,6 +1348,7 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
 
   // Lower fixed length vector operations to scalable equivalents.
+  setOperationAction(ISD::ABS, VT, Custom);
   setOperationAction(ISD::ADD, VT, Custom);
   setOperationAction(ISD::AND, VT, Custom);
   setOperationAction(ISD::ANY_EXTEND, VT, Custom);
@@ -1743,6 +1747,8 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
     MAKE_CASE(AArch64ISD::FSQRT_MERGE_PASSTHRU)
     MAKE_CASE(AArch64ISD::FRECPX_MERGE_PASSTHRU)
     MAKE_CASE(AArch64ISD::FABS_MERGE_PASSTHRU)
+    MAKE_CASE(AArch64ISD::ABS_MERGE_PASSTHRU)
+    MAKE_CASE(AArch64ISD::NEG_MERGE_PASSTHRU)
     MAKE_CASE(AArch64ISD::SETCC_MERGE_ZERO)
     MAKE_CASE(AArch64ISD::ADC)
     MAKE_CASE(AArch64ISD::SBC)
@@ -3661,6 +3667,12 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::aarch64_sve_fabs:
     return DAG.getNode(AArch64ISD::FABS_MERGE_PASSTHRU, dl, Op.getValueType(),
                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
+  case Intrinsic::aarch64_sve_abs:
+    return DAG.getNode(AArch64ISD::ABS_MERGE_PASSTHRU, dl, Op.getValueType(),
+                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
+  case Intrinsic::aarch64_sve_neg:
+    return DAG.getNode(AArch64ISD::NEG_MERGE_PASSTHRU, dl, Op.getValueType(),
+                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
   case Intrinsic::aarch64_sve_convert_to_svbool: {
     EVT OutVT = Op.getValueType();
     EVT InVT = Op.getOperand(1).getValueType();
@@ -4163,9 +4175,12 @@ SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
 }
 
 // Generate SUBS and CSEL for integer abs.
-static SDValue LowerABS(SDValue Op, SelectionDAG &DAG) {
+SDValue AArch64TargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
   MVT VT = Op.getSimpleValueType();
 
+  if (VT.isVector())
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABS_MERGE_PASSTHRU);
+
   SDLoc DL(Op);
   SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                             Op.getOperand(0));
0 commit comments