Commit 119216e

[AArch64][GlobalISel] Use TargetConstant for shift immediates (#161527)
This changes the intrinsic definitions for shifts to use ImmArg, which in turn changes how the shifts are represented in SDAG to use TargetConstant (and fixes up a number of ISel lowering places too). The vecshift immediates are changed from ImmLeaf to TImmLeaf so that they keep matching the TargetConstant. On the GISel side, constant shift amounts are then represented as immediate operands rather than as separate constants. The end result is that a few more patterns can match in GISel.
1 parent 8ae30a3 commit 119216e

24 files changed: +408 -752 lines changed
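As context for the diffs below, here is a minimal sketch, not taken from the commit, of the SelectionDAG idiom the patch switches to. A shift amount built with DAG.getConstant is an ordinary Constant node that instruction selection may materialize separately, while DAG.getTargetConstant yields a TargetConstant that stays inline as an immediate operand, which is what the TImmLeaf-based vecshift operands match. The helper name buildVShlByImm is hypothetical and assumes the usual AArch64 backend context of AArch64ISelLowering.cpp.

    // Hypothetical helper, sketched under the assumption that SelectionDAG,
    // SDValue, MVT and AArch64ISD are in scope as in AArch64ISelLowering.cpp.
    static SDValue buildVShlByImm(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                  SDValue Src, unsigned Cnt) {
      // Old form: a generic Constant node carried the shift amount.
      //   SDValue Amt = DAG.getConstant(Cnt, DL, MVT::i32);
      // New form: a TargetConstant keeps the amount as an immediate operand,
      // so the TImmLeaf-based vecshift patterns can match it directly.
      SDValue Amt = DAG.getTargetConstant(Cnt, DL, MVT::i32);
      return DAG.getNode(AArch64ISD::VSHL, DL, VT, Src, Amt);
    }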

llvm/include/llvm/IR/IntrinsicsAArch64.td

Lines changed: 5 additions & 5 deletions
@@ -162,7 +162,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
   class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                 [LLVMExtendedType<0>, llvm_i32_ty],
-                [IntrNoMem]>;
+                [IntrNoMem, ImmArg<ArgIndex<1>>]>;
   class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                 [LLVMTruncatedType<0>],
@@ -187,13 +187,13 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
   class AdvSIMD_3VectorArg_Scalar_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
-                [IntrNoMem]>;
+                [IntrNoMem, ImmArg<ArgIndex<2>>]>;
   class AdvSIMD_CvtFxToFP_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
-                [IntrNoMem]>;
+                [IntrNoMem, ImmArg<ArgIndex<1>>]>;
   class AdvSIMD_CvtFPToFx_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
-                [IntrNoMem]>;
+                [IntrNoMem, ImmArg<ArgIndex<1>>]>;
 
   class AdvSIMD_1Arg_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrNoMem]>;
@@ -221,7 +221,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
 
   // Arithmetic ops
 
-let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
+let TargetPrefix = "aarch64" in {
   // Vector Add Across Lanes
   def int_aarch64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
   def int_aarch64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 8 additions & 7 deletions
@@ -16461,7 +16461,7 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
 
   if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
     return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0),
-                       DAG.getConstant(Cnt, DL, MVT::i32));
+                       DAG.getTargetConstant(Cnt, DL, MVT::i32));
   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
                      DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL,
                                      MVT::i32),
@@ -16491,7 +16491,8 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
     unsigned Opc =
         (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR;
     return DAG.getNode(Opc, DL, VT, Op.getOperand(0),
-                       DAG.getConstant(Cnt, DL, MVT::i32), Op->getFlags());
+                       DAG.getTargetConstant(Cnt, DL, MVT::i32),
+                       Op->getFlags());
   }
 
   // Right shift register. Note, there is not a shift right register
@@ -19973,7 +19974,7 @@ static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
   SDValue FixConv =
       DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ResTy,
                   DAG.getConstant(IntrinsicOpcode, DL, MVT::i32),
-                  Op->getOperand(0), DAG.getConstant(C, DL, MVT::i32));
+                  Op->getOperand(0), DAG.getTargetConstant(C, DL, MVT::i32));
   // We can handle smaller integers by generating an extra trunc.
   if (IntBits < FloatBits)
     FixConv = DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), FixConv);
@@ -20696,7 +20697,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
       N100 = DAG.getNode(AArch64ISD::NVCAST, DL, VT, N100);
       SDValue Uzp = DAG.getNode(AArch64ISD::UZP2, DL, VT, N000, N100);
       SDValue NewShiftConstant =
-          DAG.getConstant(N001ConstVal - NScalarSize, DL, MVT::i32);
+          DAG.getTargetConstant(N001ConstVal - NScalarSize, DL, MVT::i32);
 
       return DAG.getNode(AArch64ISD::VLSHR, DL, VT, Uzp, NewShiftConstant);
     }
@@ -22373,14 +22374,14 @@ static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
 
   if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) {
     Op = DAG.getNode(Opcode, DL, VT, Op,
-                     DAG.getSignedConstant(-ShiftAmount, DL, MVT::i32));
+                     DAG.getSignedConstant(-ShiftAmount, DL, MVT::i32, true));
     if (N->getValueType(0) == MVT::i64)
       Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Op,
                        DAG.getConstant(0, DL, MVT::i64));
     return Op;
   } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
     Op = DAG.getNode(Opcode, DL, VT, Op,
-                     DAG.getConstant(ShiftAmount, DL, MVT::i32));
+                     DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
     if (N->getValueType(0) == MVT::i64)
       Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Op,
                        DAG.getConstant(0, DL, MVT::i64));
@@ -23198,7 +23199,7 @@ static SDValue performZExtUZPCombine(SDNode *N, SelectionDAG &DAG) {
                      Op.getOperand(ExtOffset == 0 ? 0 : 1));
     if (Shift != 0)
       BC = DAG.getNode(AArch64ISD::VLSHR, DL, VT, BC,
-                       DAG.getConstant(Shift, DL, MVT::i32));
+                       DAG.getTargetConstant(Shift, DL, MVT::i32));
     return DAG.getNode(ISD::AND, DL, VT, BC, DAG.getConstant(Mask, DL, VT));
   }

llvm/lib/Target/AArch64/AArch64InstrFormats.td

Lines changed: 22 additions & 82 deletions
@@ -812,87 +812,56 @@ def fixedpoint_recip_f16_i64 : fixedpoint_recip_i64<f16>;
 def fixedpoint_recip_f32_i64 : fixedpoint_recip_i64<f32>;
 def fixedpoint_recip_f64_i64 : fixedpoint_recip_i64<f64>;
 
-def vecshiftR8 : Operand<i32>, ImmLeaf<i32, [{
+def vecshiftR8 : Operand<i32>, TImmLeaf<i32, [{
   return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 9);
 }]> {
   let EncoderMethod = "getVecShiftR8OpValue";
   let DecoderMethod = "DecodeVecShiftR8Imm";
   let ParserMatchClass = Imm1_8Operand;
 }
-def vecshiftR16 : Operand<i32>, ImmLeaf<i32, [{
+def vecshiftR16 : Operand<i32>, TImmLeaf<i32, [{
   return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 17);
 }]> {
   let EncoderMethod = "getVecShiftR16OpValue";
   let DecoderMethod = "DecodeVecShiftR16Imm";
   let ParserMatchClass = Imm1_16Operand;
 }
-def vecshiftR16Narrow : Operand<i32>, ImmLeaf<i32, [{
+def vecshiftR16Narrow : Operand<i32>, TImmLeaf<i32, [{
   return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 9);
 }]> {
   let EncoderMethod = "getVecShiftR16OpValue";
   let DecoderMethod = "DecodeVecShiftR16ImmNarrow";
   let ParserMatchClass = Imm1_8Operand;
 }
-def vecshiftR32 : Operand<i32>, ImmLeaf<i32, [{
+def vecshiftR32 : Operand<i32>, TImmLeaf<i32, [{
   return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 33);
 }]> {
   let EncoderMethod = "getVecShiftR32OpValue";
   let DecoderMethod = "DecodeVecShiftR32Imm";
   let ParserMatchClass = Imm1_32Operand;
 }
-def vecshiftR32Narrow : Operand<i32>, ImmLeaf<i32, [{
+def vecshiftR32Narrow : Operand<i32>, TImmLeaf<i32, [{
   return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 17);
 }]> {
   let EncoderMethod = "getVecShiftR32OpValue";
   let DecoderMethod = "DecodeVecShiftR32ImmNarrow";
   let ParserMatchClass = Imm1_16Operand;
 }
-def vecshiftR64 : Operand<i32>, ImmLeaf<i32, [{
+def vecshiftR64 : Operand<i32>, TImmLeaf<i32, [{
   return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 65);
 }]> {
   let EncoderMethod = "getVecShiftR64OpValue";
   let DecoderMethod = "DecodeVecShiftR64Imm";
   let ParserMatchClass = Imm1_64Operand;
 }
-def vecshiftR64Narrow : Operand<i32>, ImmLeaf<i32, [{
+def vecshiftR64Narrow : Operand<i32>, TImmLeaf<i32, [{
   return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 33);
 }]> {
   let EncoderMethod = "getVecShiftR64OpValue";
   let DecoderMethod = "DecodeVecShiftR64ImmNarrow";
   let ParserMatchClass = Imm1_32Operand;
 }
 
-// Same as vecshiftR#N, but use TargetConstant (TimmLeaf) instead of Constant
-// (ImmLeaf)
-def tvecshiftR8 : Operand<i32>, TImmLeaf<i32, [{
-  return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 9);
-}]> {
-  let EncoderMethod = "getVecShiftR8OpValue";
-  let DecoderMethod = "DecodeVecShiftR8Imm";
-  let ParserMatchClass = Imm1_8Operand;
-}
-def tvecshiftR16 : Operand<i32>, TImmLeaf<i32, [{
-  return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 17);
-}]> {
-  let EncoderMethod = "getVecShiftR16OpValue";
-  let DecoderMethod = "DecodeVecShiftR16Imm";
-  let ParserMatchClass = Imm1_16Operand;
-}
-def tvecshiftR32 : Operand<i32>, TImmLeaf<i32, [{
-  return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 33);
-}]> {
-  let EncoderMethod = "getVecShiftR32OpValue";
-  let DecoderMethod = "DecodeVecShiftR32Imm";
-  let ParserMatchClass = Imm1_32Operand;
-}
-def tvecshiftR64 : Operand<i32>, TImmLeaf<i32, [{
-  return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 65);
-}]> {
-  let EncoderMethod = "getVecShiftR64OpValue";
-  let DecoderMethod = "DecodeVecShiftR64Imm";
-  let ParserMatchClass = Imm1_64Operand;
-}
-
 def Imm0_0Operand : AsmImmRange<0, 0>;
 def Imm0_1Operand : AsmImmRange<0, 1>;
 def Imm1_1Operand : AsmImmRange<1, 1>;
@@ -904,65 +873,35 @@ def Imm0_15Operand : AsmImmRange<0, 15>;
 def Imm0_31Operand : AsmImmRange<0, 31>;
 def Imm0_63Operand : AsmImmRange<0, 63>;
 
-def vecshiftL8 : Operand<i32>, ImmLeaf<i32, [{
+def vecshiftL8 : Operand<i32>, TImmLeaf<i32, [{
   return (((uint32_t)Imm) < 8);
 }]> {
   let EncoderMethod = "getVecShiftL8OpValue";
   let DecoderMethod = "DecodeVecShiftL8Imm";
   let ParserMatchClass = Imm0_7Operand;
 }
-def vecshiftL16 : Operand<i32>, ImmLeaf<i32, [{
+def vecshiftL16 : Operand<i32>, TImmLeaf<i32, [{
   return (((uint32_t)Imm) < 16);
 }]> {
   let EncoderMethod = "getVecShiftL16OpValue";
   let DecoderMethod = "DecodeVecShiftL16Imm";
   let ParserMatchClass = Imm0_15Operand;
 }
-def vecshiftL32 : Operand<i32>, ImmLeaf<i32, [{
+def vecshiftL32 : Operand<i32>, TImmLeaf<i32, [{
   return (((uint32_t)Imm) < 32);
 }]> {
   let EncoderMethod = "getVecShiftL32OpValue";
   let DecoderMethod = "DecodeVecShiftL32Imm";
   let ParserMatchClass = Imm0_31Operand;
 }
-def vecshiftL64 : Operand<i32>, ImmLeaf<i32, [{
+def vecshiftL64 : Operand<i32>, TImmLeaf<i32, [{
   return (((uint32_t)Imm) < 64);
 }]> {
   let EncoderMethod = "getVecShiftL64OpValue";
   let DecoderMethod = "DecodeVecShiftL64Imm";
   let ParserMatchClass = Imm0_63Operand;
 }
 
-// Same as vecshiftL#N, but use TargetConstant (TimmLeaf) instead of Constant
-// (ImmLeaf)
-def tvecshiftL8 : Operand<i32>, TImmLeaf<i32, [{
-  return (((uint32_t)Imm) < 8);
-}]> {
-  let EncoderMethod = "getVecShiftL8OpValue";
-  let DecoderMethod = "DecodeVecShiftL8Imm";
-  let ParserMatchClass = Imm0_7Operand;
-}
-def tvecshiftL16 : Operand<i32>, TImmLeaf<i32, [{
-  return (((uint32_t)Imm) < 16);
-}]> {
-  let EncoderMethod = "getVecShiftL16OpValue";
-  let DecoderMethod = "DecodeVecShiftL16Imm";
-  let ParserMatchClass = Imm0_15Operand;
-}
-def tvecshiftL32 : Operand<i32>, TImmLeaf<i32, [{
-  return (((uint32_t)Imm) < 32);
-}]> {
-  let EncoderMethod = "getVecShiftL32OpValue";
-  let DecoderMethod = "DecodeVecShiftL32Imm";
-  let ParserMatchClass = Imm0_31Operand;
-}
-def tvecshiftL64 : Operand<i32>, TImmLeaf<i32, [{
-  return (((uint32_t)Imm) < 64);
-}]> {
-  let EncoderMethod = "getVecShiftL64OpValue";
-  let DecoderMethod = "DecodeVecShiftL64Imm";
-  let ParserMatchClass = Imm0_63Operand;
-}
 
 // Crazy immediate formats used by 32-bit and 64-bit logical immediate
 // instructions for splatting repeating bit patterns across the immediate.
@@ -10232,39 +10171,40 @@ multiclass SIMDVectorRShiftSD<bit U, bits<5> opc, string asm,
   def v4i16_shift : BaseSIMDVectorShift<0, U, opc, {0,0,1,?,?,?,?},
                                         V64, V64, vecshiftR16,
                                         asm, ".4h", ".4h",
-      [(set (v4i16 V64:$Rd), (OpNode (v4f16 V64:$Rn), (i32 imm:$imm)))]> {
+      [(set (v4i16 V64:$Rd), (OpNode (v4f16 V64:$Rn), (i32 vecshiftR16:$imm)))]> {
     bits<4> imm;
     let Inst{19-16} = imm;
   }
 
   def v8i16_shift : BaseSIMDVectorShift<1, U, opc, {0,0,1,?,?,?,?},
                                         V128, V128, vecshiftR16,
                                         asm, ".8h", ".8h",
-      [(set (v8i16 V128:$Rd), (OpNode (v8f16 V128:$Rn), (i32 imm:$imm)))]> {
+      [(set (v8i16 V128:$Rd), (OpNode (v8f16 V128:$Rn), (i32 vecshiftR16:$imm)))]> {
     bits<4> imm;
     let Inst{19-16} = imm;
   }
   } // Predicates = [HasNEON, HasFullFP16]
+
   def v2i32_shift : BaseSIMDVectorShift<0, U, opc, {0,1,?,?,?,?,?},
                                         V64, V64, vecshiftR32,
                                         asm, ".2s", ".2s",
-      [(set (v2i32 V64:$Rd), (OpNode (v2f32 V64:$Rn), (i32 imm:$imm)))]> {
+      [(set (v2i32 V64:$Rd), (OpNode (v2f32 V64:$Rn), (i32 vecshiftR32:$imm)))]> {
    bits<5> imm;
    let Inst{20-16} = imm;
  }
 
   def v4i32_shift : BaseSIMDVectorShift<1, U, opc, {0,1,?,?,?,?,?},
                                         V128, V128, vecshiftR32,
                                         asm, ".4s", ".4s",
-      [(set (v4i32 V128:$Rd), (OpNode (v4f32 V128:$Rn), (i32 imm:$imm)))]> {
+      [(set (v4i32 V128:$Rd), (OpNode (v4f32 V128:$Rn), (i32 vecshiftR32:$imm)))]> {
    bits<5> imm;
    let Inst{20-16} = imm;
  }
 
   def v2i64_shift : BaseSIMDVectorShift<1, U, opc, {1,?,?,?,?,?,?},
                                         V128, V128, vecshiftR64,
                                         asm, ".2d", ".2d",
-      [(set (v2i64 V128:$Rd), (OpNode (v2f64 V128:$Rn), (i32 imm:$imm)))]> {
+      [(set (v2i64 V128:$Rd), (OpNode (v2f64 V128:$Rn), (i32 vecshiftR64:$imm)))]> {
    bits<6> imm;
    let Inst{21-16} = imm;
  }
@@ -10276,15 +10216,15 @@ multiclass SIMDVectorRShiftToFP<bit U, bits<5> opc, string asm,
   def v4i16_shift : BaseSIMDVectorShift<0, U, opc, {0,0,1,?,?,?,?},
                                         V64, V64, vecshiftR16,
                                         asm, ".4h", ".4h",
-      [(set (v4f16 V64:$Rd), (OpNode (v4i16 V64:$Rn), (i32 imm:$imm)))]> {
+      [(set (v4f16 V64:$Rd), (OpNode (v4i16 V64:$Rn), (i32 vecshiftR16:$imm)))]> {
    bits<4> imm;
    let Inst{19-16} = imm;
  }
 
   def v8i16_shift : BaseSIMDVectorShift<1, U, opc, {0,0,1,?,?,?,?},
                                         V128, V128, vecshiftR16,
                                         asm, ".8h", ".8h",
-      [(set (v8f16 V128:$Rd), (OpNode (v8i16 V128:$Rn), (i32 imm:$imm)))]> {
+      [(set (v8f16 V128:$Rd), (OpNode (v8i16 V128:$Rn), (i32 vecshiftR16:$imm)))]> {
    bits<4> imm;
    let Inst{19-16} = imm;
  }
@@ -10293,23 +10233,23 @@ multiclass SIMDVectorRShiftToFP<bit U, bits<5> opc, string asm,
   def v2i32_shift : BaseSIMDVectorShift<0, U, opc, {0,1,?,?,?,?,?},
                                         V64, V64, vecshiftR32,
                                         asm, ".2s", ".2s",
-      [(set (v2f32 V64:$Rd), (OpNode (v2i32 V64:$Rn), (i32 imm:$imm)))]> {
+      [(set (v2f32 V64:$Rd), (OpNode (v2i32 V64:$Rn), (i32 vecshiftR32:$imm)))]> {
    bits<5> imm;
    let Inst{20-16} = imm;
  }
 
   def v4i32_shift : BaseSIMDVectorShift<1, U, opc, {0,1,?,?,?,?,?},
                                         V128, V128, vecshiftR32,
                                         asm, ".4s", ".4s",
-      [(set (v4f32 V128:$Rd), (OpNode (v4i32 V128:$Rn), (i32 imm:$imm)))]> {
+      [(set (v4f32 V128:$Rd), (OpNode (v4i32 V128:$Rn), (i32 vecshiftR32:$imm)))]> {
    bits<5> imm;
    let Inst{20-16} = imm;
  }
 
   def v2i64_shift : BaseSIMDVectorShift<1, U, opc, {1,?,?,?,?,?,?},
                                         V128, V128, vecshiftR64,
                                         asm, ".2d", ".2d",
-      [(set (v2f64 V128:$Rd), (OpNode (v2i64 V128:$Rn), (i32 imm:$imm)))]> {
+      [(set (v2f64 V128:$Rd), (OpNode (v2i64 V128:$Rn), (i32 vecshiftR64:$imm)))]> {
    bits<6> imm;
    let Inst{21-16} = imm;
  }

llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp

Lines changed: 1 addition & 2 deletions
@@ -556,8 +556,7 @@ void applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
   unsigned NewOpc =
       Opc == TargetOpcode::G_ASHR ? AArch64::G_VASHR : AArch64::G_VLSHR;
   MachineIRBuilder MIB(MI);
-  auto ImmDef = MIB.buildConstant(LLT::scalar(32), Imm);
-  MIB.buildInstr(NewOpc, {MI.getOperand(0)}, {MI.getOperand(1), ImmDef});
+  MIB.buildInstr(NewOpc, {MI.getOperand(0)}, {MI.getOperand(1)}).addImm(Imm);
   MI.eraseFromParent();
 }
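
For the GlobalISel half of the change, here is a minimal sketch, not taken verbatim from the commit, of the two ways of attaching a constant shift amount; Dst, Src, and Imm are assumed to be a destination register, a source register, and a shift amount already in scope:

    // Hypothetical snippet inside an AArch64 GISel lowering helper, assuming a
    // MachineIRBuilder MIB positioned at the instruction being rewritten.
    // Old form: the shift amount was a separate G_CONSTANT virtual register.
    //   auto ImmDef = MIB.buildConstant(LLT::scalar(32), Imm);
    //   MIB.buildInstr(AArch64::G_VLSHR, {Dst}, {Src, ImmDef});
    // New form: the shift amount is an immediate operand of G_VLSHR/G_VASHR,
    // mirroring the TargetConstant representation on the SDAG side.
    MIB.buildInstr(AArch64::G_VLSHR, {Dst}, {Src}).addImm(Imm);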
