@@ -1745,13 +1745,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                  MachineMemOperand::MOVolatile;
     return true;
-  case Intrinsic::riscv_seg2_load:
-  case Intrinsic::riscv_seg3_load:
-  case Intrinsic::riscv_seg4_load:
-  case Intrinsic::riscv_seg5_load:
-  case Intrinsic::riscv_seg6_load:
-  case Intrinsic::riscv_seg7_load:
-  case Intrinsic::riscv_seg8_load:
   case Intrinsic::riscv_seg2_load_mask:
   case Intrinsic::riscv_seg3_load_mask:
   case Intrinsic::riscv_seg4_load_mask:
@@ -1761,17 +1754,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   case Intrinsic::riscv_seg8_load_mask:
     return SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
                                /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
-  case Intrinsic::riscv_seg2_store:
-  case Intrinsic::riscv_seg3_store:
-  case Intrinsic::riscv_seg4_store:
-  case Intrinsic::riscv_seg5_store:
-  case Intrinsic::riscv_seg6_store:
-  case Intrinsic::riscv_seg7_store:
-  case Intrinsic::riscv_seg8_store:
-    // Operands are (vec, ..., vec, ptr, vl)
-    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
-                               /*IsStore*/ true,
-                               /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
   case Intrinsic::riscv_seg2_store_mask:
   case Intrinsic::riscv_seg3_store_mask:
   case Intrinsic::riscv_seg4_store_mask:
@@ -10591,13 +10573,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   switch (IntNo) {
   default:
     break;
-  case Intrinsic::riscv_seg2_load:
-  case Intrinsic::riscv_seg3_load:
-  case Intrinsic::riscv_seg4_load:
-  case Intrinsic::riscv_seg5_load:
-  case Intrinsic::riscv_seg6_load:
-  case Intrinsic::riscv_seg7_load:
-  case Intrinsic::riscv_seg8_load:
   case Intrinsic::riscv_seg2_load_mask:
   case Intrinsic::riscv_seg3_load_mask:
   case Intrinsic::riscv_seg4_load_mask:
@@ -10620,12 +10595,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                   ContainerVT.getScalarSizeInBits();
     EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);

-    // Masked: (pointer, mask, vl)
-    // Non-masked: (pointer, vl)
-    bool IsMasked = Op.getNumOperands() > 4;
+    // Operands: (chain, int_id, pointer, mask, vl)
     SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
-    SDValue Mask =
-        IsMasked ? Op.getOperand(3) : getAllOnesMask(ContainerVT, VL, DL, DAG);
+    SDValue Mask = Op.getOperand(3);
     MVT MaskVT = Mask.getSimpleValueType();
     if (MaskVT.isFixedLengthVector()) {
       MVT MaskContainerVT =
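Note: with the plain (non-masked) segment intrinsics removed, every producer of these calls now passes a mask explicitly; an all-active access simply uses an all-ones mask. Below is a minimal IRBuilder sketch of the fixed-vector segment load in its masked-only form. The helper name is invented for illustration and is not part of this patch; the overload set {VTy, XLenTy} and the (pointer, mask, vl) operand order are the ones the lowering above decodes.

// Illustrative sketch, not from the patch: build a llvm.riscv.seg2.load.mask
// call with an all-active mask.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"
using namespace llvm;

static CallInst *buildSeg2LoadAllActive(IRBuilder<> &Builder, Value *Ptr,
                                        FixedVectorType *VTy, Type *XLenTy) {
  // VL covers the whole fixed vector; the mask enables every lane.
  Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
  Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
  // Overloads are {element vector type, XLEN type}; operands are
  // (pointer, mask, vl), matching LowerINTRINSIC_W_CHAIN above.
  return Builder.CreateIntrinsic(Intrinsic::riscv_seg2_load_mask,
                                 {VTy, XLenTy}, {Ptr, Mask, VL});
}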
@@ -10699,13 +10671,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
   switch (IntNo) {
   default:
     break;
-  case Intrinsic::riscv_seg2_store:
-  case Intrinsic::riscv_seg3_store:
-  case Intrinsic::riscv_seg4_store:
-  case Intrinsic::riscv_seg5_store:
-  case Intrinsic::riscv_seg6_store:
-  case Intrinsic::riscv_seg7_store:
-  case Intrinsic::riscv_seg8_store:
   case Intrinsic::riscv_seg2_store_mask:
   case Intrinsic::riscv_seg3_store_mask:
   case Intrinsic::riscv_seg4_store_mask:
@@ -10720,24 +10685,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
         Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
         Intrinsic::riscv_vsseg8_mask};

-    bool IsMasked = false;
-    switch (IntNo) {
-    case Intrinsic::riscv_seg2_store_mask:
-    case Intrinsic::riscv_seg3_store_mask:
-    case Intrinsic::riscv_seg4_store_mask:
-    case Intrinsic::riscv_seg5_store_mask:
-    case Intrinsic::riscv_seg6_store_mask:
-    case Intrinsic::riscv_seg7_store_mask:
-    case Intrinsic::riscv_seg8_store_mask:
-      IsMasked = true;
-      break;
-    default:
-      break;
-    }
-
-    // Non-masked: (chain, int_id, vec*, ptr, vl)
-    // Masked: (chain, int_id, vec*, ptr, mask, vl)
-    unsigned NF = Op->getNumOperands() - (IsMasked ? 5 : 4);
+    // Operands: (chain, int_id, vec*, ptr, mask, vl)
+    unsigned NF = Op->getNumOperands() - 5;
     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
     MVT XLenVT = Subtarget.getXLenVT();
     MVT VT = Op->getOperand(2).getSimpleValueType();
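Note: the store side follows the same convention. At the SelectionDAG level the node carries (chain, int_id, vec*, ptr, mask, vl), which is why NF is recovered as getNumOperands() - 5; at the IR level the call is simply (vec..., ptr, mask, vl). A hedged sketch for factor 2, again with an invented helper name:

// Illustrative sketch, not from the patch: build a llvm.riscv.seg2.store.mask
// call that stores two interleaved fixed vectors.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"
using namespace llvm;

static CallInst *buildSeg2StoreAllActive(IRBuilder<> &Builder, Value *Vec0,
                                         Value *Vec1, Value *Ptr,
                                         FixedVectorType *VTy, Type *XLenTy) {
  Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
  Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
  // Operands: the NF segment values, then pointer, mask and vl; the chain and
  // intrinsic id seen by the lowering are added by SelectionDAG.
  return Builder.CreateIntrinsic(Intrinsic::riscv_seg2_store_mask,
                                 {VTy, XLenTy}, {Vec0, Vec1, Ptr, Mask, VL});
}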
@@ -10747,8 +10696,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
     EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);

     SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
-    SDValue Mask = IsMasked ? Op.getOperand(Op.getNumOperands() - 2)
-                            : getAllOnesMask(ContainerVT, VL, DL, DAG);
+    SDValue Mask = Op.getOperand(Op.getNumOperands() - 2);
     MVT MaskVT = Mask.getSimpleValueType();
     if (MaskVT.isFixedLengthVector()) {
       MVT MaskContainerVT =
@@ -23823,10 +23771,10 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
 }

 static const Intrinsic::ID FixedVlsegIntrIds[] = {
-    Intrinsic::riscv_seg2_load, Intrinsic::riscv_seg3_load,
-    Intrinsic::riscv_seg4_load, Intrinsic::riscv_seg5_load,
-    Intrinsic::riscv_seg6_load, Intrinsic::riscv_seg7_load,
-    Intrinsic::riscv_seg8_load};
+    Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
+    Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
+    Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
+    Intrinsic::riscv_seg8_load_mask};

 /// Lower an interleaved load into a vlsegN intrinsic.
 ///
@@ -23877,10 +23825,11 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
   };

   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
-
-  CallInst *VlsegN = Builder.CreateIntrinsic(
-      FixedVlsegIntrIds[Factor - 2], {VTy, LI->getPointerOperandType(), XLenTy},
-      {LI->getPointerOperand(), VL});
+  // All-ones mask.
+  Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
+  CallInst *VlsegN =
+      Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {VTy, XLenTy},
+                              {LI->getPointerOperand(), Mask, VL});

   for (unsigned i = 0; i < Shuffles.size(); i++) {
     Value *SubVec = Builder.CreateExtractValue(VlsegN, Indices[i]);
@@ -23891,10 +23840,10 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
 }

 static const Intrinsic::ID FixedVssegIntrIds[] = {
-    Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
-    Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
-    Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
-    Intrinsic::riscv_seg8_store};
+    Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
+    Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
+    Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
+    Intrinsic::riscv_seg8_store_mask};

 /// Lower an interleaved store into a vssegN intrinsic.
 ///
@@ -23954,8 +23903,7 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
   }

   Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
-      SI->getModule(), FixedVssegIntrIds[Factor - 2],
-      {VTy, SI->getPointerOperandType(), XLenTy});
+      SI->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, XLenTy});

   SmallVector<Value *, 10> Ops;
   SmallVector<int, 16> NewShuffleMask;
@@ -23975,7 +23923,10 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
   // potentially under larger LMULs) because we checked that the fixed vector
   // type fits in isLegalInterleavedAccessType
   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
-  Ops.append({SI->getPointerOperand(), VL});
+  // All-ones mask.
+  Value *StoreMask = ConstantVector::getSplat(
+      VTy->getElementCount(), ConstantInt::getTrue(SVI->getContext()));
+  Ops.append({SI->getPointerOperand(), StoreMask, VL});

   Builder.CreateCall(VssegNFunc, Ops);

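Note: the patch spells the all-ones mask two ways: Builder.getAllOnesMask() in lowerInterleavedLoad, and an explicit splat of i1 true in the store and (de)interleave paths. Both are expected to yield the same uniqued <N x i1> constant; a small sketch, purely illustrative and not part of the patch:

// Illustrative sketch: the two mask constructions used above should produce
// the same all-ones vector constant.
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include <cassert>
using namespace llvm;

static Value *allOnesMaskTwoWays(IRBuilder<> &Builder, FixedVectorType *VTy) {
  // IRBuilder helper, as used in lowerInterleavedLoad.
  Value *A = Builder.getAllOnesMask(VTy->getElementCount());
  // Explicit splat of i1 true, as used in the store/deinterleave paths.
  Value *B = ConstantVector::getSplat(VTy->getElementCount(),
                                      ConstantInt::getTrue(VTy->getContext()));
  // Constants are uniqued, so both spellings give the same Value.
  assert(A == B && "expected the canonical all-ones mask in both cases");
  return A;
}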
@@ -24004,10 +23955,12 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(

   if (auto *FVTy = dyn_cast<FixedVectorType>(ResVTy)) {
     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
+    // All-ones mask.
+    Value *Mask = ConstantVector::getSplat(
+        FVTy->getElementCount(), ConstantInt::getTrue(LI->getContext()));
     Return =
-        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
-                                {ResVTy, LI->getPointerOperandType(), XLenTy},
-                                {LI->getPointerOperand(), VL});
+        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {ResVTy, XLenTy},
+                                {LI->getPointerOperand(), Mask, VL});
   } else {
     static const Intrinsic::ID IntrIds[] = {
         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
@@ -24071,12 +24024,14 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(

   if (auto *FVTy = dyn_cast<FixedVectorType>(InVTy)) {
     Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
-        SI->getModule(), FixedVssegIntrIds[Factor - 2],
-        {InVTy, SI->getPointerOperandType(), XLenTy});
+        SI->getModule(), FixedVssegIntrIds[Factor - 2], {InVTy, XLenTy});

     SmallVector<Value *, 10> Ops(InterleaveValues);
     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
-    Ops.append({SI->getPointerOperand(), VL});
+    // All-ones mask.
+    Value *Mask = ConstantVector::getSplat(
+        FVTy->getElementCount(), ConstantInt::getTrue(SI->getContext()));
+    Ops.append({SI->getPointerOperand(), Mask, VL});

     Builder.CreateCall(VssegNFunc, Ops);
   } else {
@@ -24198,15 +24153,9 @@ bool RISCVTargetLowering::lowerInterleavedVPLoad(

   Value *Return = nullptr;
   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
-    static const Intrinsic::ID FixedMaskedVlsegIntrIds[] = {
-        Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
-        Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
-        Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
-        Intrinsic::riscv_seg8_load_mask};
-
-    Return = Builder.CreateIntrinsic(FixedMaskedVlsegIntrIds[Factor - 2],
-                                     {FVTy, XLenTy},
-                                     {Load->getArgOperand(0), Mask, EVL});
+    Return =
+        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {FVTy, XLenTy},
+                                {Load->getArgOperand(0), Mask, EVL});
   } else {
     static const Intrinsic::ID IntrMaskIds[] = {
         Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
@@ -24318,15 +24267,9 @@ bool RISCVTargetLowering::lowerInterleavedVPStore(
       XLenTy);

   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
-    static const Intrinsic::ID FixedMaskedVssegIntrIds[] = {
-        Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
-        Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
-        Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
-        Intrinsic::riscv_seg8_store_mask};
-
     SmallVector<Value *, 8> Operands(InterleaveOperands);
     Operands.append({Store->getArgOperand(1), Mask, EVL});
-    Builder.CreateIntrinsic(FixedMaskedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
+    Builder.CreateIntrinsic(FixedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
                             Operands);
     return true;
   }