@@ -1740,13 +1740,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
17401740 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
17411741 MachineMemOperand::MOVolatile;
17421742 return true;
1743- case Intrinsic::riscv_seg2_load:
1744- case Intrinsic::riscv_seg3_load:
1745- case Intrinsic::riscv_seg4_load:
1746- case Intrinsic::riscv_seg5_load:
1747- case Intrinsic::riscv_seg6_load:
1748- case Intrinsic::riscv_seg7_load:
1749- case Intrinsic::riscv_seg8_load:
17501743 case Intrinsic::riscv_seg2_load_mask:
17511744 case Intrinsic::riscv_seg3_load_mask:
17521745 case Intrinsic::riscv_seg4_load_mask:
@@ -1756,17 +1749,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
17561749 case Intrinsic::riscv_seg8_load_mask:
17571750 return SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
17581751 /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
1759- case Intrinsic::riscv_seg2_store:
1760- case Intrinsic::riscv_seg3_store:
1761- case Intrinsic::riscv_seg4_store:
1762- case Intrinsic::riscv_seg5_store:
1763- case Intrinsic::riscv_seg6_store:
1764- case Intrinsic::riscv_seg7_store:
1765- case Intrinsic::riscv_seg8_store:
1766- // Operands are (vec, ..., vec, ptr, vl)
1767- return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
1768- /*IsStore*/ true,
1769- /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
17701752 case Intrinsic::riscv_seg2_store_mask:
17711753 case Intrinsic::riscv_seg3_store_mask:
17721754 case Intrinsic::riscv_seg4_store_mask:
@@ -10581,13 +10563,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
1058110563 switch (IntNo) {
1058210564 default:
1058310565 break;
10584- case Intrinsic::riscv_seg2_load:
10585- case Intrinsic::riscv_seg3_load:
10586- case Intrinsic::riscv_seg4_load:
10587- case Intrinsic::riscv_seg5_load:
10588- case Intrinsic::riscv_seg6_load:
10589- case Intrinsic::riscv_seg7_load:
10590- case Intrinsic::riscv_seg8_load:
1059110566 case Intrinsic::riscv_seg2_load_mask:
1059210567 case Intrinsic::riscv_seg3_load_mask:
1059310568 case Intrinsic::riscv_seg4_load_mask:
@@ -10610,12 +10585,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
1061010585 ContainerVT.getScalarSizeInBits();
1061110586 EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
1061210587
10613- // Masked: (pointer, mask, vl)
10614- // Non-masked: (pointer, vl)
10615- bool IsMasked = Op.getNumOperands() > 4;
10588+ // Operands: (chain, int_id, pointer, mask, vl)
1061610589 SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
10617- SDValue Mask =
10618- IsMasked ? Op.getOperand(3) : getAllOnesMask(ContainerVT, VL, DL, DAG);
10590+ SDValue Mask = Op.getOperand(3);
1061910591 MVT MaskVT = Mask.getSimpleValueType();
1062010592 if (MaskVT.isFixedLengthVector()) {
1062110593 MVT MaskContainerVT =
@@ -10689,13 +10661,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
1068910661 switch (IntNo) {
1069010662 default:
1069110663 break;
10692- case Intrinsic::riscv_seg2_store:
10693- case Intrinsic::riscv_seg3_store:
10694- case Intrinsic::riscv_seg4_store:
10695- case Intrinsic::riscv_seg5_store:
10696- case Intrinsic::riscv_seg6_store:
10697- case Intrinsic::riscv_seg7_store:
10698- case Intrinsic::riscv_seg8_store:
1069910664 case Intrinsic::riscv_seg2_store_mask:
1070010665 case Intrinsic::riscv_seg3_store_mask:
1070110666 case Intrinsic::riscv_seg4_store_mask:
@@ -10710,24 +10675,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
1071010675 Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
1071110676 Intrinsic::riscv_vsseg8_mask};
1071210677
10713- bool IsMasked = false;
10714- switch (IntNo) {
10715- case Intrinsic::riscv_seg2_store_mask:
10716- case Intrinsic::riscv_seg3_store_mask:
10717- case Intrinsic::riscv_seg4_store_mask:
10718- case Intrinsic::riscv_seg5_store_mask:
10719- case Intrinsic::riscv_seg6_store_mask:
10720- case Intrinsic::riscv_seg7_store_mask:
10721- case Intrinsic::riscv_seg8_store_mask:
10722- IsMasked = true;
10723- break;
10724- default:
10725- break;
10726- }
10727-
10728- // Non-masked: (chain, int_id, vec*, ptr, vl)
10729- // Masked: (chain, int_id, vec*, ptr, mask, vl)
10730- unsigned NF = Op->getNumOperands() - (IsMasked ? 5 : 4);
10678+ // Operands: (chain, int_id, vec*, ptr, mask, vl)
10679+ unsigned NF = Op->getNumOperands() - 5;
1073110680 assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
1073210681 MVT XLenVT = Subtarget.getXLenVT();
1073310682 MVT VT = Op->getOperand(2).getSimpleValueType();
@@ -10737,8 +10686,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
1073710686 EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
1073810687
1073910688 SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
10740- SDValue Mask = IsMasked ? Op.getOperand(Op.getNumOperands() - 2)
10741- : getAllOnesMask(ContainerVT, VL, DL, DAG);
10689+ SDValue Mask = Op.getOperand(Op.getNumOperands() - 2);
1074210690 MVT MaskVT = Mask.getSimpleValueType();
1074310691 if (MaskVT.isFixedLengthVector()) {
1074410692 MVT MaskContainerVT =
@@ -23660,10 +23608,10 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
2366023608}
2366123609
2366223610static const Intrinsic::ID FixedVlsegIntrIds[] = {
23663- Intrinsic::riscv_seg2_load, Intrinsic::riscv_seg3_load,
23664- Intrinsic::riscv_seg4_load, Intrinsic::riscv_seg5_load,
23665- Intrinsic::riscv_seg6_load, Intrinsic::riscv_seg7_load,
23666- Intrinsic::riscv_seg8_load};
23611+ Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
23612+ Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
23613+ Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
23614+ Intrinsic::riscv_seg8_load_mask};
2366723615
2366823616/// Lower an interleaved load into a vlsegN intrinsic.
2366923617///
@@ -23714,10 +23662,11 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
2371423662 };
2371523663
2371623664 Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
23717-
23718- CallInst *VlsegN = Builder.CreateIntrinsic(
23719- FixedVlsegIntrIds[Factor - 2], {VTy, LI->getPointerOperandType(), XLenTy},
23720- {LI->getPointerOperand(), VL});
23665+ // All-ones mask.
23666+ Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
23667+ CallInst *VlsegN =
23668+ Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {VTy, XLenTy},
23669+ {LI->getPointerOperand(), Mask, VL});
2372123670
2372223671 for (unsigned i = 0; i < Shuffles.size(); i++) {
2372323672 Value *SubVec = Builder.CreateExtractValue(VlsegN, Indices[i]);
@@ -23728,10 +23677,10 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
2372823677}
2372923678
2373023679static const Intrinsic::ID FixedVssegIntrIds[] = {
23731- Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
23732- Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
23733- Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
23734- Intrinsic::riscv_seg8_store};
23680+ Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
23681+ Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
23682+ Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
23683+ Intrinsic::riscv_seg8_store_mask};
2373523684
2373623685/// Lower an interleaved store into a vssegN intrinsic.
2373723686///
@@ -23791,8 +23740,7 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
2379123740 }
2379223741
2379323742 Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
23794- SI->getModule(), FixedVssegIntrIds[Factor - 2],
23795- {VTy, SI->getPointerOperandType(), XLenTy});
23743+ SI->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, XLenTy});
2379623744
2379723745 SmallVector<Value *, 10> Ops;
2379823746 SmallVector<int, 16> NewShuffleMask;
@@ -23812,7 +23760,10 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
2381223760 // potentially under larger LMULs) because we checked that the fixed vector
2381323761 // type fits in isLegalInterleavedAccessType
2381423762 Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
23815- Ops.append({SI->getPointerOperand(), VL});
23763+ // All-ones mask.
23764+ Value *StoreMask = ConstantVector::getSplat(
23765+ VTy->getElementCount(), ConstantInt::getTrue(SVI->getContext()));
23766+ Ops.append({SI->getPointerOperand(), StoreMask, VL});
2381623767
2381723768 Builder.CreateCall(VssegNFunc, Ops);
2381823769
@@ -23841,10 +23792,12 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
2384123792
2384223793 if (auto *FVTy = dyn_cast<FixedVectorType>(ResVTy)) {
2384323794 Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
23795+ // All-ones mask.
23796+ Value *Mask = ConstantVector::getSplat(
23797+ FVTy->getElementCount(), ConstantInt::getTrue(LI->getContext()));
2384423798 Return =
23845- Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
23846- {ResVTy, LI->getPointerOperandType(), XLenTy},
23847- {LI->getPointerOperand(), VL});
23799+ Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {ResVTy, XLenTy},
23800+ {LI->getPointerOperand(), Mask, VL});
2384823801 } else {
2384923802 static const Intrinsic::ID IntrIds[] = {
2385023803 Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
@@ -23908,12 +23861,14 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
2390823861
2390923862 if (auto *FVTy = dyn_cast<FixedVectorType>(InVTy)) {
2391023863 Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
23911- SI->getModule(), FixedVssegIntrIds[Factor - 2],
23912- {InVTy, SI->getPointerOperandType(), XLenTy});
23864+ SI->getModule(), FixedVssegIntrIds[Factor - 2], {InVTy, XLenTy});
2391323865
2391423866 SmallVector<Value *, 10> Ops(InterleaveValues);
2391523867 Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
23916- Ops.append({SI->getPointerOperand(), VL});
23868+ // All-ones mask.
23869+ Value *Mask = ConstantVector::getSplat(
23870+ FVTy->getElementCount(), ConstantInt::getTrue(SI->getContext()));
23871+ Ops.append({SI->getPointerOperand(), Mask, VL});
2391723872
2391823873 Builder.CreateCall(VssegNFunc, Ops);
2391923874 } else {
@@ -24035,15 +23990,9 @@ bool RISCVTargetLowering::lowerDeinterleavedVPLoad(
2403523990
2403623991 Value *Return = nullptr;
2403723992 if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
24038- static const Intrinsic::ID FixedMaskedVlsegIntrIds[] = {
24039- Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
24040- Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
24041- Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
24042- Intrinsic::riscv_seg8_load_mask};
24043-
24044- Return = Builder.CreateIntrinsic(FixedMaskedVlsegIntrIds[Factor - 2],
24045- {FVTy, XLenTy},
24046- {Load->getArgOperand(0), Mask, EVL});
23993+ Return =
23994+ Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {FVTy, XLenTy},
23995+ {Load->getArgOperand(0), Mask, EVL});
2404723996 } else {
2404823997 static const Intrinsic::ID IntrMaskIds[] = {
2404923998 Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
@@ -24155,15 +24104,9 @@ bool RISCVTargetLowering::lowerInterleavedVPStore(
2415524104 XLenTy);
2415624105
2415724106 if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
24158- static const Intrinsic::ID FixedMaskedVssegIntrIds[] = {
24159- Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
24160- Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
24161- Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
24162- Intrinsic::riscv_seg8_store_mask};
24163-
2416424107 SmallVector<Value *, 8> Operands(InterleaveOperands);
2416524108 Operands.append({Store->getArgOperand(1), Mask, EVL});
24166- Builder.CreateIntrinsic(FixedMaskedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
24109+ Builder.CreateIntrinsic(FixedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
2416724110 Operands);
2416824111 return true;
2416924112 }
0 commit comments