Skip to content

Commit 75108c0

Browse files
committed
[RISCV] Deprecate riscv.segN.load/store in favor of their masked variants
RISCVVectorPeepholePass would replace instructions that have an all-ones mask with their unmasked variants, so there isn't really a point in keeping separate unmasked versions of these intrinsics.
1 parent a41bfb1 commit 75108c0

File tree

7 files changed

+84
-243
lines changed

7 files changed

+84
-243
lines changed

llvm/include/llvm/IR/IntrinsicsRISCV.td

Lines changed: 4 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1704,14 +1704,10 @@ let TargetPrefix = "riscv" in {
17041704
}
17051705

17061706
// Segment loads/stores for fixed vectors.
1707+
// Note: we only have the masked variants because RISCVVectorPeephole
1708+
// would lower any instruction with an all-ones mask into its unmasked version
1709+
// anyway.
17071710
foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
1708-
// Input: (pointer, vl)
1709-
def int_riscv_seg # nf # _load
1710-
: DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
1711-
!listsplat(LLVMMatchType<0>,
1712-
!add(nf, -1))),
1713-
[llvm_anyptr_ty, llvm_anyint_ty],
1714-
[NoCapture<ArgIndex<0>>, IntrReadMem]>;
17151711
// Input: (pointer, mask, vl)
17161712
def int_riscv_seg # nf # _load_mask
17171713
: DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
@@ -1721,15 +1717,7 @@ let TargetPrefix = "riscv" in {
17211717
llvm_anyint_ty],
17221718
[NoCapture<ArgIndex<0>>, IntrReadMem]>;
17231719

1724-
// Input: (<stored values>, pointer, vl)
1725-
def int_riscv_seg # nf # _store
1726-
: DefaultAttrsIntrinsic<[],
1727-
!listconcat([llvm_anyvector_ty],
1728-
!listsplat(LLVMMatchType<0>,
1729-
!add(nf, -1)),
1730-
[llvm_anyptr_ty, llvm_anyint_ty]),
1731-
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
1732-
// Input: (<stored values>, pointer, mask, vl)
1720+
// Input: (<stored values>..., pointer, mask, vl)
17331721
def int_riscv_seg # nf # _store_mask
17341722
: DefaultAttrsIntrinsic<[],
17351723
!listconcat([llvm_anyvector_ty],

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 37 additions & 94 deletions
Original file line numberDiff line numberDiff line change
@@ -1740,13 +1740,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
17401740
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
17411741
MachineMemOperand::MOVolatile;
17421742
return true;
1743-
case Intrinsic::riscv_seg2_load:
1744-
case Intrinsic::riscv_seg3_load:
1745-
case Intrinsic::riscv_seg4_load:
1746-
case Intrinsic::riscv_seg5_load:
1747-
case Intrinsic::riscv_seg6_load:
1748-
case Intrinsic::riscv_seg7_load:
1749-
case Intrinsic::riscv_seg8_load:
17501743
case Intrinsic::riscv_seg2_load_mask:
17511744
case Intrinsic::riscv_seg3_load_mask:
17521745
case Intrinsic::riscv_seg4_load_mask:
@@ -1756,17 +1749,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
17561749
case Intrinsic::riscv_seg8_load_mask:
17571750
return SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
17581751
/*IsUnitStrided*/ false, /*UsePtrVal*/ true);
1759-
case Intrinsic::riscv_seg2_store:
1760-
case Intrinsic::riscv_seg3_store:
1761-
case Intrinsic::riscv_seg4_store:
1762-
case Intrinsic::riscv_seg5_store:
1763-
case Intrinsic::riscv_seg6_store:
1764-
case Intrinsic::riscv_seg7_store:
1765-
case Intrinsic::riscv_seg8_store:
1766-
// Operands are (vec, ..., vec, ptr, vl)
1767-
return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
1768-
/*IsStore*/ true,
1769-
/*IsUnitStrided*/ false, /*UsePtrVal*/ true);
17701752
case Intrinsic::riscv_seg2_store_mask:
17711753
case Intrinsic::riscv_seg3_store_mask:
17721754
case Intrinsic::riscv_seg4_store_mask:
@@ -10581,13 +10563,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
1058110563
switch (IntNo) {
1058210564
default:
1058310565
break;
10584-
case Intrinsic::riscv_seg2_load:
10585-
case Intrinsic::riscv_seg3_load:
10586-
case Intrinsic::riscv_seg4_load:
10587-
case Intrinsic::riscv_seg5_load:
10588-
case Intrinsic::riscv_seg6_load:
10589-
case Intrinsic::riscv_seg7_load:
10590-
case Intrinsic::riscv_seg8_load:
1059110566
case Intrinsic::riscv_seg2_load_mask:
1059210567
case Intrinsic::riscv_seg3_load_mask:
1059310568
case Intrinsic::riscv_seg4_load_mask:
@@ -10610,12 +10585,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
1061010585
ContainerVT.getScalarSizeInBits();
1061110586
EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
1061210587

10613-
// Masked: (pointer, mask, vl)
10614-
// Non-masked: (pointer, vl)
10615-
bool IsMasked = Op.getNumOperands() > 4;
10588+
// Operands: (chain, int_id, pointer, mask, vl)
1061610589
SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
10617-
SDValue Mask =
10618-
IsMasked ? Op.getOperand(3) : getAllOnesMask(ContainerVT, VL, DL, DAG);
10590+
SDValue Mask = Op.getOperand(3);
1061910591
MVT MaskVT = Mask.getSimpleValueType();
1062010592
if (MaskVT.isFixedLengthVector()) {
1062110593
MVT MaskContainerVT =
@@ -10689,13 +10661,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
1068910661
switch (IntNo) {
1069010662
default:
1069110663
break;
10692-
case Intrinsic::riscv_seg2_store:
10693-
case Intrinsic::riscv_seg3_store:
10694-
case Intrinsic::riscv_seg4_store:
10695-
case Intrinsic::riscv_seg5_store:
10696-
case Intrinsic::riscv_seg6_store:
10697-
case Intrinsic::riscv_seg7_store:
10698-
case Intrinsic::riscv_seg8_store:
1069910664
case Intrinsic::riscv_seg2_store_mask:
1070010665
case Intrinsic::riscv_seg3_store_mask:
1070110666
case Intrinsic::riscv_seg4_store_mask:
@@ -10710,24 +10675,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
1071010675
Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
1071110676
Intrinsic::riscv_vsseg8_mask};
1071210677

10713-
bool IsMasked = false;
10714-
switch (IntNo) {
10715-
case Intrinsic::riscv_seg2_store_mask:
10716-
case Intrinsic::riscv_seg3_store_mask:
10717-
case Intrinsic::riscv_seg4_store_mask:
10718-
case Intrinsic::riscv_seg5_store_mask:
10719-
case Intrinsic::riscv_seg6_store_mask:
10720-
case Intrinsic::riscv_seg7_store_mask:
10721-
case Intrinsic::riscv_seg8_store_mask:
10722-
IsMasked = true;
10723-
break;
10724-
default:
10725-
break;
10726-
}
10727-
10728-
// Non-masked: (chain, int_id, vec*, ptr, vl)
10729-
// Masked: (chain, int_id, vec*, ptr, mask, vl)
10730-
unsigned NF = Op->getNumOperands() - (IsMasked ? 5 : 4);
10678+
// Operands: (chain, int_id, vec*, ptr, mask, vl)
10679+
unsigned NF = Op->getNumOperands() - 5;
1073110680
assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
1073210681
MVT XLenVT = Subtarget.getXLenVT();
1073310682
MVT VT = Op->getOperand(2).getSimpleValueType();
@@ -10737,8 +10686,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
1073710686
EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
1073810687

1073910688
SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
10740-
SDValue Mask = IsMasked ? Op.getOperand(Op.getNumOperands() - 2)
10741-
: getAllOnesMask(ContainerVT, VL, DL, DAG);
10689+
SDValue Mask = Op.getOperand(Op.getNumOperands() - 2);
1074210690
MVT MaskVT = Mask.getSimpleValueType();
1074310691
if (MaskVT.isFixedLengthVector()) {
1074410692
MVT MaskContainerVT =
@@ -23660,10 +23608,10 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
2366023608
}
2366123609

2366223610
static const Intrinsic::ID FixedVlsegIntrIds[] = {
23663-
Intrinsic::riscv_seg2_load, Intrinsic::riscv_seg3_load,
23664-
Intrinsic::riscv_seg4_load, Intrinsic::riscv_seg5_load,
23665-
Intrinsic::riscv_seg6_load, Intrinsic::riscv_seg7_load,
23666-
Intrinsic::riscv_seg8_load};
23611+
Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
23612+
Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
23613+
Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
23614+
Intrinsic::riscv_seg8_load_mask};
2366723615

2366823616
/// Lower an interleaved load into a vlsegN intrinsic.
2366923617
///
@@ -23714,10 +23662,11 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
2371423662
};
2371523663

2371623664
Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
23717-
23718-
CallInst *VlsegN = Builder.CreateIntrinsic(
23719-
FixedVlsegIntrIds[Factor - 2], {VTy, LI->getPointerOperandType(), XLenTy},
23720-
{LI->getPointerOperand(), VL});
23665+
// All-ones mask.
23666+
Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
23667+
CallInst *VlsegN =
23668+
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {VTy, XLenTy},
23669+
{LI->getPointerOperand(), Mask, VL});
2372123670

2372223671
for (unsigned i = 0; i < Shuffles.size(); i++) {
2372323672
Value *SubVec = Builder.CreateExtractValue(VlsegN, Indices[i]);
@@ -23728,10 +23677,10 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
2372823677
}
2372923678

2373023679
static const Intrinsic::ID FixedVssegIntrIds[] = {
23731-
Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
23732-
Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
23733-
Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
23734-
Intrinsic::riscv_seg8_store};
23680+
Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
23681+
Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
23682+
Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
23683+
Intrinsic::riscv_seg8_store_mask};
2373523684

2373623685
/// Lower an interleaved store into a vssegN intrinsic.
2373723686
///
@@ -23791,8 +23740,7 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
2379123740
}
2379223741

2379323742
Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
23794-
SI->getModule(), FixedVssegIntrIds[Factor - 2],
23795-
{VTy, SI->getPointerOperandType(), XLenTy});
23743+
SI->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, XLenTy});
2379623744

2379723745
SmallVector<Value *, 10> Ops;
2379823746
SmallVector<int, 16> NewShuffleMask;
@@ -23812,7 +23760,10 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
2381223760
// potentially under larger LMULs) because we checked that the fixed vector
2381323761
// type fits in isLegalInterleavedAccessType
2381423762
Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
23815-
Ops.append({SI->getPointerOperand(), VL});
23763+
// All-ones mask.
23764+
Value *StoreMask = ConstantVector::getSplat(
23765+
VTy->getElementCount(), ConstantInt::getTrue(SVI->getContext()));
23766+
Ops.append({SI->getPointerOperand(), StoreMask, VL});
2381623767

2381723768
Builder.CreateCall(VssegNFunc, Ops);
2381823769

@@ -23841,10 +23792,12 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
2384123792

2384223793
if (auto *FVTy = dyn_cast<FixedVectorType>(ResVTy)) {
2384323794
Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
23795+
// All-ones mask.
23796+
Value *Mask = ConstantVector::getSplat(
23797+
FVTy->getElementCount(), ConstantInt::getTrue(LI->getContext()));
2384423798
Return =
23845-
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
23846-
{ResVTy, LI->getPointerOperandType(), XLenTy},
23847-
{LI->getPointerOperand(), VL});
23799+
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {ResVTy, XLenTy},
23800+
{LI->getPointerOperand(), Mask, VL});
2384823801
} else {
2384923802
static const Intrinsic::ID IntrIds[] = {
2385023803
Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
@@ -23908,12 +23861,14 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
2390823861

2390923862
if (auto *FVTy = dyn_cast<FixedVectorType>(InVTy)) {
2391023863
Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
23911-
SI->getModule(), FixedVssegIntrIds[Factor - 2],
23912-
{InVTy, SI->getPointerOperandType(), XLenTy});
23864+
SI->getModule(), FixedVssegIntrIds[Factor - 2], {InVTy, XLenTy});
2391323865

2391423866
SmallVector<Value *, 10> Ops(InterleaveValues);
2391523867
Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
23916-
Ops.append({SI->getPointerOperand(), VL});
23868+
// All-ones mask.
23869+
Value *Mask = ConstantVector::getSplat(
23870+
FVTy->getElementCount(), ConstantInt::getTrue(SI->getContext()));
23871+
Ops.append({SI->getPointerOperand(), Mask, VL});
2391723872

2391823873
Builder.CreateCall(VssegNFunc, Ops);
2391923874
} else {
@@ -24035,15 +23990,9 @@ bool RISCVTargetLowering::lowerDeinterleavedVPLoad(
2403523990

2403623991
Value *Return = nullptr;
2403723992
if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
24038-
static const Intrinsic::ID FixedMaskedVlsegIntrIds[] = {
24039-
Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
24040-
Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
24041-
Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
24042-
Intrinsic::riscv_seg8_load_mask};
24043-
24044-
Return = Builder.CreateIntrinsic(FixedMaskedVlsegIntrIds[Factor - 2],
24045-
{FVTy, XLenTy},
24046-
{Load->getArgOperand(0), Mask, EVL});
23993+
Return =
23994+
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {FVTy, XLenTy},
23995+
{Load->getArgOperand(0), Mask, EVL});
2404723996
} else {
2404823997
static const Intrinsic::ID IntrMaskIds[] = {
2404923998
Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
@@ -24155,15 +24104,9 @@ bool RISCVTargetLowering::lowerInterleavedVPStore(
2415524104
XLenTy);
2415624105

2415724106
if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
24158-
static const Intrinsic::ID FixedMaskedVssegIntrIds[] = {
24159-
Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
24160-
Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
24161-
Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
24162-
Intrinsic::riscv_seg8_store_mask};
24163-
2416424107
SmallVector<Value *, 8> Operands(InterleaveOperands);
2416524108
Operands.append({Store->getArgOperand(1), Mask, EVL});
24166-
Builder.CreateIntrinsic(FixedMaskedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
24109+
Builder.CreateIntrinsic(FixedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
2416724110
Operands);
2416824111
return true;
2416924112
}

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-segN-load.ll

Lines changed: 7 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ define <8 x i8> @load_factor2(ptr %ptr) {
77
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
88
; CHECK-NEXT: vlseg2e8.v v7, (a0)
99
; CHECK-NEXT: ret
10-
%1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr %ptr, i64 8)
10+
%1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
1111
%2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
1212
%3 = extractvalue { <8 x i8>, <8 x i8> } %1, 1
1313
ret <8 x i8> %3
@@ -19,7 +19,7 @@ define <8 x i8> @load_factor3(ptr %ptr) {
1919
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
2020
; CHECK-NEXT: vlseg3e8.v v6, (a0)
2121
; CHECK-NEXT: ret
22-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr %ptr, i64 8)
22+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
2323
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
2424
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
2525
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -32,7 +32,7 @@ define <8 x i8> @load_factor4(ptr %ptr) {
3232
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
3333
; CHECK-NEXT: vlseg4e8.v v5, (a0)
3434
; CHECK-NEXT: ret
35-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr %ptr, i64 8)
35+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
3636
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
3737
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
3838
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -46,7 +46,7 @@ define <8 x i8> @load_factor5(ptr %ptr) {
4646
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
4747
; CHECK-NEXT: vlseg5e8.v v4, (a0)
4848
; CHECK-NEXT: ret
49-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr %ptr, i64 8)
49+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
5050
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
5151
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
5252
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -61,7 +61,7 @@ define <8 x i8> @load_factor6(ptr %ptr) {
6161
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
6262
; CHECK-NEXT: vlseg6e8.v v3, (a0)
6363
; CHECK-NEXT: ret
64-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr %ptr, i64 8)
64+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
6565
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
6666
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
6767
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -77,7 +77,7 @@ define <8 x i8> @load_factor7(ptr %ptr) {
7777
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
7878
; CHECK-NEXT: vlseg7e8.v v2, (a0)
7979
; CHECK-NEXT: ret
80-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr %ptr, i64 8)
80+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
8181
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
8282
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
8383
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -94,7 +94,7 @@ define <8 x i8> @load_factor8(ptr %ptr) {
9494
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
9595
; CHECK-NEXT: vlseg8e8.v v1, (a0)
9696
; CHECK-NEXT: ret
97-
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr %ptr, i64 8)
97+
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
9898
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
9999
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
100100
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -105,10 +105,3 @@ define <8 x i8> @load_factor8(ptr %ptr) {
105105
%9 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 7
106106
ret <8 x i8> %9
107107
}
108-
declare { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr, i64)
109-
declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr, i64)
110-
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr, i64)
111-
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr, i64)
112-
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr, i64)
113-
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr, i64)
114-
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr, i64)

0 commit comments

Comments
 (0)