From f78d5ef8052c6bfb7ef635045829aa82453c662a Mon Sep 17 00:00:00 2001 From: Philip Reames Date: Tue, 22 Jul 2025 08:16:13 -0700 Subject: [PATCH 1/4] [RISCV] Rewrite deinterleaveN one active as vlse optimization as DAG combine This reworks an existing optimization on the fixed vector (shuffle based) deinterleave lowering into a DAG combine. This has the effect of making it kick in much more widely - in particular on the deinterleave intrinsic (i.e. scalable) path, deinterleaveN (without load) lowering, but also the intrinsic lowering paths. As posted, this is a POC/WIP as the impact here appears wider than I really expected, and my SDAG knowledge is failing me on a couple of details (called out explicitly in code comments addressed to reviewers). The big question I have is whether the impact here is "too big". I don't have much context on the intrinsic API usage, and am unclear if this rewrite applying would be considered acceptable. (There's also a couple of tests which clearly need to be rewritten to preserve test intent, but I'll do that once the general direction is agreed.) 
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 48 ++ .../Target/RISCV/RISCVInterleavedAccess.cpp | 22 - .../rvv/fixed-vectors-deinterleave-load.ll | 4 +- .../RISCV/rvv/fixed-vectors-segN-load.ll | 24 +- llvm/test/CodeGen/RISCV/rvv/pr141907.ll | 18 +- .../RISCV/rvv/vector-deinterleave-load.ll | 17 +- .../CodeGen/RISCV/rvv/vector-deinterleave.ll | 11 +- llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll | 520 ++++++++++++----- llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll | 544 +++++++++++++----- .../RISCV/rvv/vp-vector-interleaved-access.ll | 8 +- 10 files changed, 901 insertions(+), 315 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 4845a9c84e01f..13169adf20e90 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -20760,6 +20760,54 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, } break; } + case RISCVISD::TUPLE_EXTRACT: { + EVT VT = N->getValueType(0); + SDValue Tuple = N->getOperand(0); + unsigned Idx = N->getConstantOperandVal(1); + if (!Tuple.hasOneUse() || Tuple.getOpcode() != ISD::INTRINSIC_W_CHAIN) + break; + + unsigned NF = 0; + switch (Tuple.getConstantOperandVal(1)) { + default: break; + case Intrinsic::riscv_vlseg2_mask: NF = 2; break; + case Intrinsic::riscv_vlseg3_mask: NF = 3; break; + case Intrinsic::riscv_vlseg4_mask: NF = 4; break; + case Intrinsic::riscv_vlseg5_mask: NF = 5; break; + case Intrinsic::riscv_vlseg6_mask: NF = 6; break; + case Intrinsic::riscv_vlseg7_mask: NF = 7; break; + case Intrinsic::riscv_vlseg8_mask: NF = 8; break; + } + if (!NF || Subtarget.hasOptimizedSegmentLoadStore(NF)) + break; + + // @REVIEWERS - What's the right value to use for the mem size here? 
+ unsigned SEW = VT.getScalarSizeInBits(); + if (Log2_64(SEW) != Tuple.getConstantOperandVal(7)) + break; + unsigned Stride = SEW/8 * NF; + SDValue Offset = DAG.getConstant(SEW/8 * Idx, DL, XLenVT); + + SDValue Ops[] = { + /*Chain=*/Tuple.getOperand(0), + /*IntID=*/DAG.getTargetConstant(Intrinsic::riscv_vlse_mask, DL, XLenVT), + /*Passthru=*/Tuple.getOperand(2), + /*Ptr=*/DAG.getNode(ISD::ADD, DL, XLenVT, Tuple.getOperand(3), Offset), + /*Stride=*/DAG.getConstant(Stride, DL, XLenVT), + /*Mask=*/Tuple.getOperand(4), + /*VL=*/Tuple.getOperand(5), + /*Policy=*/Tuple.getOperand(6) + }; + + SDVTList VTs = DAG.getVTList({VT, MVT::Other}); + // @REVIEWERS - What's the right MemVT and MMO to use here? + SDValue Result = + DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, + cast(Tuple)->getMemoryVT(), + cast(Tuple)->getMemOperand()); + SDValue Chain = Result.getValue(1); + return DAG.getMergeValues({Result, Chain}, DL); + } } return SDValue(); diff --git a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp index 25817b6d2707f..d1f0184e3cf8a 100644 --- a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp +++ b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp @@ -216,28 +216,6 @@ bool RISCVTargetLowering::lowerInterleavedLoad( if (!isLegalInterleavedAccessType(VTy, Factor, Alignment, AS, DL)) return false; - // If the segment load is going to be performed segment at a time anyways - // and there's only one element used, use a strided load instead. This - // will be equally fast, and create less vector register pressure. 
- if (Indices.size() == 1 && !Subtarget.hasOptimizedSegmentLoadStore(Factor)) { - unsigned ScalarSizeInBytes = DL.getTypeStoreSize(VTy->getElementType()); - Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes); - Value *Offset = ConstantInt::get(XLenTy, Indices[0] * ScalarSizeInBytes); - Value *BasePtr = Builder.CreatePtrAdd(Ptr, Offset); - // Note: Same VL as above, but i32 not xlen due to signature of - // vp.strided.load - VL = Builder.CreateElementCount(Builder.getInt32Ty(), - VTy->getElementCount()); - CallInst *CI = - Builder.CreateIntrinsic(Intrinsic::experimental_vp_strided_load, - {VTy, BasePtr->getType(), Stride->getType()}, - {BasePtr, Stride, Mask, VL}); - CI->addParamAttr(0, - Attribute::getWithAlignment(CI->getContext(), Alignment)); - Shuffles[0]->replaceAllUsesWith(CI); - return true; - }; - CallInst *VlsegN = Builder.CreateIntrinsic( FixedVlsegIntrIds[Factor - 2], {VTy, PtrTy, XLenTy}, {Ptr, Mask, VL}); diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll index 807651c9b40c6..0ec7a7e9f7f38 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll @@ -260,8 +260,10 @@ define {<2 x double>, <2 x double>} @vector_deinterleave_load_v2f64_v4f64(ptr %p define { <8 x i8>, <8 x i8>, <8 x i8> } @vector_deinterleave_load_factor3(ptr %p) { ; CHECK-LABEL: vector_deinterleave_load_factor3: ; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a1, 3 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma -; CHECK-NEXT: vlseg3e8.v v6, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret %vec = load <24 x i8>, ptr %p %d0 = call {<8 x i8>, <8 x i8>, <8 x i8>} @llvm.vector.deinterleave3(<24 x i8> %vec) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-segN-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-segN-load.ll index 4eed3df0d3f16..46c15fbb67319 100644 
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-segN-load.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-segN-load.ll @@ -16,8 +16,10 @@ define <8 x i8> @load_factor2(ptr %ptr) { define <8 x i8> @load_factor3(ptr %ptr) { ; CHECK-LABEL: load_factor3: ; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a1, 3 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma -; CHECK-NEXT: vlseg3e8.v v6, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret %1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8) %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0 @@ -29,8 +31,10 @@ define <8 x i8> @load_factor3(ptr %ptr) { define <8 x i8> @load_factor4(ptr %ptr) { ; CHECK-LABEL: load_factor4: ; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, a0, 3 +; CHECK-NEXT: li a1, 4 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma -; CHECK-NEXT: vlseg4e8.v v5, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8) %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0 @@ -43,8 +47,10 @@ define <8 x i8> @load_factor4(ptr %ptr) { define <8 x i8> @load_factor5(ptr %ptr) { ; CHECK-LABEL: load_factor5: ; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a1, 5 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma -; CHECK-NEXT: vlseg5e8.v v4, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8) %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0 @@ -58,8 +64,10 @@ define <8 x i8> @load_factor5(ptr %ptr) { define <8 x i8> @load_factor6(ptr %ptr) { ; CHECK-LABEL: load_factor6: ; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, a0, 5 +; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma -; 
CHECK-NEXT: vlseg6e8.v v3, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8) %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0 @@ -74,8 +82,10 @@ define <8 x i8> @load_factor6(ptr %ptr) { define <8 x i8> @load_factor7(ptr %ptr) { ; CHECK-LABEL: load_factor7: ; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, a0, 6 +; CHECK-NEXT: li a1, 7 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma -; CHECK-NEXT: vlseg7e8.v v2, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8) %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0 @@ -91,8 +101,10 @@ define <8 x i8> @load_factor7(ptr %ptr) { define <8 x i8> @load_factor8(ptr %ptr) { ; CHECK-LABEL: load_factor8: ; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, a0, 7 +; CHECK-NEXT: li a1, 8 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma -; CHECK-NEXT: vlseg8e8.v v1, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8) %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/pr141907.ll b/llvm/test/CodeGen/RISCV/rvv/pr141907.ll index 648b47dc440c3..f93f88a5bc06c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/pr141907.ll +++ b/llvm/test/CodeGen/RISCV/rvv/pr141907.ll @@ -9,27 +9,29 @@ define void @pr141907(ptr %0) nounwind { ; CHECK-NEXT: slli a1, a1, 2 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.v.i v8, 0 ; 
CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma -; CHECK-NEXT: vmv.v.i v12, 0 +; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma +; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: addi a3, sp, 20 +; CHECK-NEXT: li a4, 12 ; CHECK-NEXT: .LBB0_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vs4r.v v8, (a2) ; CHECK-NEXT: vsetvli a1, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, ma -; CHECK-NEXT: vnsrl.wi v11, v9, 0, v0.t -; CHECK-NEXT: vsetvli a3, zero, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32.v v8, (a2) +; CHECK-NEXT: vnsrl.wi v9, v8, 0, v0.t +; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma +; CHECK-NEXT: vlse32.v v8, (a3), a4 ; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, ma -; CHECK-NEXT: vsseg2e16.v v11, (zero) +; CHECK-NEXT: vsseg2e16.v v9, (zero) ; CHECK-NEXT: bnez a1, .LBB0_1 ; CHECK-NEXT: .LBB0_2: # %while.body5 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma -; CHECK-NEXT: vse16.v v9, (a0) +; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: j .LBB0_2 entry: br label %vector.body diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll index f9f0aa67a9034..688a44324c09a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll @@ -371,8 +371,10 @@ define {, } @vector_deinterleave_load_nxv2p0 define { , , } @vector_deinterleave_load_factor3(ptr %p) { ; CHECK-LABEL: vector_deinterleave_load_factor3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma -; CHECK-NEXT: vlseg3e8.v v6, (a0) +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a1, 3 +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma +; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret %vec = load , ptr %p %d0 = call {, , } @llvm.vector.deinterleave3( %vec) @@ -407,8 +409,9 @@ define { , , , 
@vector_deinterleave_load_factor4_oneactive(ptr %p) { ; CHECK-LABEL: vector_deinterleave_load_factor4_oneactive: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma -; CHECK-NEXT: vlseg4e8.v v8, (a0) +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma +; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret %vec = load , ptr %p %d0 = call { , , , } @llvm.vector.deinterleave4( %vec) @@ -419,8 +422,10 @@ define @vector_deinterleave_load_factor4_oneactive(ptr %p) { define @vector_deinterleave_load_factor4_oneactive2(ptr %p) { ; CHECK-LABEL: vector_deinterleave_load_factor4_oneactive2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma -; CHECK-NEXT: vlseg4e8.v v5, (a0) +; CHECK-NEXT: addi a0, a0, 3 +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma +; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret %vec = load , ptr %p %d0 = call { , , , } @llvm.vector.deinterleave4( %vec) diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll index 0a96e4ff7fba7..a84608b55f0fc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll @@ -3712,8 +3712,9 @@ define @vector_deinterleave_nxv1f32_nxv8f32_oneactive( @vector_deinterleave_nxv1f32_nxv8f32_oneactive2( @test_vlseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -209,8 +211,10 @@ entry: define 
@test_vlseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -236,8 +240,10 @@ entry: define @test_vlseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -263,8 +269,10 @@ entry: define @test_vlseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -290,8 +298,10 @@ entry: define @test_vlseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vlseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -317,8 +327,10 @@ entry: define @test_vlseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 4 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -344,8 +356,10 @@ entry: define @test_vlseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 4 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -371,8 +385,10 @@ entry: define @test_vlseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 4 ; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -398,8 +414,10 @@ entry: define @test_vlseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 4 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -425,8 +443,10 @@ entry: define @test_vlseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 4 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -452,8 +472,10 @@ entry: define @test_vlseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 5 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; 
CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -479,8 +501,10 @@ entry: define @test_vlseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 5 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -506,8 +530,10 @@ entry: define @test_vlseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 5 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -533,8 +559,10 @@ entry: define @test_vlseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 5 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -560,8 +588,10 @@ entry: define @test_vlseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -587,8 +617,10 @@ entry: define @test_vlseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -614,8 +646,10 @@ entry: define @test_vlseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ 
-641,8 +675,10 @@ entry: define @test_vlseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -668,8 +704,10 @@ entry: define @test_vlseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 7 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -695,8 +733,10 @@ entry: define @test_vlseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 7 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -722,8 +762,10 @@ entry: define @test_vlseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vlseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 7 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -749,8 +791,10 @@ entry: define @test_vlseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 7 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -776,8 +820,10 @@ entry: define @test_vlseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -803,8 +849,10 @@ entry: define @test_vlseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 8 ; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -830,8 +878,10 @@ entry: define @test_vlseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -857,8 +907,10 @@ entry: define @test_vlseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 3) @@ -1013,8 +1065,10 @@ entry: define @test_vlseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; 
CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1039,8 +1093,10 @@ entry: define @test_vlseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1065,8 +1121,10 @@ entry: define @test_vlseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1091,8 +1149,10 @@ entry: define @test_vlseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1117,8 +1177,10 @@ entry: define @test_vlseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1143,8 +1205,10 @@ entry: define @test_vlseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1169,8 +1233,10 @@ entry: define @test_vlseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 
%vl, i32 1, i32 4) @@ -1195,8 +1261,10 @@ entry: define @test_vlseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1221,8 +1289,10 @@ entry: define @test_vlseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1247,8 +1317,10 @@ entry: define @test_vlseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1273,8 +1345,10 @@ entry: define @test_vlseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr 
%base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1299,8 +1373,10 @@ entry: define @test_vlseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1325,8 +1401,10 @@ entry: define @test_vlseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1351,8 +1429,10 @@ entry: define @test_vlseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1377,8 +1457,10 @@ entry: define @test_vlseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1403,8 +1485,10 @@ entry: define @test_vlseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1429,8 +1513,10 @@ entry: define @test_vlseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: 
vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1455,8 +1541,10 @@ entry: define @test_vlseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1481,8 +1569,10 @@ entry: define @test_vlseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1507,8 +1597,10 @@ entry: define @test_vlseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -1637,8 +1729,10 @@ entry: define @test_vlseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1663,8 +1757,10 @@ entry: define @test_vlseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1689,8 +1785,10 @@ entry: define @test_vlseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1715,8 +1813,10 @@ entry: define @test_vlseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1741,8 +1841,10 @@ entry: define @test_vlseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1767,8 +1869,10 @@ entry: define @test_vlseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, 
i32 %vl, i32 1, i32 5) @@ -1793,8 +1897,10 @@ entry: define @test_vlseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 20 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1819,8 +1925,10 @@ entry: define @test_vlseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 20 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1845,8 +1953,10 @@ entry: define @test_vlseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1871,8 +1981,10 @@ entry: define @test_vlseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr 
%base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1897,8 +2009,10 @@ entry: define @test_vlseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 28 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1923,8 +2037,10 @@ entry: define @test_vlseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 28 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1949,8 +2065,10 @@ entry: define @test_vlseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -1975,8 +2093,10 @@ entry: define @test_vlseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -2079,8 +2199,10 @@ entry: define @test_vlseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -2105,8 +2227,10 @@ entry: define @test_vlseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: 
vlseg3e64.v v6, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -2131,8 +2255,10 @@ entry: define @test_vlseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -2157,8 +2283,10 @@ entry: define @test_vlseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -2183,8 +2311,10 @@ entry: define @test_vlseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 40 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -2209,8 +2339,10 @@ entry: define @test_vlseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 48 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -2235,8 +2367,10 @@ entry: define @test_vlseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 56 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -2261,8 +2395,10 @@ entry: define @test_vlseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", 
, 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -2411,8 +2547,10 @@ entry: define @test_vlseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2436,8 +2574,10 @@ entry: define @test_vlseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2461,8 +2601,10 @@ entry: define @test_vlseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2486,8 +2628,10 @@ entry: define 
@test_vlseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2511,8 +2655,10 @@ entry: define @test_vlseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2536,8 +2682,10 @@ entry: define @test_vlseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2561,8 +2709,10 @@ entry: define @test_vlseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vlseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2586,8 +2736,10 @@ entry: define @test_vlseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2611,8 +2763,10 @@ entry: define @test_vlseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2636,8 +2790,10 @@ entry: define @test_vlseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; 
CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2661,8 +2817,10 @@ entry: define @test_vlseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2686,8 +2844,10 @@ entry: define @test_vlseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2711,8 +2871,10 @@ entry: define @test_vlseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; 
CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2736,8 +2898,10 @@ entry: define @test_vlseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2761,8 +2925,10 @@ entry: define @test_vlseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2786,8 +2952,10 @@ entry: define @test_vlseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2811,8 +2979,10 @@ entry: define @test_vlseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2836,8 +3006,10 @@ entry: define @test_vlseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -2861,8 +3033,10 @@ entry: define @test_vlseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, 
i32 %vl, i32 1, i32 4) @@ -2886,8 +3060,10 @@ entry: define @test_vlseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -3011,8 +3187,10 @@ entry: define @test_vlseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3036,8 +3214,10 @@ entry: define @test_vlseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3061,8 +3241,10 @@ entry: define @test_vlseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr 
%base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3086,8 +3268,10 @@ entry: define @test_vlseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3111,8 +3295,10 @@ entry: define @test_vlseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3136,8 +3322,10 @@ entry: define @test_vlseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3161,8 +3349,10 @@ entry: define @test_vlseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 20 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3186,8 +3376,10 @@ entry: define @test_vlseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 20 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3211,8 +3403,10 @@ entry: define @test_vlseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; 
CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3236,8 +3430,10 @@ entry: define @test_vlseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3261,8 +3457,10 @@ entry: define @test_vlseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 28 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3286,8 +3484,10 @@ entry: define @test_vlseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 28 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3311,8 +3511,10 @@ entry: define @test_vlseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3336,8 +3538,10 @@ entry: define @test_vlseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 5) @@ -3436,8 +3640,10 @@ entry: define @test_vlseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", 
, 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -3461,8 +3667,10 @@ entry: define @test_vlseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -3486,8 +3694,10 @@ entry: define @test_vlseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -3511,8 +3721,10 @@ entry: define @test_vlseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -3536,8 +3748,10 @@ entry: define 
@test_vlseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 40 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -3561,8 +3775,10 @@ entry: define @test_vlseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 48 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -3586,8 +3802,10 @@ entry: define @test_vlseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 56 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -3611,8 +3829,10 @@ entry: define @test_vlseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vlseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 6) @@ -3761,8 +3981,10 @@ entry: define @test_vlseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -3786,8 +4008,10 @@ entry: define @test_vlseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -3811,8 +4035,10 @@ entry: define @test_vlseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; 
CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -3836,8 +4062,10 @@ entry: define @test_vlseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -3861,8 +4089,10 @@ entry: define @test_vlseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -3886,8 +4116,10 @@ entry: define @test_vlseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; 
CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -3911,8 +4143,10 @@ entry: define @test_vlseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -3936,8 +4170,10 @@ entry: define @test_vlseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -3961,8 +4197,10 @@ entry: define @test_vlseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -3986,8 +4224,10 @@ entry: define @test_vlseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -4011,8 +4251,10 @@ entry: define @test_vlseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -4036,8 +4278,10 @@ entry: define @test_vlseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, 
%mask, i32 %vl, i32 1, i32 4) @@ -4061,8 +4305,10 @@ entry: define @test_vlseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -4086,8 +4332,10 @@ entry: define @test_vlseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -4111,8 +4359,10 @@ entry: define @test_vlseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -4136,8 +4386,10 @@ entry: define 
@test_vlseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -4161,8 +4413,10 @@ entry: define @test_vlseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -4186,8 +4440,10 @@ entry: define @test_vlseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -4211,8 +4467,10 @@ entry: define @test_vlseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vlseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) @@ -4236,8 +4494,10 @@ entry: define @test_vlseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i32 %vl, i32 1, i32 4) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll index 16e5e7b9199a3..5a5a078a46ad4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll @@ -194,8 +194,10 @@ entry: define @test_vlseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -206,8 +208,10 @@ entry: 
define @test_vlseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a2 ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -233,8 +237,10 @@ entry: define @test_vlseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -260,8 +266,10 @@ entry: define @test_vlseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -287,8 +295,10 @@ entry: define @test_vlseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: 
test_vlseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -314,8 +324,10 @@ entry: define @test_vlseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -341,8 +353,10 @@ entry: define @test_vlseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 4 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -353,8 +367,10 @@ entry: define @test_vlseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: 
li a2, 4 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a2 ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -380,8 +396,10 @@ entry: define @test_vlseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 4 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -407,8 +425,10 @@ entry: define @test_vlseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 4 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -434,8 +454,10 @@ entry: define @test_vlseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 4 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; 
CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -461,8 +483,10 @@ entry: define @test_vlseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 4 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -488,8 +512,10 @@ entry: define @test_vlseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 5 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -500,8 +526,10 @@ entry: define @test_vlseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 5 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a2 ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -527,8 +555,10 @@ entry: define @test_vlseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 5 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -554,8 +584,10 @@ entry: define @test_vlseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 5 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -581,8 +613,10 @@ entry: define @test_vlseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 5 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, 
i64 3) @@ -608,8 +642,10 @@ entry: define @test_vlseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -620,8 +656,10 @@ entry: define @test_vlseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a2 ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -647,8 +685,10 @@ entry: define @test_vlseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -674,8 +714,10 @@ entry: define @test_vlseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { ; 
CHECK-LABEL: test_vlseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -701,8 +743,10 @@ entry: define @test_vlseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -728,8 +772,10 @@ entry: define @test_vlseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 7 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -740,8 +786,10 @@ entry: define @test_vlseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; 
CHECK-NEXT: li a2, 7 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a2 ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -767,8 +815,10 @@ entry: define @test_vlseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 7 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -794,8 +844,10 @@ entry: define @test_vlseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 7 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -821,8 +873,10 @@ entry: define @test_vlseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 7 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, 
v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -848,8 +902,10 @@ entry: define @test_vlseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -860,8 +916,10 @@ entry: define @test_vlseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0) +; CHECK-NEXT: vlse8.v v8, (a0), a2 ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -887,8 +945,10 @@ entry: define @test_vlseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -914,8 +974,10 @@ entry: define @test_vlseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -941,8 +1003,10 @@ entry: define @test_vlseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 3) @@ -1097,8 +1161,10 @@ entry: define @test_vlseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, 
i64 4) @@ -1123,8 +1189,10 @@ entry: define @test_vlseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1149,8 +1217,10 @@ entry: define @test_vlseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1175,8 +1245,10 @@ entry: define @test_vlseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1201,8 +1273,10 @@ entry: define @test_vlseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, 
%mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1227,8 +1301,10 @@ entry: define @test_vlseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1253,8 +1329,10 @@ entry: define @test_vlseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1279,8 +1357,10 @@ entry: define @test_vlseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1305,8 +1385,10 @@ entry: define @test_vlseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1331,8 +1413,10 @@ entry: define @test_vlseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1357,8 +1441,10 @@ entry: define @test_vlseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, 
(a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1383,8 +1469,10 @@ entry: define @test_vlseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1409,8 +1497,10 @@ entry: define @test_vlseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1435,8 +1525,10 @@ entry: define @test_vlseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", 
, 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1461,8 +1553,10 @@ entry: define @test_vlseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1487,8 +1581,10 @@ entry: define @test_vlseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1513,8 +1609,10 @@ entry: define @test_vlseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, 
%mask, i64 %vl, i64 1, i64 4) @@ -1539,8 +1637,10 @@ entry: define @test_vlseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1565,8 +1665,10 @@ entry: define @test_vlseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1591,8 +1693,10 @@ entry: define @test_vlseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -1721,8 +1825,10 @@ entry: define 
@test_vlseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -1747,8 +1853,10 @@ entry: define @test_vlseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -1773,8 +1881,10 @@ entry: define @test_vlseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -1799,8 +1909,10 @@ entry: define @test_vlseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: 
test_vlseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -1825,8 +1937,10 @@ entry: define @test_vlseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -1851,8 +1965,10 @@ entry: define @test_vlseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -1877,8 +1993,10 @@ entry: define @test_vlseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; 
CHECK-NEXT: li a2, 20 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -1903,8 +2021,10 @@ entry: define @test_vlseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 20 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -1929,8 +2049,10 @@ entry: define @test_vlseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -1955,8 +2077,10 @@ entry: define @test_vlseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; 
CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -1981,8 +2105,10 @@ entry: define @test_vlseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 28 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -2007,8 +2133,10 @@ entry: define @test_vlseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 28 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -2033,8 +2161,10 @@ entry: define @test_vlseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -2059,8 +2189,10 @@ entry: define @test_vlseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -2163,8 +2295,10 @@ entry: define @test_vlseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -2189,8 +2323,10 @@ entry: define @test_vlseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, 
i64 %vl, i64 1, i64 6) @@ -2215,8 +2351,10 @@ entry: define @test_vlseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -2241,8 +2379,10 @@ entry: define @test_vlseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -2267,8 +2407,10 @@ entry: define @test_vlseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 40 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -2293,8 +2435,10 @@ entry: define @test_vlseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr 
%base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 48 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -2319,8 +2463,10 @@ entry: define @test_vlseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 56 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -2345,8 +2491,10 @@ entry: define @test_vlseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -2495,8 +2643,10 @@ entry: define @test_vlseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2520,8 +2670,10 @@ entry: define @test_vlseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2545,8 +2697,10 @@ entry: define @test_vlseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2570,8 +2724,10 @@ entry: define @test_vlseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: 
vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2595,8 +2751,10 @@ entry: define @test_vlseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2620,8 +2778,10 @@ entry: define @test_vlseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2645,8 +2805,10 @@ entry: define @test_vlseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2670,8 +2832,10 @@ entry: define @test_vlseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2695,8 +2859,10 @@ entry: define @test_vlseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2720,8 +2886,10 @@ entry: define @test_vlseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2745,8 +2913,10 @@ entry: define @test_vlseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2770,8 +2940,10 @@ entry: define @test_vlseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2795,8 +2967,10 @@ entry: define @test_vlseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, 
i64 %vl, i64 1, i64 4) @@ -2820,8 +2994,10 @@ entry: define @test_vlseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2845,8 +3021,10 @@ entry: define @test_vlseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2870,8 +3048,10 @@ entry: define @test_vlseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2895,8 +3075,10 @@ entry: define @test_vlseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr 
%base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2920,8 +3102,10 @@ entry: define @test_vlseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2945,8 +3129,10 @@ entry: define @test_vlseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -2970,8 +3156,10 @@ entry: define @test_vlseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -3095,8 +3283,10 @@ entry: define @test_vlseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3120,8 +3310,10 @@ entry: define @test_vlseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3145,8 +3337,10 @@ entry: define @test_vlseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: 
vlseg3e32.v v6, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3170,8 +3364,10 @@ entry: define @test_vlseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3195,8 +3391,10 @@ entry: define @test_vlseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3220,8 +3418,10 @@ entry: define @test_vlseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3245,8 +3445,10 @@ entry: define @test_vlseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 20 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3270,8 +3472,10 @@ entry: define @test_vlseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 20 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3295,8 +3499,10 @@ entry: define @test_vlseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3320,8 +3526,10 @@ entry: define @test_vlseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3345,8 +3553,10 @@ entry: define @test_vlseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 28 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3370,8 +3580,10 @@ entry: define @test_vlseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 28 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 
%vl, i64 1, i64 5) @@ -3395,8 +3607,10 @@ entry: define @test_vlseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3420,8 +3634,10 @@ entry: define @test_vlseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t +; CHECK-NEXT: vlse32.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 5) @@ -3520,8 +3736,10 @@ entry: define @test_vlseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -3545,8 +3763,10 @@ entry: define @test_vlseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, 
i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -3570,8 +3790,10 @@ entry: define @test_vlseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -3595,8 +3817,10 @@ entry: define @test_vlseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma -; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -3620,8 +3844,10 @@ entry: define @test_vlseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 40 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -3645,8 +3871,10 @@ entry: define @test_vlseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 48 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -3670,8 +3898,10 @@ entry: define @test_vlseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 56 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -3695,8 +3925,10 @@ entry: define @test_vlseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 8 +; CHECK-NEXT: li a2, 64 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: 
vlseg8e64.v v7, (a0), v0.t +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 6) @@ -3845,8 +4077,10 @@ entry: define @test_vlseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -3870,8 +4104,10 @@ entry: define @test_vlseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -3895,8 +4131,10 @@ entry: define @test_vlseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -3920,8 +4158,10 @@ entry: define @test_vlseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 6 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -3945,8 +4185,10 @@ entry: define @test_vlseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -3970,8 +4212,10 @@ entry: define @test_vlseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -3995,8 +4239,10 @@ entry: define @test_vlseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4020,8 +4266,10 @@ entry: define @test_vlseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4045,8 +4293,10 @@ entry: define @test_vlseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, 
%mask, i64 %vl, i64 1, i64 4) @@ -4070,8 +4320,10 @@ entry: define @test_vlseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4095,8 +4347,10 @@ entry: define @test_vlseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4120,8 +4374,10 @@ entry: define @test_vlseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4145,8 +4401,10 @@ entry: define 
@test_vlseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4170,8 +4428,10 @@ entry: define @test_vlseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4195,8 +4455,10 @@ entry: define @test_vlseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4220,8 +4482,10 @@ entry: define @test_vlseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: 
test_vlseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4245,8 +4509,10 @@ entry: define @test_vlseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 14 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4270,8 +4536,10 @@ entry: define @test_vlseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4295,8 +4563,10 @@ entry: define @test_vlseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; 
CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) @@ -4320,8 +4590,10 @@ entry: define @test_vlseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t +; CHECK-NEXT: vlse16.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8) undef, ptr %base, %mask, i64 %vl, i64 1, i64 4) diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll index 23c0c826e85e3..2afb72fc71b39 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll @@ -674,16 +674,20 @@ define @load_factor2_oneactive(ptr %ptr, i32 %evl) { define @load_factor5_oneactive(ptr %ptr, i32 %evl) { ; RV32-LABEL: load_factor5_oneactive: ; RV32: # %bb.0: +; RV32-NEXT: addi a0, a0, 12 +; RV32-NEXT: li a2, 20 ; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg5e32.v v5, (a0) +; RV32-NEXT: vlse32.v v8, (a0), a2 ; RV32-NEXT: ret ; ; RV64-LABEL: load_factor5_oneactive: ; RV64: # %bb.0: ; RV64-NEXT: slli a1, a1, 32 +; RV64-NEXT: addi a0, a0, 12 ; RV64-NEXT: srli a1, a1, 32 +; RV64-NEXT: li a2, 20 ; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlseg5e32.v v5, (a0) +; RV64-NEXT: vlse32.v v8, (a0), a2 ; RV64-NEXT: ret %rvl = mul nuw 
i32 %evl, 5 %wide.masked.load = call @llvm.vp.load(ptr %ptr, splat (i1 true), i32 %rvl) From 0e1e8e5232aaa78e4c4d9ccb744a7ca586f44f0c Mon Sep 17 00:00:00 2001 From: Philip Reames Date: Tue, 22 Jul 2025 11:29:03 -0700 Subject: [PATCH 2/4] Address review comment --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 1e05030715398..dce23f06af078 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -20809,8 +20809,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, cast(Tuple)->getMemoryVT(), cast(Tuple)->getMemOperand()); - SDValue Chain = Result.getValue(1); - return DAG.getMergeValues({Result, Chain}, DL); + DAG.ReplaceAllUsesOfValueWith(Tuple.getValue(1), Result.getValue(1)); + return Result.getValue(0); } } From d2e7f2fe62caf5743db2eaff48b72d294f7e1c35 Mon Sep 17 00:00:00 2001 From: Philip Reames Date: Fri, 25 Jul 2025 11:35:11 -0700 Subject: [PATCH 3/4] Address review comments --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 58 ++++++++++++--------- 1 file changed, 33 insertions(+), 25 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index d6728d65e5e91..373d93fbd6f03 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -20831,42 +20831,50 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, unsigned NF = 0; switch (Tuple.getConstantOperandVal(1)) { - default: break; - case Intrinsic::riscv_vlseg2_mask: NF = 2; break; - case Intrinsic::riscv_vlseg3_mask: NF = 3; break; - case Intrinsic::riscv_vlseg4_mask: NF = 4; break; - case Intrinsic::riscv_vlseg5_mask: NF = 5; break; - case Intrinsic::riscv_vlseg6_mask: NF = 6; break; - case 
Intrinsic::riscv_vlseg7_mask: NF = 7; break; - case Intrinsic::riscv_vlseg8_mask: NF = 8; break; + default: + break; + case Intrinsic::riscv_vlseg2_mask: + case Intrinsic::riscv_vlseg3_mask: + case Intrinsic::riscv_vlseg4_mask: + case Intrinsic::riscv_vlseg5_mask: + case Intrinsic::riscv_vlseg6_mask: + case Intrinsic::riscv_vlseg7_mask: + case Intrinsic::riscv_vlseg8_mask: + NF = Tuple.getValueType().getRISCVVectorTupleNumFields(); + break; } + if (!NF || Subtarget.hasOptimizedSegmentLoadStore(NF)) break; - // @REVIEWERS - What's the right value to use for the mem size here? unsigned SEW = VT.getScalarSizeInBits(); if (Log2_64(SEW) != Tuple.getConstantOperandVal(7)) break; - unsigned Stride = SEW/8 * NF; - SDValue Offset = DAG.getConstant(SEW/8 * Idx, DL, XLenVT); + unsigned Stride = SEW / 8 * NF; + unsigned Offset = SEW / 8 * Idx; SDValue Ops[] = { - /*Chain=*/Tuple.getOperand(0), - /*IntID=*/DAG.getTargetConstant(Intrinsic::riscv_vlse_mask, DL, XLenVT), - /*Passthru=*/Tuple.getOperand(2), - /*Ptr=*/DAG.getNode(ISD::ADD, DL, XLenVT, Tuple.getOperand(3), Offset), - /*Stride=*/DAG.getConstant(Stride, DL, XLenVT), - /*Mask=*/Tuple.getOperand(4), - /*VL=*/Tuple.getOperand(5), - /*Policy=*/Tuple.getOperand(6) - }; + /*Chain=*/Tuple.getOperand(0), + /*IntID=*/DAG.getTargetConstant(Intrinsic::riscv_vlse_mask, DL, XLenVT), + /*Passthru=*/Tuple.getOperand(2), + /*Ptr=*/ + DAG.getNode(ISD::ADD, DL, XLenVT, Tuple.getOperand(3), + DAG.getConstant(Offset, DL, XLenVT)), + /*Stride=*/DAG.getConstant(Stride, DL, XLenVT), + /*Mask=*/Tuple.getOperand(4), + /*VL=*/Tuple.getOperand(5), + /*Policy=*/Tuple.getOperand(6)}; + + auto TupleMemSD = cast(Tuple); + // Match getTgtMemIntrinsic for non-unit stride case + EVT MemVT = TupleMemSD->getMemoryVT().getScalarType(); + MachineFunction &MF = DAG.getMachineFunction(); + MachineMemOperand *MMO = MF.getMachineMemOperand( + TupleMemSD->getMemOperand(), Offset, MemoryLocation::UnknownSize); SDVTList VTs = DAG.getVTList({VT, MVT::Other}); - // 
@REVIEWERS - What's the right MemVT and MMO to use here? - SDValue Result = - DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, - cast(Tuple)->getMemoryVT(), - cast(Tuple)->getMemOperand()); + SDValue Result = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, + Ops, MemVT, MMO); DAG.ReplaceAllUsesOfValueWith(Tuple.getValue(1), Result.getValue(1)); return Result.getValue(0); } From 73576588e0800cbcb4b3e502404ae9e9ea425e66 Mon Sep 17 00:00:00 2001 From: Philip Reames Date: Fri, 25 Jul 2025 11:41:15 -0700 Subject: [PATCH 4/4] Turn a check into an assert --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 373d93fbd6f03..9c63410fa919a 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -20848,8 +20848,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, break; unsigned SEW = VT.getScalarSizeInBits(); - if (Log2_64(SEW) != Tuple.getConstantOperandVal(7)) - break; + assert(Log2_64(SEW) == Tuple.getConstantOperandVal(7) && + "Type mismatch without bitcast?"); unsigned Stride = SEW / 8 * NF; unsigned Offset = SEW / 8 * Idx;