diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 870e393b40411..0f952384549ed 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -40,6 +40,12 @@ using namespace llvm;
 STATISTIC(NumInsertedVSETVL, "Number of VSETVL inst inserted");
 STATISTIC(NumCoalescedVSETVL, "Number of VSETVL inst coalesced");
 
+static cl::opt<bool> EnsureWholeVectorRegisterMoveValidVTYPE(
+    DEBUG_TYPE "-whole-vector-register-move-valid-vtype", cl::Hidden,
+    cl::desc("Insert vsetvlis before vmvNr.vs to ensure vtype is valid and "
+             "vill is cleared"),
+    cl::init(true));
+
 namespace {
 
 /// Given a virtual register \p Reg, return the corresponding VNInfo for it.
@@ -195,6 +201,14 @@ static bool hasUndefinedPassthru(const MachineInstr &MI) {
   return UseMO.getReg() == RISCV::NoRegister || UseMO.isUndef();
 }
 
+/// Return true if \p MI is a copy that will be lowered to one or more vmvNr.vs.
+static bool isVectorCopy(const TargetRegisterInfo *TRI,
+                         const MachineInstr &MI) {
+  return MI.isCopy() && MI.getOperand(0).getReg().isPhysical() &&
+         RISCVRegisterInfo::isRVVRegClass(
+             TRI->getMinimalPhysRegClass(MI.getOperand(0).getReg()));
+}
+
 /// Which subfields of VL or VTYPE have values we need to preserve?
 struct DemandedFields {
   // Some unknown property of VL is used. If demanded, must preserve entire
@@ -221,10 +235,13 @@ struct DemandedFields {
   bool SEWLMULRatio = false;
   bool TailPolicy = false;
   bool MaskPolicy = false;
+  // If this is true, we demand that VTYPE is set to some legal state, i.e. that
+  // vill is unset.
+  bool VILL = false;
 
   // Return true if any part of VTYPE was used
   bool usedVTYPE() const {
-    return SEW || LMUL || SEWLMULRatio || TailPolicy || MaskPolicy;
+    return SEW || LMUL || SEWLMULRatio || TailPolicy || MaskPolicy || VILL;
   }
 
   // Return true if any property of VL was used
@@ -239,6 +256,7 @@ struct DemandedFields {
     SEWLMULRatio = true;
     TailPolicy = true;
     MaskPolicy = true;
+    VILL = true;
   }
 
   // Mark all VL properties as demanded
@@ -263,6 +281,7 @@ struct DemandedFields {
     SEWLMULRatio |= B.SEWLMULRatio;
     TailPolicy |= B.TailPolicy;
     MaskPolicy |= B.MaskPolicy;
+    VILL |= B.VILL;
   }
 
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -308,7 +327,8 @@ struct DemandedFields {
     OS << ", ";
     OS << "SEWLMULRatio=" << SEWLMULRatio << ", ";
     OS << "TailPolicy=" << TailPolicy << ", ";
-    OS << "MaskPolicy=" << MaskPolicy;
+    OS << "MaskPolicy=" << MaskPolicy << ", ";
+    OS << "VILL=" << VILL;
     OS << "}";
   }
 #endif
@@ -503,6 +523,21 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     }
   }
 
+  // In §32.16.6, whole vector register moves have a dependency on SEW. At the
+  // MIR level though we don't encode the element type, and it gives the same
+  // result whatever the SEW may be.
+  //
+  // However it does need valid SEW, i.e. vill must be cleared. The entry to a
+  // function, calls and inline assembly may all set it, so make sure we clear
+  // it for whole register copies. Do this by leaving VILL demanded.
+  if (isVectorCopy(ST->getRegisterInfo(), MI)) {
+    Res.LMUL = DemandedFields::LMULNone;
+    Res.SEW = DemandedFields::SEWNone;
+    Res.SEWLMULRatio = false;
+    Res.TailPolicy = false;
+    Res.MaskPolicy = false;
+  }
+
   return Res;
 }
 
@@ -1208,6 +1243,18 @@ static VSETVLIInfo adjustIncoming(VSETVLIInfo PrevInfo, VSETVLIInfo NewInfo,
 // legal for MI, but may not be the state requested by MI.
void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info, const MachineInstr &MI) const { + if (isVectorCopy(ST->getRegisterInfo(), MI) && + (Info.isUnknown() || !Info.isValid() || Info.hasSEWLMULRatioOnly())) { + // Use an arbitrary but valid AVL and VTYPE so vill will be cleared. It may + // be coalesced into another vsetvli since we won't demand any fields. + VSETVLIInfo NewInfo; // Need a new VSETVLIInfo to clear SEWLMULRatioOnly + NewInfo.setAVLImm(1); + NewInfo.setVTYPE(RISCVII::VLMUL::LMUL_1, /*sew*/ 8, /*ta*/ true, + /*ma*/ true); + Info = NewInfo; + return; + } + if (!RISCVII::hasSEWOp(MI.getDesc().TSFlags)) return; @@ -1296,7 +1343,8 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB, for (const MachineInstr &MI : MBB) { transferBefore(Info, MI); - if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags)) + if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags) || + isVectorCopy(ST->getRegisterInfo(), MI)) HadVectorOp = true; transferAfter(Info, MI); @@ -1426,6 +1474,16 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) { PrefixTransparent = false; } + if (EnsureWholeVectorRegisterMoveValidVTYPE && + isVectorCopy(ST->getRegisterInfo(), MI)) { + if (!PrevInfo.isCompatible(DemandedFields::all(), CurInfo, LIS)) { + insertVSETVLI(MBB, MI, MI.getDebugLoc(), CurInfo, PrevInfo); + PrefixTransparent = false; + } + MI.addOperand(MachineOperand::CreateReg(RISCV::VTYPE, /*isDef*/ false, + /*isImp*/ true)); + } + uint64_t TSFlags = MI.getDesc().TSFlags; if (RISCVII::hasSEWOp(TSFlags)) { if (!PrevInfo.isCompatible(DemandedFields::all(), CurInfo, LIS)) { diff --git a/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll b/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll index c04e4fea7b2c2..64957ed6e4ba6 100644 --- a/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll +++ b/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll @@ -45,6 +45,7 @@ define @constraint_vd( %0, define @constraint_vm( %0, %1) nounwind { ; RV32I-LABEL: constraint_vm: ; RV32I: # %bb.0: +; RV32I-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32I-NEXT: vmv1r.v v9, v0 ; RV32I-NEXT: vmv1r.v v0, v8 ; RV32I-NEXT: #APP @@ -54,6 +55,7 @@ define @constraint_vm( %0, ; ; RV64I-LABEL: constraint_vm: ; RV64I: # %bb.0: +; RV64I-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64I-NEXT: vmv1r.v v9, v0 ; RV64I-NEXT: vmv1r.v v0, v8 ; RV64I-NEXT: #APP diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll index 163d9145bc362..ee0016ec080e2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll @@ -567,6 +567,7 @@ define @vp_abs_nxv16i64( %va, @vp_abs_nxv16i64( %va, @vp_bitreverse_nxv64i16( %va, @vp_bitreverse_nxv64i16( %va, @vp_bitreverse_nxv64i16( %va, @vp_bswap_nxv64i16( %va, @vp_bswap_nxv64i16( %va, @vp_bswap_nxv64i16( %va, @ret_nxv32i32_call_nxv32i32_nxv32i32_i32( @ret_nxv32i32_call_nxv32i32_nxv32i32_i32( @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_ ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 128 ; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv8r.v v16, v0 ; RV32-NEXT: call ext3 ; RV32-NEXT: addi sp, s0, -144 @@ -523,6 +526,7 @@ define fastcc @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_ ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 128 ; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-NEXT: vmv8r.v v16, v0 ; RV64-NEXT: call ext3 ; RV64-NEXT: addi 
sp, s0, -144 diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll index 9b27116fef7ca..2e181e0914c88 100644 --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll @@ -103,6 +103,7 @@ define target("riscv.vector.tuple", , 2) @caller_tuple_return( ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: call callee_tuple_return +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv2r.v v6, v8 ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: vmv2r.v v10, v6 @@ -119,6 +120,7 @@ define target("riscv.vector.tuple", , 2) @caller_tuple_return( ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call callee_tuple_return +; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-NEXT: vmv2r.v v6, v8 ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: vmv2r.v v10, v6 @@ -144,6 +146,7 @@ define void @caller_tuple_argument(target("riscv.vector.tuple", @llvm.vp.ceil.nxv4bf16(, @vp_ceil_vv_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv4bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -169,8 +169,8 @@ declare @llvm.vp.ceil.nxv8bf16(, @vp_ceil_vv_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv8bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -221,8 +221,8 @@ declare @llvm.vp.ceil.nxv16bf16(, < define @vp_ceil_vv_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv16bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -279,9 +279,9 @@ define @vp_ceil_vv_nxv32bf16( %va, ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12 ; CHECK-NEXT: lui a3, 307200 ; CHECK-NEXT: slli a1, a2, 1 @@ -582,8 +582,8 @@ define @vp_ceil_vv_nxv4f16( %va, @llvm.vp.ceil.nxv8f16(, @vp_ceil_vv_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI18_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -668,8 +668,8 @@ define @vp_ceil_vv_nxv8f16( %va, @llvm.vp.ceil.nxv16f16(, @vp_ceil_vv_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v12, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI20_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma 
+; ZVFH-NEXT: vmv1r.v v12, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0) ; ZVFH-NEXT: vfabs.v v16, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -754,8 +754,8 @@ define @vp_ceil_vv_nxv16f16( %va, @llvm.vp.ceil.nxv32f16(, @vp_ceil_vv_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v16, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI22_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vmv1r.v v16, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0) ; ZVFH-NEXT: vfabs.v v24, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -846,9 +846,9 @@ define @vp_ceil_vv_nxv32f16( %va, @llvm.vp.ceil.nxv4f32(, @vp_ceil_vv_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1112,8 +1112,8 @@ declare @llvm.vp.ceil.nxv8f32(, @vp_ceil_vv_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1156,8 +1156,8 @@ declare @llvm.vp.ceil.nxv16f32(, @vp_ceil_vv_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1242,10 +1242,10 @@ declare @llvm.vp.ceil.nxv2f64(, @vp_ceil_vv_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI36_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -1286,10 +1286,10 @@ declare @llvm.vp.ceil.nxv4f64(, @vp_ceil_vv_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI38_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -1330,10 +1330,10 @@ declare @llvm.vp.ceil.nxv7f64(, @vp_ceil_vv_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv7f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI40_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, 
mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1374,10 +1374,10 @@ declare @llvm.vp.ceil.nxv8f64(, @vp_ceil_vv_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI42_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1425,13 +1425,13 @@ define @vp_ceil_vv_nxv16f64( %va, < ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: lui a2, %hi(.LCPI44_0) ; CHECK-NEXT: srli a3, a1, 3 ; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2) ; CHECK-NEXT: sub a2, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v6, v0, a3 ; CHECK-NEXT: sltu a3, a0, a2 ; CHECK-NEXT: addi a3, a3, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll index a407cd048ffe3..08bf82d4bc796 100644 --- a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll @@ -197,9 +197,9 @@ entry: define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data) { ; RV64-LABEL: test_compresstore_v256i8: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv1r.v v7, v8 ; RV64-NEXT: li a2, 128 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vslidedown.vi v9, v0, 1 ; RV64-NEXT: vmv.x.s a3, v0 ; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma @@ -230,9 +230,9 @@ define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: sub sp, sp, a2 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv8r.v v24, v16 ; RV32-NEXT: li a2, 128 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vslidedown.vi v9, v0, 1 ; RV32-NEXT: li a3, 32 ; RV32-NEXT: vmv.x.s a4, v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll index ad176df71397e..f6c26bbba89fe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll @@ -18,11 +18,11 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lanes.b, <4 x i1> %sel) { ; RV32-LABEL: constant_folding_crash: ; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV32-NEXT: vmv1r.v v10, v0 ; RV32-NEXT: lw a0, 8(a0) ; RV32-NEXT: andi a0, a0, 1 ; RV32-NEXT: seqz a0, a0 -; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV32-NEXT: vmv.v.x v11, a0 ; RV32-NEXT: vmsne.vi v0, v11, 0 ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma @@ -43,11 +43,11 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan ; ; RV64-LABEL: constant_folding_crash: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: ld a0, 8(a0) ; RV64-NEXT: andi a0, a0, 1 
; RV64-NEXT: seqz a0, a0 -; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64-NEXT: vmv.v.x v13, a0 ; RV64-NEXT: vmsne.vi v0, v13, 0 ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll index f56a792fdef6a..ce4bc48dff042 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll @@ -1235,13 +1235,13 @@ declare @llvm.vp.ctlz.nxv16i64(, i1 immar define @vp_ctlz_nxv16i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv16i64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: fsrmi a4, 1 ; CHECK-NEXT: li a2, 52 ; CHECK-NEXT: srli a3, a1, 3 ; CHECK-NEXT: sub a5, a0, a1 -; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: sltu a3, a0, a5 ; CHECK-NEXT: addi a3, a3, -1 @@ -1270,11 +1270,11 @@ define @vp_ctlz_nxv16i64( %va, @vp_ctlz_zero_undef_nxv8i64_unmasked( @vp_ctlz_zero_undef_nxv16i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: fsrmi a3, 1 ; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: sub a4, a0, a1 -; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sltu a2, a0, a4 ; CHECK-NEXT: addi a2, a2, -1 @@ -2497,11 +2497,11 @@ define @vp_ctlz_zero_undef_nxv16i64( %va, ; ; CHECK-ZVBB-LABEL: vp_ctlz_zero_undef_nxv16i64: ; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-ZVBB-NEXT: vmv1r.v v24, v0 ; CHECK-ZVBB-NEXT: csrr a1, vlenb ; CHECK-ZVBB-NEXT: srli a2, a1, 3 ; CHECK-ZVBB-NEXT: sub a3, a0, a1 -; CHECK-ZVBB-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-ZVBB-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-ZVBB-NEXT: sltu a2, a0, a3 ; CHECK-ZVBB-NEXT: addi a2, a2, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll index 9e75dc9dccffd..52ddd9ab2f832 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll @@ -2022,6 +2022,7 @@ define @vp_ctpop_nxv16i64( %va, @vp_ctpop_nxv16i64( %va, @vp_ctpop_nxv16i64( %va, @vp_cttz_nxv16i64( %va, @vp_cttz_nxv16i64( %va, @vp_cttz_nxv16i64( %va, @vp_cttz_nxv16i64( %va, @vp_cttz_nxv16i64( %va, @vp_cttz_zero_undef_nxv16i64( %va, ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -4012,7 +4013,6 @@ define @vp_cttz_zero_undef_nxv16i64( %va, ; CHECK-NEXT: fsrmi a3, 1 ; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: sub a4, a0, a1 -; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sltu a2, a0, a4 ; CHECK-NEXT: addi a2, a2, -1 @@ -4057,11 +4057,11 @@ define @vp_cttz_zero_undef_nxv16i64( %va, ; ; CHECK-ZVBB-LABEL: vp_cttz_zero_undef_nxv16i64: ; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-ZVBB-NEXT: vmv1r.v v24, v0 ; CHECK-ZVBB-NEXT: csrr a1, vlenb ; CHECK-ZVBB-NEXT: srli a2, a1, 3 ; CHECK-ZVBB-NEXT: sub a3, a0, a1 -; CHECK-ZVBB-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-ZVBB-NEXT: vslidedown.vx v0, v0, a2 ; 
CHECK-ZVBB-NEXT: sltu a2, a0, a3 ; CHECK-ZVBB-NEXT: addi a2, a2, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll index b32d85bb1943a..a35cf14203f78 100644 --- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll @@ -227,9 +227,9 @@ define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8 ; CHECK-RV32-NEXT: add a2, sp, a2 ; CHECK-RV32-NEXT: addi a2, a2, 16 ; CHECK-RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-RV32-NEXT: vmv1r.v v7, v8 ; CHECK-RV32-NEXT: li a2, 128 -; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-RV32-NEXT: vslidedown.vi v9, v0, 1 ; CHECK-RV32-NEXT: li a3, 32 ; CHECK-RV32-NEXT: vmv.x.s a4, v0 @@ -338,9 +338,9 @@ define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8 ; CHECK-RV64-NEXT: add a2, sp, a2 ; CHECK-RV64-NEXT: addi a2, a2, 16 ; CHECK-RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; CHECK-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-RV64-NEXT: vmv1r.v v7, v8 ; CHECK-RV64-NEXT: li a2, 128 -; CHECK-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-RV64-NEXT: vslidedown.vi v9, v0, 1 ; CHECK-RV64-NEXT: vmv.x.s a3, v0 ; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma @@ -1626,8 +1626,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: bgez a1, .LBB61_30 ; CHECK-RV32-NEXT: .LBB61_29: # %cond.load109 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 29, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 28 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -1639,8 +1639,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: bgez a2, .LBB61_32 ; CHECK-RV32-NEXT: # %bb.31: # %cond.load113 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 30, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a2 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 29 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -1786,9 +1786,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: bgez a3, .LBB61_66 ; CHECK-RV32-NEXT: .LBB61_65: # %cond.load241 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 62 ; CHECK-RV32-NEXT: li a4, 61 @@ -1939,9 +1938,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: bgez a2, .LBB61_100 ; CHECK-RV32-NEXT: .LBB61_99: # %cond.load369 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 94 ; CHECK-RV32-NEXT: li a4, 93 @@ -2092,9 +2090,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: bgez a3, .LBB61_134 ; CHECK-RV32-NEXT: .LBB61_133: # %cond.load497 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; 
CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 126 ; CHECK-RV32-NEXT: li a4, 125 @@ -2245,9 +2242,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: bgez a2, .LBB61_168 ; CHECK-RV32-NEXT: .LBB61_167: # %cond.load625 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 158 ; CHECK-RV32-NEXT: li a4, 157 @@ -2398,9 +2394,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: bgez a3, .LBB61_202 ; CHECK-RV32-NEXT: .LBB61_201: # %cond.load753 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 190 ; CHECK-RV32-NEXT: li a4, 189 @@ -2551,9 +2546,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: bgez a2, .LBB61_236 ; CHECK-RV32-NEXT: .LBB61_235: # %cond.load881 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 222 ; CHECK-RV32-NEXT: li a4, 221 @@ -2704,9 +2698,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: bgez a3, .LBB61_270 ; CHECK-RV32-NEXT: .LBB61_269: # %cond.load1009 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 254 ; CHECK-RV32-NEXT: li a4, 253 @@ -3907,10 +3900,9 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_2 ; CHECK-RV32-NEXT: .LBB61_545: # %cond.load1 ; CHECK-RV32-NEXT: lbu a1, 0(a0) +; CHECK-RV32-NEXT: vsetivli zero, 2, e8, m1, tu, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a1 -; CHECK-RV32-NEXT: vsetivli zero, 2, e8, m1, tu, ma ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 1 ; CHECK-RV32-NEXT: addi a0, a0, 1 ; CHECK-RV32-NEXT: vmv1r.v v16, v8 @@ -3920,8 +3912,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_3 ; CHECK-RV32-NEXT: .LBB61_546: # %cond.load5 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 3, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 2 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -3932,8 +3924,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_4 ; CHECK-RV32-NEXT: .LBB61_547: # %cond.load9 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 4, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 3 ; 
CHECK-RV32-NEXT: addi a0, a0, 1 @@ -3944,8 +3936,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_5 ; CHECK-RV32-NEXT: .LBB61_548: # %cond.load13 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 5, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 4 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -3956,8 +3948,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_6 ; CHECK-RV32-NEXT: .LBB61_549: # %cond.load17 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 6, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 5 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -3968,8 +3960,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_7 ; CHECK-RV32-NEXT: .LBB61_550: # %cond.load21 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 7, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 6 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -3980,8 +3972,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_8 ; CHECK-RV32-NEXT: .LBB61_551: # %cond.load25 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 8, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 7 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -3992,8 +3984,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_9 ; CHECK-RV32-NEXT: .LBB61_552: # %cond.load29 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 9, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 8 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4004,8 +3996,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_10 ; CHECK-RV32-NEXT: .LBB61_553: # %cond.load33 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 10, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 9 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4016,8 +4008,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_11 ; CHECK-RV32-NEXT: .LBB61_554: # %cond.load37 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 11, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 10 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4028,8 +4020,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_12 ; CHECK-RV32-NEXT: .LBB61_555: # %cond.load41 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 12, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi 
v8, v9, 11 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4040,8 +4032,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_13 ; CHECK-RV32-NEXT: .LBB61_556: # %cond.load45 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 13, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 12 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4052,8 +4044,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_14 ; CHECK-RV32-NEXT: .LBB61_557: # %cond.load49 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 14, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 13 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4064,8 +4056,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_15 ; CHECK-RV32-NEXT: .LBB61_558: # %cond.load53 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 15, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 14 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4076,8 +4068,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_16 ; CHECK-RV32-NEXT: .LBB61_559: # %cond.load57 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 16, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 15 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4088,8 +4080,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_17 ; CHECK-RV32-NEXT: .LBB61_560: # %cond.load61 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 17, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 16 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4100,8 +4092,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_18 ; CHECK-RV32-NEXT: .LBB61_561: # %cond.load65 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 18, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 17 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4112,8 +4104,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_19 ; CHECK-RV32-NEXT: .LBB61_562: # %cond.load69 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 19, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 18 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4124,8 +4116,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_20 ; CHECK-RV32-NEXT: .LBB61_563: # %cond.load73 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 20, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; 
CHECK-RV32-NEXT: vslideup.vi v8, v9, 19 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4136,8 +4128,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_21 ; CHECK-RV32-NEXT: .LBB61_564: # %cond.load77 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 21, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 20 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4148,8 +4140,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_22 ; CHECK-RV32-NEXT: .LBB61_565: # %cond.load81 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 22, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 21 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4160,8 +4152,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_23 ; CHECK-RV32-NEXT: .LBB61_566: # %cond.load85 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 23, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 22 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4172,8 +4164,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_24 ; CHECK-RV32-NEXT: .LBB61_567: # %cond.load89 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 24, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 23 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4184,8 +4176,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_25 ; CHECK-RV32-NEXT: .LBB61_568: # %cond.load93 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 25, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 24 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4196,8 +4188,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_26 ; CHECK-RV32-NEXT: .LBB61_569: # %cond.load97 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 26, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 25 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4208,8 +4200,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_27 ; CHECK-RV32-NEXT: .LBB61_570: # %cond.load101 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 27, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 26 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4220,8 +4212,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_28 ; CHECK-RV32-NEXT: .LBB61_571: # %cond.load105 ; CHECK-RV32-NEXT: lbu a1, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vsetivli zero, 28, e8, m1, tu, ma +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; 
CHECK-RV32-NEXT: vmv.s.x v9, a1 ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 27 ; CHECK-RV32-NEXT: addi a0, a0, 1 @@ -4247,9 +4239,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_36 ; CHECK-RV32-NEXT: .LBB61_573: # %cond.load125 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 33 ; CHECK-RV32-NEXT: li a4, 32 @@ -4263,9 +4254,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_37 ; CHECK-RV32-NEXT: .LBB61_574: # %cond.load129 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 34 ; CHECK-RV32-NEXT: li a4, 33 @@ -4279,9 +4269,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_38 ; CHECK-RV32-NEXT: .LBB61_575: # %cond.load133 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 35 ; CHECK-RV32-NEXT: li a4, 34 @@ -4295,9 +4284,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_39 ; CHECK-RV32-NEXT: .LBB61_576: # %cond.load137 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 36 ; CHECK-RV32-NEXT: li a4, 35 @@ -4311,9 +4299,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_40 ; CHECK-RV32-NEXT: .LBB61_577: # %cond.load141 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 37 ; CHECK-RV32-NEXT: li a4, 36 @@ -4327,9 +4314,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_41 ; CHECK-RV32-NEXT: .LBB61_578: # %cond.load145 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 38 ; CHECK-RV32-NEXT: li a4, 37 @@ -4343,9 +4329,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_42 ; CHECK-RV32-NEXT: .LBB61_579: # %cond.load149 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 39 ; CHECK-RV32-NEXT: li a4, 38 @@ -4359,9 +4344,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, 
< ; CHECK-RV32-NEXT: j .LBB61_43 ; CHECK-RV32-NEXT: .LBB61_580: # %cond.load153 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 40 ; CHECK-RV32-NEXT: li a4, 39 @@ -4375,9 +4359,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_44 ; CHECK-RV32-NEXT: .LBB61_581: # %cond.load157 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 41 ; CHECK-RV32-NEXT: li a4, 40 @@ -4391,9 +4374,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_45 ; CHECK-RV32-NEXT: .LBB61_582: # %cond.load161 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 42 ; CHECK-RV32-NEXT: li a4, 41 @@ -4407,9 +4389,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_46 ; CHECK-RV32-NEXT: .LBB61_583: # %cond.load165 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 43 ; CHECK-RV32-NEXT: li a4, 42 @@ -4423,9 +4404,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_47 ; CHECK-RV32-NEXT: .LBB61_584: # %cond.load169 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 44 ; CHECK-RV32-NEXT: li a4, 43 @@ -4439,9 +4419,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_48 ; CHECK-RV32-NEXT: .LBB61_585: # %cond.load173 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 45 ; CHECK-RV32-NEXT: li a4, 44 @@ -4455,9 +4434,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_49 ; CHECK-RV32-NEXT: .LBB61_586: # %cond.load177 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 46 ; CHECK-RV32-NEXT: li a4, 45 @@ -4471,9 +4449,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_50 ; CHECK-RV32-NEXT: .LBB61_587: # %cond.load181 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; 
CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 47 ; CHECK-RV32-NEXT: li a4, 46 @@ -4487,9 +4464,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_51 ; CHECK-RV32-NEXT: .LBB61_588: # %cond.load185 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 48 ; CHECK-RV32-NEXT: li a4, 47 @@ -4503,9 +4479,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_52 ; CHECK-RV32-NEXT: .LBB61_589: # %cond.load189 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 49 ; CHECK-RV32-NEXT: li a4, 48 @@ -4519,9 +4494,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_53 ; CHECK-RV32-NEXT: .LBB61_590: # %cond.load193 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 50 ; CHECK-RV32-NEXT: li a4, 49 @@ -4535,9 +4509,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_54 ; CHECK-RV32-NEXT: .LBB61_591: # %cond.load197 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 51 ; CHECK-RV32-NEXT: li a4, 50 @@ -4551,9 +4524,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_55 ; CHECK-RV32-NEXT: .LBB61_592: # %cond.load201 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 52 ; CHECK-RV32-NEXT: li a4, 51 @@ -4567,9 +4539,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_56 ; CHECK-RV32-NEXT: .LBB61_593: # %cond.load205 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 53 ; CHECK-RV32-NEXT: li a4, 52 @@ -4583,9 +4554,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_57 ; CHECK-RV32-NEXT: .LBB61_594: # %cond.load209 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 54 ; CHECK-RV32-NEXT: li a4, 53 @@ -4599,9 +4569,8 @@ define 
<512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_58 ; CHECK-RV32-NEXT: .LBB61_595: # %cond.load213 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 55 ; CHECK-RV32-NEXT: li a4, 54 @@ -4615,9 +4584,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_59 ; CHECK-RV32-NEXT: .LBB61_596: # %cond.load217 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 56 ; CHECK-RV32-NEXT: li a4, 55 @@ -4631,9 +4599,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_60 ; CHECK-RV32-NEXT: .LBB61_597: # %cond.load221 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 57 ; CHECK-RV32-NEXT: li a4, 56 @@ -4647,9 +4614,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_61 ; CHECK-RV32-NEXT: .LBB61_598: # %cond.load225 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 58 ; CHECK-RV32-NEXT: li a4, 57 @@ -4663,9 +4629,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_62 ; CHECK-RV32-NEXT: .LBB61_599: # %cond.load229 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 59 ; CHECK-RV32-NEXT: li a4, 58 @@ -4679,9 +4644,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_63 ; CHECK-RV32-NEXT: .LBB61_600: # %cond.load233 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 60 ; CHECK-RV32-NEXT: li a4, 59 @@ -4695,9 +4659,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_64 ; CHECK-RV32-NEXT: .LBB61_601: # %cond.load237 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v9, a3 ; CHECK-RV32-NEXT: li a3, 61 ; CHECK-RV32-NEXT: li a4, 60 @@ -4727,9 +4690,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_70 ; CHECK-RV32-NEXT: .LBB61_603: # %cond.load253 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li 
a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 65 ; CHECK-RV32-NEXT: li a4, 64 @@ -4743,9 +4705,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_71 ; CHECK-RV32-NEXT: .LBB61_604: # %cond.load257 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 66 ; CHECK-RV32-NEXT: li a4, 65 @@ -4759,9 +4720,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_72 ; CHECK-RV32-NEXT: .LBB61_605: # %cond.load261 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 67 ; CHECK-RV32-NEXT: li a4, 66 @@ -4775,9 +4735,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_73 ; CHECK-RV32-NEXT: .LBB61_606: # %cond.load265 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 68 ; CHECK-RV32-NEXT: li a4, 67 @@ -4791,9 +4750,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_74 ; CHECK-RV32-NEXT: .LBB61_607: # %cond.load269 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 69 ; CHECK-RV32-NEXT: li a4, 68 @@ -4807,9 +4765,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_75 ; CHECK-RV32-NEXT: .LBB61_608: # %cond.load273 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 70 ; CHECK-RV32-NEXT: li a4, 69 @@ -4823,9 +4780,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_76 ; CHECK-RV32-NEXT: .LBB61_609: # %cond.load277 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 71 ; CHECK-RV32-NEXT: li a4, 70 @@ -4839,9 +4795,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_77 ; CHECK-RV32-NEXT: .LBB61_610: # %cond.load281 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; 
CHECK-RV32-NEXT: li a2, 72 ; CHECK-RV32-NEXT: li a4, 71 @@ -4855,9 +4810,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_78 ; CHECK-RV32-NEXT: .LBB61_611: # %cond.load285 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 73 ; CHECK-RV32-NEXT: li a4, 72 @@ -4871,9 +4825,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_79 ; CHECK-RV32-NEXT: .LBB61_612: # %cond.load289 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 74 ; CHECK-RV32-NEXT: li a4, 73 @@ -4887,9 +4840,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_80 ; CHECK-RV32-NEXT: .LBB61_613: # %cond.load293 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 75 ; CHECK-RV32-NEXT: li a4, 74 @@ -4903,9 +4855,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_81 ; CHECK-RV32-NEXT: .LBB61_614: # %cond.load297 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 76 ; CHECK-RV32-NEXT: li a4, 75 @@ -4919,9 +4870,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_82 ; CHECK-RV32-NEXT: .LBB61_615: # %cond.load301 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 77 ; CHECK-RV32-NEXT: li a4, 76 @@ -4935,9 +4885,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_83 ; CHECK-RV32-NEXT: .LBB61_616: # %cond.load305 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 78 ; CHECK-RV32-NEXT: li a4, 77 @@ -4951,9 +4900,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_84 ; CHECK-RV32-NEXT: .LBB61_617: # %cond.load309 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 79 ; CHECK-RV32-NEXT: li a4, 78 @@ -4967,9 +4915,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_85 ; 
CHECK-RV32-NEXT: .LBB61_618: # %cond.load313 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 80 ; CHECK-RV32-NEXT: li a4, 79 @@ -4983,9 +4930,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_86 ; CHECK-RV32-NEXT: .LBB61_619: # %cond.load317 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 81 ; CHECK-RV32-NEXT: li a4, 80 @@ -4999,9 +4945,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_87 ; CHECK-RV32-NEXT: .LBB61_620: # %cond.load321 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 82 ; CHECK-RV32-NEXT: li a4, 81 @@ -5015,9 +4960,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_88 ; CHECK-RV32-NEXT: .LBB61_621: # %cond.load325 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 83 ; CHECK-RV32-NEXT: li a4, 82 @@ -5031,9 +4975,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_89 ; CHECK-RV32-NEXT: .LBB61_622: # %cond.load329 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 84 ; CHECK-RV32-NEXT: li a4, 83 @@ -5047,9 +4990,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_90 ; CHECK-RV32-NEXT: .LBB61_623: # %cond.load333 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 85 ; CHECK-RV32-NEXT: li a4, 84 @@ -5063,9 +5005,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_91 ; CHECK-RV32-NEXT: .LBB61_624: # %cond.load337 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 86 ; CHECK-RV32-NEXT: li a4, 85 @@ -5079,9 +5020,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_92 ; CHECK-RV32-NEXT: .LBB61_625: # %cond.load341 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; 
CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 87 ; CHECK-RV32-NEXT: li a4, 86 @@ -5095,9 +5035,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_93 ; CHECK-RV32-NEXT: .LBB61_626: # %cond.load345 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 88 ; CHECK-RV32-NEXT: li a4, 87 @@ -5111,9 +5050,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_94 ; CHECK-RV32-NEXT: .LBB61_627: # %cond.load349 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 89 ; CHECK-RV32-NEXT: li a4, 88 @@ -5127,9 +5065,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_95 ; CHECK-RV32-NEXT: .LBB61_628: # %cond.load353 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 90 ; CHECK-RV32-NEXT: li a4, 89 @@ -5143,9 +5080,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_96 ; CHECK-RV32-NEXT: .LBB61_629: # %cond.load357 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 91 ; CHECK-RV32-NEXT: li a4, 90 @@ -5159,9 +5095,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_97 ; CHECK-RV32-NEXT: .LBB61_630: # %cond.load361 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 92 ; CHECK-RV32-NEXT: li a4, 91 @@ -5175,9 +5110,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_98 ; CHECK-RV32-NEXT: .LBB61_631: # %cond.load365 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a2 ; CHECK-RV32-NEXT: li a2, 93 ; CHECK-RV32-NEXT: li a4, 92 @@ -5207,9 +5141,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_104 ; CHECK-RV32-NEXT: .LBB61_633: # %cond.load381 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 97 ; CHECK-RV32-NEXT: li a4, 96 @@ -5223,9 +5156,8 @@ define <512 x i8> 
@test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_105 ; CHECK-RV32-NEXT: .LBB61_634: # %cond.load385 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 98 ; CHECK-RV32-NEXT: li a4, 97 @@ -5239,9 +5171,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_106 ; CHECK-RV32-NEXT: .LBB61_635: # %cond.load389 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 99 ; CHECK-RV32-NEXT: li a4, 98 @@ -5255,9 +5186,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_107 ; CHECK-RV32-NEXT: .LBB61_636: # %cond.load393 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 100 ; CHECK-RV32-NEXT: li a4, 99 @@ -5271,9 +5201,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_108 ; CHECK-RV32-NEXT: .LBB61_637: # %cond.load397 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 101 ; CHECK-RV32-NEXT: li a4, 100 @@ -5287,9 +5216,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_109 ; CHECK-RV32-NEXT: .LBB61_638: # %cond.load401 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 102 ; CHECK-RV32-NEXT: li a4, 101 @@ -5303,9 +5231,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_110 ; CHECK-RV32-NEXT: .LBB61_639: # %cond.load405 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 103 ; CHECK-RV32-NEXT: li a4, 102 @@ -5319,9 +5246,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_111 ; CHECK-RV32-NEXT: .LBB61_640: # %cond.load409 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 104 ; CHECK-RV32-NEXT: li a4, 103 @@ -5335,9 +5261,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_112 ; CHECK-RV32-NEXT: .LBB61_641: # %cond.load413 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; 
CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 105 ; CHECK-RV32-NEXT: li a4, 104 @@ -5351,9 +5276,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_113 ; CHECK-RV32-NEXT: .LBB61_642: # %cond.load417 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 106 ; CHECK-RV32-NEXT: li a4, 105 @@ -5367,9 +5291,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_114 ; CHECK-RV32-NEXT: .LBB61_643: # %cond.load421 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 107 ; CHECK-RV32-NEXT: li a4, 106 @@ -5383,9 +5306,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_115 ; CHECK-RV32-NEXT: .LBB61_644: # %cond.load425 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 108 ; CHECK-RV32-NEXT: li a4, 107 @@ -5399,9 +5321,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_116 ; CHECK-RV32-NEXT: .LBB61_645: # %cond.load429 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 109 ; CHECK-RV32-NEXT: li a4, 108 @@ -5415,9 +5336,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_117 ; CHECK-RV32-NEXT: .LBB61_646: # %cond.load433 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 110 ; CHECK-RV32-NEXT: li a4, 109 @@ -5431,9 +5351,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_118 ; CHECK-RV32-NEXT: .LBB61_647: # %cond.load437 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 111 ; CHECK-RV32-NEXT: li a4, 110 @@ -5447,9 +5366,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_119 ; CHECK-RV32-NEXT: .LBB61_648: # %cond.load441 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; 
CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 112 ; CHECK-RV32-NEXT: li a4, 111 @@ -5463,9 +5381,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_120 ; CHECK-RV32-NEXT: .LBB61_649: # %cond.load445 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 113 ; CHECK-RV32-NEXT: li a4, 112 @@ -5479,9 +5396,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_121 ; CHECK-RV32-NEXT: .LBB61_650: # %cond.load449 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 114 ; CHECK-RV32-NEXT: li a4, 113 @@ -5495,9 +5411,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_122 ; CHECK-RV32-NEXT: .LBB61_651: # %cond.load453 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 115 ; CHECK-RV32-NEXT: li a4, 114 @@ -5511,9 +5426,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_123 ; CHECK-RV32-NEXT: .LBB61_652: # %cond.load457 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 116 ; CHECK-RV32-NEXT: li a4, 115 @@ -5527,9 +5441,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_124 ; CHECK-RV32-NEXT: .LBB61_653: # %cond.load461 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 117 ; CHECK-RV32-NEXT: li a4, 116 @@ -5543,9 +5456,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_125 ; CHECK-RV32-NEXT: .LBB61_654: # %cond.load465 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 118 ; CHECK-RV32-NEXT: li a4, 117 @@ -5559,9 +5471,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_126 ; CHECK-RV32-NEXT: .LBB61_655: # %cond.load469 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 119 ; CHECK-RV32-NEXT: li a4, 118 @@ -5575,9 +5486,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 
x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_127 ; CHECK-RV32-NEXT: .LBB61_656: # %cond.load473 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 120 ; CHECK-RV32-NEXT: li a4, 119 @@ -5591,9 +5501,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_128 ; CHECK-RV32-NEXT: .LBB61_657: # %cond.load477 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 121 ; CHECK-RV32-NEXT: li a4, 120 @@ -5607,9 +5516,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_129 ; CHECK-RV32-NEXT: .LBB61_658: # %cond.load481 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 122 ; CHECK-RV32-NEXT: li a4, 121 @@ -5623,9 +5531,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_130 ; CHECK-RV32-NEXT: .LBB61_659: # %cond.load485 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 123 ; CHECK-RV32-NEXT: li a4, 122 @@ -5639,9 +5546,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_131 ; CHECK-RV32-NEXT: .LBB61_660: # %cond.load489 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 124 ; CHECK-RV32-NEXT: li a4, 123 @@ -5655,9 +5561,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_132 ; CHECK-RV32-NEXT: .LBB61_661: # %cond.load493 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v10, a3 ; CHECK-RV32-NEXT: li a3, 125 ; CHECK-RV32-NEXT: li a4, 124 @@ -5687,9 +5592,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_138 ; CHECK-RV32-NEXT: .LBB61_663: # %cond.load509 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 129 ; CHECK-RV32-NEXT: li a4, 128 @@ -5703,9 +5607,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_139 ; CHECK-RV32-NEXT: .LBB61_664: # %cond.load513 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: 
vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 130 ; CHECK-RV32-NEXT: li a4, 129 @@ -5719,9 +5622,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_140 ; CHECK-RV32-NEXT: .LBB61_665: # %cond.load517 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 131 ; CHECK-RV32-NEXT: li a4, 130 @@ -5735,9 +5637,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_141 ; CHECK-RV32-NEXT: .LBB61_666: # %cond.load521 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 132 ; CHECK-RV32-NEXT: li a4, 131 @@ -5751,9 +5652,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_142 ; CHECK-RV32-NEXT: .LBB61_667: # %cond.load525 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 133 ; CHECK-RV32-NEXT: li a4, 132 @@ -5767,9 +5667,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_143 ; CHECK-RV32-NEXT: .LBB61_668: # %cond.load529 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 134 ; CHECK-RV32-NEXT: li a4, 133 @@ -5783,9 +5682,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_144 ; CHECK-RV32-NEXT: .LBB61_669: # %cond.load533 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 135 ; CHECK-RV32-NEXT: li a4, 134 @@ -5799,9 +5697,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_145 ; CHECK-RV32-NEXT: .LBB61_670: # %cond.load537 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 136 ; CHECK-RV32-NEXT: li a4, 135 @@ -5815,9 +5712,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_146 ; CHECK-RV32-NEXT: .LBB61_671: # %cond.load541 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li 
a2, 137 ; CHECK-RV32-NEXT: li a4, 136 @@ -5831,9 +5727,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_147 ; CHECK-RV32-NEXT: .LBB61_672: # %cond.load545 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 138 ; CHECK-RV32-NEXT: li a4, 137 @@ -5847,9 +5742,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_148 ; CHECK-RV32-NEXT: .LBB61_673: # %cond.load549 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 139 ; CHECK-RV32-NEXT: li a4, 138 @@ -5863,9 +5757,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_149 ; CHECK-RV32-NEXT: .LBB61_674: # %cond.load553 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 140 ; CHECK-RV32-NEXT: li a4, 139 @@ -5879,9 +5772,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_150 ; CHECK-RV32-NEXT: .LBB61_675: # %cond.load557 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 141 ; CHECK-RV32-NEXT: li a4, 140 @@ -5895,9 +5787,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_151 ; CHECK-RV32-NEXT: .LBB61_676: # %cond.load561 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 142 ; CHECK-RV32-NEXT: li a4, 141 @@ -5911,9 +5802,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_152 ; CHECK-RV32-NEXT: .LBB61_677: # %cond.load565 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 143 ; CHECK-RV32-NEXT: li a4, 142 @@ -5927,9 +5817,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_153 ; CHECK-RV32-NEXT: .LBB61_678: # %cond.load569 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 144 ; CHECK-RV32-NEXT: li a4, 143 @@ -5943,9 +5832,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_154 ; 
CHECK-RV32-NEXT: .LBB61_679: # %cond.load573 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 145 ; CHECK-RV32-NEXT: li a4, 144 @@ -5959,9 +5847,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_155 ; CHECK-RV32-NEXT: .LBB61_680: # %cond.load577 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 146 ; CHECK-RV32-NEXT: li a4, 145 @@ -5975,9 +5862,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_156 ; CHECK-RV32-NEXT: .LBB61_681: # %cond.load581 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 147 ; CHECK-RV32-NEXT: li a4, 146 @@ -5991,9 +5877,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_157 ; CHECK-RV32-NEXT: .LBB61_682: # %cond.load585 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 148 ; CHECK-RV32-NEXT: li a4, 147 @@ -6007,9 +5892,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_158 ; CHECK-RV32-NEXT: .LBB61_683: # %cond.load589 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 149 ; CHECK-RV32-NEXT: li a4, 148 @@ -6023,9 +5907,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_159 ; CHECK-RV32-NEXT: .LBB61_684: # %cond.load593 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 150 ; CHECK-RV32-NEXT: li a4, 149 @@ -6039,9 +5922,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_160 ; CHECK-RV32-NEXT: .LBB61_685: # %cond.load597 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 151 ; CHECK-RV32-NEXT: li a4, 150 @@ -6055,9 +5937,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_161 ; CHECK-RV32-NEXT: .LBB61_686: # %cond.load601 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: 
vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 152 ; CHECK-RV32-NEXT: li a4, 151 @@ -6071,9 +5952,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_162 ; CHECK-RV32-NEXT: .LBB61_687: # %cond.load605 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 153 ; CHECK-RV32-NEXT: li a4, 152 @@ -6087,9 +5967,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_163 ; CHECK-RV32-NEXT: .LBB61_688: # %cond.load609 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 154 ; CHECK-RV32-NEXT: li a4, 153 @@ -6103,9 +5982,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_164 ; CHECK-RV32-NEXT: .LBB61_689: # %cond.load613 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 155 ; CHECK-RV32-NEXT: li a4, 154 @@ -6119,9 +5997,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_165 ; CHECK-RV32-NEXT: .LBB61_690: # %cond.load617 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 156 ; CHECK-RV32-NEXT: li a4, 155 @@ -6135,9 +6012,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_166 ; CHECK-RV32-NEXT: .LBB61_691: # %cond.load621 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 157 ; CHECK-RV32-NEXT: li a4, 156 @@ -6167,9 +6043,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_172 ; CHECK-RV32-NEXT: .LBB61_693: # %cond.load637 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 161 ; CHECK-RV32-NEXT: li a4, 160 @@ -6183,9 +6058,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_173 ; CHECK-RV32-NEXT: .LBB61_694: # %cond.load641 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 162 ; CHECK-RV32-NEXT: li a4, 161 @@ -6199,9 
+6073,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_174 ; CHECK-RV32-NEXT: .LBB61_695: # %cond.load645 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 163 ; CHECK-RV32-NEXT: li a4, 162 @@ -6215,9 +6088,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_175 ; CHECK-RV32-NEXT: .LBB61_696: # %cond.load649 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 164 ; CHECK-RV32-NEXT: li a4, 163 @@ -6231,9 +6103,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_176 ; CHECK-RV32-NEXT: .LBB61_697: # %cond.load653 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 165 ; CHECK-RV32-NEXT: li a4, 164 @@ -6247,9 +6118,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_177 ; CHECK-RV32-NEXT: .LBB61_698: # %cond.load657 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 166 ; CHECK-RV32-NEXT: li a4, 165 @@ -6263,9 +6133,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_178 ; CHECK-RV32-NEXT: .LBB61_699: # %cond.load661 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 167 ; CHECK-RV32-NEXT: li a4, 166 @@ -6279,9 +6148,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_179 ; CHECK-RV32-NEXT: .LBB61_700: # %cond.load665 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 168 ; CHECK-RV32-NEXT: li a4, 167 @@ -6295,9 +6163,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_180 ; CHECK-RV32-NEXT: .LBB61_701: # %cond.load669 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 169 ; CHECK-RV32-NEXT: li a4, 168 @@ -6311,9 +6178,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_181 ; CHECK-RV32-NEXT: .LBB61_702: # %cond.load673 ; 
CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 170 ; CHECK-RV32-NEXT: li a4, 169 @@ -6327,9 +6193,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_182 ; CHECK-RV32-NEXT: .LBB61_703: # %cond.load677 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 171 ; CHECK-RV32-NEXT: li a4, 170 @@ -6343,9 +6208,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_183 ; CHECK-RV32-NEXT: .LBB61_704: # %cond.load681 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 172 ; CHECK-RV32-NEXT: li a4, 171 @@ -6359,9 +6223,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_184 ; CHECK-RV32-NEXT: .LBB61_705: # %cond.load685 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 173 ; CHECK-RV32-NEXT: li a4, 172 @@ -6375,9 +6238,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_185 ; CHECK-RV32-NEXT: .LBB61_706: # %cond.load689 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 174 ; CHECK-RV32-NEXT: li a4, 173 @@ -6391,9 +6253,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_186 ; CHECK-RV32-NEXT: .LBB61_707: # %cond.load693 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 175 ; CHECK-RV32-NEXT: li a4, 174 @@ -6407,9 +6268,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_187 ; CHECK-RV32-NEXT: .LBB61_708: # %cond.load697 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 176 ; CHECK-RV32-NEXT: li a4, 175 @@ -6423,9 +6283,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_188 ; CHECK-RV32-NEXT: .LBB61_709: # %cond.load701 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli 
zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 177 ; CHECK-RV32-NEXT: li a4, 176 @@ -6439,9 +6298,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_189 ; CHECK-RV32-NEXT: .LBB61_710: # %cond.load705 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 178 ; CHECK-RV32-NEXT: li a4, 177 @@ -6455,9 +6313,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_190 ; CHECK-RV32-NEXT: .LBB61_711: # %cond.load709 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 179 ; CHECK-RV32-NEXT: li a4, 178 @@ -6471,9 +6328,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_191 ; CHECK-RV32-NEXT: .LBB61_712: # %cond.load713 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 180 ; CHECK-RV32-NEXT: li a4, 179 @@ -6487,9 +6343,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_192 ; CHECK-RV32-NEXT: .LBB61_713: # %cond.load717 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 181 ; CHECK-RV32-NEXT: li a4, 180 @@ -6503,9 +6358,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_193 ; CHECK-RV32-NEXT: .LBB61_714: # %cond.load721 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 182 ; CHECK-RV32-NEXT: li a4, 181 @@ -6519,9 +6373,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_194 ; CHECK-RV32-NEXT: .LBB61_715: # %cond.load725 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 183 ; CHECK-RV32-NEXT: li a4, 182 @@ -6535,9 +6388,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_195 ; CHECK-RV32-NEXT: .LBB61_716: # %cond.load729 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 184 ; CHECK-RV32-NEXT: li a4, 183 @@ -6551,9 +6403,8 @@ define <512 x i8> 
@test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_196 ; CHECK-RV32-NEXT: .LBB61_717: # %cond.load733 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 185 ; CHECK-RV32-NEXT: li a4, 184 @@ -6567,9 +6418,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_197 ; CHECK-RV32-NEXT: .LBB61_718: # %cond.load737 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 186 ; CHECK-RV32-NEXT: li a4, 185 @@ -6583,9 +6433,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_198 ; CHECK-RV32-NEXT: .LBB61_719: # %cond.load741 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 187 ; CHECK-RV32-NEXT: li a4, 186 @@ -6599,9 +6448,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_199 ; CHECK-RV32-NEXT: .LBB61_720: # %cond.load745 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 188 ; CHECK-RV32-NEXT: li a4, 187 @@ -6615,9 +6463,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_200 ; CHECK-RV32-NEXT: .LBB61_721: # %cond.load749 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 189 ; CHECK-RV32-NEXT: li a4, 188 @@ -6647,9 +6494,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_206 ; CHECK-RV32-NEXT: .LBB61_723: # %cond.load765 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 193 ; CHECK-RV32-NEXT: li a4, 192 @@ -6663,9 +6509,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_207 ; CHECK-RV32-NEXT: .LBB61_724: # %cond.load769 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 194 ; CHECK-RV32-NEXT: li a4, 193 @@ -6679,9 +6524,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_208 ; CHECK-RV32-NEXT: .LBB61_725: # %cond.load773 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; 
CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 195 ; CHECK-RV32-NEXT: li a4, 194 @@ -6695,9 +6539,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_209 ; CHECK-RV32-NEXT: .LBB61_726: # %cond.load777 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 196 ; CHECK-RV32-NEXT: li a4, 195 @@ -6711,9 +6554,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_210 ; CHECK-RV32-NEXT: .LBB61_727: # %cond.load781 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 197 ; CHECK-RV32-NEXT: li a4, 196 @@ -6727,9 +6569,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_211 ; CHECK-RV32-NEXT: .LBB61_728: # %cond.load785 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 198 ; CHECK-RV32-NEXT: li a4, 197 @@ -6743,9 +6584,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_212 ; CHECK-RV32-NEXT: .LBB61_729: # %cond.load789 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 199 ; CHECK-RV32-NEXT: li a4, 198 @@ -6759,9 +6599,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_213 ; CHECK-RV32-NEXT: .LBB61_730: # %cond.load793 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 200 ; CHECK-RV32-NEXT: li a4, 199 @@ -6775,9 +6614,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_214 ; CHECK-RV32-NEXT: .LBB61_731: # %cond.load797 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 201 ; CHECK-RV32-NEXT: li a4, 200 @@ -6791,9 +6629,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_215 ; CHECK-RV32-NEXT: .LBB61_732: # %cond.load801 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; 
CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 202 ; CHECK-RV32-NEXT: li a4, 201 @@ -6807,9 +6644,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_216 ; CHECK-RV32-NEXT: .LBB61_733: # %cond.load805 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 203 ; CHECK-RV32-NEXT: li a4, 202 @@ -6823,9 +6659,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_217 ; CHECK-RV32-NEXT: .LBB61_734: # %cond.load809 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 204 ; CHECK-RV32-NEXT: li a4, 203 @@ -6839,9 +6674,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_218 ; CHECK-RV32-NEXT: .LBB61_735: # %cond.load813 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 205 ; CHECK-RV32-NEXT: li a4, 204 @@ -6855,9 +6689,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_219 ; CHECK-RV32-NEXT: .LBB61_736: # %cond.load817 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 206 ; CHECK-RV32-NEXT: li a4, 205 @@ -6871,9 +6704,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_220 ; CHECK-RV32-NEXT: .LBB61_737: # %cond.load821 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 207 ; CHECK-RV32-NEXT: li a4, 206 @@ -6887,9 +6719,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_221 ; CHECK-RV32-NEXT: .LBB61_738: # %cond.load825 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 208 ; CHECK-RV32-NEXT: li a4, 207 @@ -6903,9 +6734,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_222 ; CHECK-RV32-NEXT: .LBB61_739: # %cond.load829 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 209 ; CHECK-RV32-NEXT: li a4, 208 @@ -6919,9 +6749,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 
x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_223 ; CHECK-RV32-NEXT: .LBB61_740: # %cond.load833 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 210 ; CHECK-RV32-NEXT: li a4, 209 @@ -6935,9 +6764,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_224 ; CHECK-RV32-NEXT: .LBB61_741: # %cond.load837 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 211 ; CHECK-RV32-NEXT: li a4, 210 @@ -6951,9 +6779,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_225 ; CHECK-RV32-NEXT: .LBB61_742: # %cond.load841 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 212 ; CHECK-RV32-NEXT: li a4, 211 @@ -6967,9 +6794,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_226 ; CHECK-RV32-NEXT: .LBB61_743: # %cond.load845 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 213 ; CHECK-RV32-NEXT: li a4, 212 @@ -6983,9 +6809,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_227 ; CHECK-RV32-NEXT: .LBB61_744: # %cond.load849 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 214 ; CHECK-RV32-NEXT: li a4, 213 @@ -6999,9 +6824,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_228 ; CHECK-RV32-NEXT: .LBB61_745: # %cond.load853 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 215 ; CHECK-RV32-NEXT: li a4, 214 @@ -7015,9 +6839,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_229 ; CHECK-RV32-NEXT: .LBB61_746: # %cond.load857 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 216 ; CHECK-RV32-NEXT: li a4, 215 @@ -7031,9 +6854,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_230 ; CHECK-RV32-NEXT: .LBB61_747: # %cond.load861 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: 
vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 217 ; CHECK-RV32-NEXT: li a4, 216 @@ -7047,9 +6869,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_231 ; CHECK-RV32-NEXT: .LBB61_748: # %cond.load865 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 218 ; CHECK-RV32-NEXT: li a4, 217 @@ -7063,9 +6884,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_232 ; CHECK-RV32-NEXT: .LBB61_749: # %cond.load869 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 219 ; CHECK-RV32-NEXT: li a4, 218 @@ -7079,9 +6899,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_233 ; CHECK-RV32-NEXT: .LBB61_750: # %cond.load873 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 220 ; CHECK-RV32-NEXT: li a4, 219 @@ -7095,9 +6914,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_234 ; CHECK-RV32-NEXT: .LBB61_751: # %cond.load877 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v24, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a2 ; CHECK-RV32-NEXT: li a2, 221 ; CHECK-RV32-NEXT: li a4, 220 @@ -7127,9 +6945,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_240 ; CHECK-RV32-NEXT: .LBB61_753: # %cond.load893 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 225 ; CHECK-RV32-NEXT: li a4, 224 @@ -7143,9 +6960,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_241 ; CHECK-RV32-NEXT: .LBB61_754: # %cond.load897 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 226 ; CHECK-RV32-NEXT: li a4, 225 @@ -7159,9 +6975,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_242 ; CHECK-RV32-NEXT: .LBB61_755: # %cond.load901 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li 
a3, 227 ; CHECK-RV32-NEXT: li a4, 226 @@ -7175,9 +6990,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_243 ; CHECK-RV32-NEXT: .LBB61_756: # %cond.load905 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 228 ; CHECK-RV32-NEXT: li a4, 227 @@ -7191,9 +7005,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_244 ; CHECK-RV32-NEXT: .LBB61_757: # %cond.load909 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 229 ; CHECK-RV32-NEXT: li a4, 228 @@ -7207,9 +7020,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_245 ; CHECK-RV32-NEXT: .LBB61_758: # %cond.load913 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 230 ; CHECK-RV32-NEXT: li a4, 229 @@ -7223,9 +7035,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_246 ; CHECK-RV32-NEXT: .LBB61_759: # %cond.load917 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 231 ; CHECK-RV32-NEXT: li a4, 230 @@ -7239,9 +7050,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_247 ; CHECK-RV32-NEXT: .LBB61_760: # %cond.load921 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 232 ; CHECK-RV32-NEXT: li a4, 231 @@ -7255,9 +7065,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_248 ; CHECK-RV32-NEXT: .LBB61_761: # %cond.load925 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 233 ; CHECK-RV32-NEXT: li a4, 232 @@ -7271,9 +7080,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_249 ; CHECK-RV32-NEXT: .LBB61_762: # %cond.load929 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 234 ; CHECK-RV32-NEXT: li a4, 233 @@ -7287,9 +7095,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_250 ; 
CHECK-RV32-NEXT: .LBB61_763: # %cond.load933 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 235 ; CHECK-RV32-NEXT: li a4, 234 @@ -7303,9 +7110,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_251 ; CHECK-RV32-NEXT: .LBB61_764: # %cond.load937 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 236 ; CHECK-RV32-NEXT: li a4, 235 @@ -7319,9 +7125,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_252 ; CHECK-RV32-NEXT: .LBB61_765: # %cond.load941 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 237 ; CHECK-RV32-NEXT: li a4, 236 @@ -7335,9 +7140,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_253 ; CHECK-RV32-NEXT: .LBB61_766: # %cond.load945 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 238 ; CHECK-RV32-NEXT: li a4, 237 @@ -7351,9 +7155,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_254 ; CHECK-RV32-NEXT: .LBB61_767: # %cond.load949 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 239 ; CHECK-RV32-NEXT: li a4, 238 @@ -7367,9 +7170,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_255 ; CHECK-RV32-NEXT: .LBB61_768: # %cond.load953 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 240 ; CHECK-RV32-NEXT: li a4, 239 @@ -7383,9 +7185,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_256 ; CHECK-RV32-NEXT: .LBB61_769: # %cond.load957 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 241 ; CHECK-RV32-NEXT: li a4, 240 @@ -7399,9 +7200,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_257 ; CHECK-RV32-NEXT: .LBB61_770: # %cond.load961 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: 
vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 242 ; CHECK-RV32-NEXT: li a4, 241 @@ -7415,9 +7215,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_258 ; CHECK-RV32-NEXT: .LBB61_771: # %cond.load965 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 243 ; CHECK-RV32-NEXT: li a4, 242 @@ -7431,9 +7230,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_259 ; CHECK-RV32-NEXT: .LBB61_772: # %cond.load969 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 244 ; CHECK-RV32-NEXT: li a4, 243 @@ -7447,9 +7245,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_260 ; CHECK-RV32-NEXT: .LBB61_773: # %cond.load973 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 245 ; CHECK-RV32-NEXT: li a4, 244 @@ -7463,9 +7260,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_261 ; CHECK-RV32-NEXT: .LBB61_774: # %cond.load977 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 246 ; CHECK-RV32-NEXT: li a4, 245 @@ -7479,9 +7275,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_262 ; CHECK-RV32-NEXT: .LBB61_775: # %cond.load981 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 247 ; CHECK-RV32-NEXT: li a4, 246 @@ -7495,9 +7290,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_263 ; CHECK-RV32-NEXT: .LBB61_776: # %cond.load985 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 248 ; CHECK-RV32-NEXT: li a4, 247 @@ -7511,9 +7305,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_264 ; CHECK-RV32-NEXT: .LBB61_777: # %cond.load989 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 249 ; CHECK-RV32-NEXT: li a4, 248 @@ -7527,9 
+7320,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_265 ; CHECK-RV32-NEXT: .LBB61_778: # %cond.load993 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 250 ; CHECK-RV32-NEXT: li a4, 249 @@ -7543,9 +7335,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_266 ; CHECK-RV32-NEXT: .LBB61_779: # %cond.load997 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 251 ; CHECK-RV32-NEXT: li a4, 250 @@ -7559,9 +7350,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_267 ; CHECK-RV32-NEXT: .LBB61_780: # %cond.load1001 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 252 ; CHECK-RV32-NEXT: li a4, 251 @@ -7575,9 +7365,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: j .LBB61_268 ; CHECK-RV32-NEXT: .LBB61_781: # %cond.load1005 ; CHECK-RV32-NEXT: lbu a3, 0(a0) -; CHECK-RV32-NEXT: li a4, 512 +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv8r.v v16, v8 -; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v12, a3 ; CHECK-RV32-NEXT: li a3, 253 ; CHECK-RV32-NEXT: li a4, 252 @@ -10998,9 +10787,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: bgez a1, .LBB61_63 ; CHECK-RV64-NEXT: .LBB61_62: # %cond.load241 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 62 ; CHECK-RV64-NEXT: li a3, 61 @@ -11279,9 +11067,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: bgez a2, .LBB61_129 ; CHECK-RV64-NEXT: .LBB61_128: # %cond.load497 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 126 ; CHECK-RV64-NEXT: li a3, 125 @@ -11560,9 +11347,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: bgez a1, .LBB61_195 ; CHECK-RV64-NEXT: .LBB61_194: # %cond.load753 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 190 ; CHECK-RV64-NEXT: li a3, 189 @@ -11841,9 +11627,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: bgez a2, .LBB61_261 ; CHECK-RV64-NEXT: 
.LBB61_260: # %cond.load1009 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 254 ; CHECK-RV64-NEXT: li a3, 253 @@ -12968,10 +12753,9 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_2 ; CHECK-RV64-NEXT: .LBB61_528: # %cond.load1 ; CHECK-RV64-NEXT: lbu a1, 0(a0) +; CHECK-RV64-NEXT: vsetivli zero, 2, e8, m1, tu, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 -; CHECK-RV64-NEXT: vsetivli zero, 2, e8, m1, tu, ma ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 1 ; CHECK-RV64-NEXT: addi a0, a0, 1 ; CHECK-RV64-NEXT: vmv1r.v v16, v8 @@ -12981,8 +12765,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_3 ; CHECK-RV64-NEXT: .LBB61_529: # %cond.load5 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 3, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 2 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -12993,8 +12777,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_4 ; CHECK-RV64-NEXT: .LBB61_530: # %cond.load9 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 4, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 3 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13005,8 +12789,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_5 ; CHECK-RV64-NEXT: .LBB61_531: # %cond.load13 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 5, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 4 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13017,8 +12801,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_6 ; CHECK-RV64-NEXT: .LBB61_532: # %cond.load17 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 6, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 5 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13029,8 +12813,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_7 ; CHECK-RV64-NEXT: .LBB61_533: # %cond.load21 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 7, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 6 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13041,8 +12825,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_8 ; CHECK-RV64-NEXT: .LBB61_534: # %cond.load25 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 8, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 7 ; CHECK-RV64-NEXT: addi 
a0, a0, 1 @@ -13053,8 +12837,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_9 ; CHECK-RV64-NEXT: .LBB61_535: # %cond.load29 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 9, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 8 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13065,8 +12849,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_10 ; CHECK-RV64-NEXT: .LBB61_536: # %cond.load33 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 10, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 9 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13077,8 +12861,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_11 ; CHECK-RV64-NEXT: .LBB61_537: # %cond.load37 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 11, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 10 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13089,8 +12873,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_12 ; CHECK-RV64-NEXT: .LBB61_538: # %cond.load41 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 12, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 11 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13101,8 +12885,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_13 ; CHECK-RV64-NEXT: .LBB61_539: # %cond.load45 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 13, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 12 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13113,8 +12897,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_14 ; CHECK-RV64-NEXT: .LBB61_540: # %cond.load49 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 14, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 13 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13125,8 +12909,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_15 ; CHECK-RV64-NEXT: .LBB61_541: # %cond.load53 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 15, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 14 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13137,8 +12921,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_16 ; CHECK-RV64-NEXT: .LBB61_542: # %cond.load57 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 16, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: 
vslideup.vi v8, v9, 15 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13149,8 +12933,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_17 ; CHECK-RV64-NEXT: .LBB61_543: # %cond.load61 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 17, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 16 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13161,8 +12945,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_18 ; CHECK-RV64-NEXT: .LBB61_544: # %cond.load65 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 18, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 17 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13173,8 +12957,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_19 ; CHECK-RV64-NEXT: .LBB61_545: # %cond.load69 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 19, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 18 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13185,8 +12969,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_20 ; CHECK-RV64-NEXT: .LBB61_546: # %cond.load73 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 20, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 19 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13197,8 +12981,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_21 ; CHECK-RV64-NEXT: .LBB61_547: # %cond.load77 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 21, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 20 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13209,8 +12993,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_22 ; CHECK-RV64-NEXT: .LBB61_548: # %cond.load81 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 22, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 21 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13221,8 +13005,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_23 ; CHECK-RV64-NEXT: .LBB61_549: # %cond.load85 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 23, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 22 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13233,8 +13017,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_24 ; CHECK-RV64-NEXT: .LBB61_550: # %cond.load89 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 24, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; 
CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 23 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13245,8 +13029,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_25 ; CHECK-RV64-NEXT: .LBB61_551: # %cond.load93 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 25, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 24 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13257,8 +13041,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_26 ; CHECK-RV64-NEXT: .LBB61_552: # %cond.load97 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 26, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 25 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13269,8 +13053,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_27 ; CHECK-RV64-NEXT: .LBB61_553: # %cond.load101 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 27, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 26 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13281,8 +13065,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_28 ; CHECK-RV64-NEXT: .LBB61_554: # %cond.load105 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 28, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 27 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13293,8 +13077,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_29 ; CHECK-RV64-NEXT: .LBB61_555: # %cond.load109 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 29, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 28 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13305,8 +13089,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_30 ; CHECK-RV64-NEXT: .LBB61_556: # %cond.load113 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 30, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 29 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13317,8 +13101,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_31 ; CHECK-RV64-NEXT: .LBB61_557: # %cond.load117 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vsetivli zero, 31, e8, m1, tu, ma +; CHECK-RV64-NEXT: vmv8r.v v16, v8 ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: vslideup.vi v8, v9, 30 ; CHECK-RV64-NEXT: addi a0, a0, 1 @@ -13329,9 +13113,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_32 ; CHECK-RV64-NEXT: .LBB61_558: # %cond.load121 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, 
m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 32 ; CHECK-RV64-NEXT: vsetvli zero, a1, e8, m1, tu, ma @@ -13344,9 +13127,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_33 ; CHECK-RV64-NEXT: .LBB61_559: # %cond.load125 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 33 ; CHECK-RV64-NEXT: li a3, 32 @@ -13360,9 +13142,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_34 ; CHECK-RV64-NEXT: .LBB61_560: # %cond.load129 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 34 ; CHECK-RV64-NEXT: li a3, 33 @@ -13376,9 +13157,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_35 ; CHECK-RV64-NEXT: .LBB61_561: # %cond.load133 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 35 ; CHECK-RV64-NEXT: li a3, 34 @@ -13392,9 +13172,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_36 ; CHECK-RV64-NEXT: .LBB61_562: # %cond.load137 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 36 ; CHECK-RV64-NEXT: li a3, 35 @@ -13408,9 +13187,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_37 ; CHECK-RV64-NEXT: .LBB61_563: # %cond.load141 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 37 ; CHECK-RV64-NEXT: li a3, 36 @@ -13424,9 +13202,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_38 ; CHECK-RV64-NEXT: .LBB61_564: # %cond.load145 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 38 ; CHECK-RV64-NEXT: li a3, 37 @@ -13440,9 +13217,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_39 ; CHECK-RV64-NEXT: .LBB61_565: # %cond.load149 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 39 ; 
CHECK-RV64-NEXT: li a3, 38 @@ -13456,9 +13232,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_40 ; CHECK-RV64-NEXT: .LBB61_566: # %cond.load153 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 40 ; CHECK-RV64-NEXT: li a3, 39 @@ -13472,9 +13247,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_41 ; CHECK-RV64-NEXT: .LBB61_567: # %cond.load157 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 41 ; CHECK-RV64-NEXT: li a3, 40 @@ -13488,9 +13262,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_42 ; CHECK-RV64-NEXT: .LBB61_568: # %cond.load161 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 42 ; CHECK-RV64-NEXT: li a3, 41 @@ -13504,9 +13277,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_43 ; CHECK-RV64-NEXT: .LBB61_569: # %cond.load165 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 43 ; CHECK-RV64-NEXT: li a3, 42 @@ -13520,9 +13292,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_44 ; CHECK-RV64-NEXT: .LBB61_570: # %cond.load169 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 44 ; CHECK-RV64-NEXT: li a3, 43 @@ -13536,9 +13307,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_45 ; CHECK-RV64-NEXT: .LBB61_571: # %cond.load173 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 45 ; CHECK-RV64-NEXT: li a3, 44 @@ -13552,9 +13322,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_46 ; CHECK-RV64-NEXT: .LBB61_572: # %cond.load177 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 46 ; CHECK-RV64-NEXT: li a3, 45 @@ -13568,9 +13337,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_47 ; CHECK-RV64-NEXT: .LBB61_573: 
# %cond.load181 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 47 ; CHECK-RV64-NEXT: li a3, 46 @@ -13584,9 +13352,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_48 ; CHECK-RV64-NEXT: .LBB61_574: # %cond.load185 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 48 ; CHECK-RV64-NEXT: li a3, 47 @@ -13600,9 +13367,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_49 ; CHECK-RV64-NEXT: .LBB61_575: # %cond.load189 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 49 ; CHECK-RV64-NEXT: li a3, 48 @@ -13616,9 +13382,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_50 ; CHECK-RV64-NEXT: .LBB61_576: # %cond.load193 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 50 ; CHECK-RV64-NEXT: li a3, 49 @@ -13632,9 +13397,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_51 ; CHECK-RV64-NEXT: .LBB61_577: # %cond.load197 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 51 ; CHECK-RV64-NEXT: li a3, 50 @@ -13648,9 +13412,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_52 ; CHECK-RV64-NEXT: .LBB61_578: # %cond.load201 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 52 ; CHECK-RV64-NEXT: li a3, 51 @@ -13664,9 +13427,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_53 ; CHECK-RV64-NEXT: .LBB61_579: # %cond.load205 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 53 ; CHECK-RV64-NEXT: li a3, 52 @@ -13680,9 +13442,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_54 ; CHECK-RV64-NEXT: .LBB61_580: # %cond.load209 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: 
vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 54 ; CHECK-RV64-NEXT: li a3, 53 @@ -13696,9 +13457,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_55 ; CHECK-RV64-NEXT: .LBB61_581: # %cond.load213 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 55 ; CHECK-RV64-NEXT: li a3, 54 @@ -13712,9 +13472,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_56 ; CHECK-RV64-NEXT: .LBB61_582: # %cond.load217 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 56 ; CHECK-RV64-NEXT: li a3, 55 @@ -13728,9 +13487,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_57 ; CHECK-RV64-NEXT: .LBB61_583: # %cond.load221 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 57 ; CHECK-RV64-NEXT: li a3, 56 @@ -13744,9 +13502,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_58 ; CHECK-RV64-NEXT: .LBB61_584: # %cond.load225 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 58 ; CHECK-RV64-NEXT: li a3, 57 @@ -13760,9 +13517,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_59 ; CHECK-RV64-NEXT: .LBB61_585: # %cond.load229 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 59 ; CHECK-RV64-NEXT: li a3, 58 @@ -13776,9 +13532,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_60 ; CHECK-RV64-NEXT: .LBB61_586: # %cond.load233 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 60 ; CHECK-RV64-NEXT: li a3, 59 @@ -13792,9 +13547,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_61 ; CHECK-RV64-NEXT: .LBB61_587: # %cond.load237 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v9, a1 ; CHECK-RV64-NEXT: li a1, 61 ; CHECK-RV64-NEXT: li a3, 60 @@ -13824,9 +13578,8 @@ define <512 x i8> 
@test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_67 ; CHECK-RV64-NEXT: .LBB61_589: # %cond.load253 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 65 ; CHECK-RV64-NEXT: li a3, 64 @@ -13840,9 +13593,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_68 ; CHECK-RV64-NEXT: .LBB61_590: # %cond.load257 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 66 ; CHECK-RV64-NEXT: li a3, 65 @@ -13856,9 +13608,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_69 ; CHECK-RV64-NEXT: .LBB61_591: # %cond.load261 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 67 ; CHECK-RV64-NEXT: li a3, 66 @@ -13872,9 +13623,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_70 ; CHECK-RV64-NEXT: .LBB61_592: # %cond.load265 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 68 ; CHECK-RV64-NEXT: li a3, 67 @@ -13888,9 +13638,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_71 ; CHECK-RV64-NEXT: .LBB61_593: # %cond.load269 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 69 ; CHECK-RV64-NEXT: li a3, 68 @@ -13904,9 +13653,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_72 ; CHECK-RV64-NEXT: .LBB61_594: # %cond.load273 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 70 ; CHECK-RV64-NEXT: li a3, 69 @@ -13920,9 +13668,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_73 ; CHECK-RV64-NEXT: .LBB61_595: # %cond.load277 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 71 ; CHECK-RV64-NEXT: li a3, 70 @@ -13936,9 +13683,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_74 ; CHECK-RV64-NEXT: .LBB61_596: # %cond.load281 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; 
CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 72 ; CHECK-RV64-NEXT: li a3, 71 @@ -13952,9 +13698,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_75 ; CHECK-RV64-NEXT: .LBB61_597: # %cond.load285 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 73 ; CHECK-RV64-NEXT: li a3, 72 @@ -13968,9 +13713,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_76 ; CHECK-RV64-NEXT: .LBB61_598: # %cond.load289 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 74 ; CHECK-RV64-NEXT: li a3, 73 @@ -13984,9 +13728,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_77 ; CHECK-RV64-NEXT: .LBB61_599: # %cond.load293 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 75 ; CHECK-RV64-NEXT: li a3, 74 @@ -14000,9 +13743,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_78 ; CHECK-RV64-NEXT: .LBB61_600: # %cond.load297 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 76 ; CHECK-RV64-NEXT: li a3, 75 @@ -14016,9 +13758,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_79 ; CHECK-RV64-NEXT: .LBB61_601: # %cond.load301 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 77 ; CHECK-RV64-NEXT: li a3, 76 @@ -14032,9 +13773,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_80 ; CHECK-RV64-NEXT: .LBB61_602: # %cond.load305 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 78 ; CHECK-RV64-NEXT: li a3, 77 @@ -14048,9 +13788,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_81 ; CHECK-RV64-NEXT: .LBB61_603: # %cond.load309 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: 
vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 79 ; CHECK-RV64-NEXT: li a3, 78 @@ -14064,9 +13803,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_82 ; CHECK-RV64-NEXT: .LBB61_604: # %cond.load313 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 80 ; CHECK-RV64-NEXT: li a3, 79 @@ -14080,9 +13818,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_83 ; CHECK-RV64-NEXT: .LBB61_605: # %cond.load317 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 81 ; CHECK-RV64-NEXT: li a3, 80 @@ -14096,9 +13833,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_84 ; CHECK-RV64-NEXT: .LBB61_606: # %cond.load321 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 82 ; CHECK-RV64-NEXT: li a3, 81 @@ -14112,9 +13848,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_85 ; CHECK-RV64-NEXT: .LBB61_607: # %cond.load325 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 83 ; CHECK-RV64-NEXT: li a3, 82 @@ -14128,9 +13863,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_86 ; CHECK-RV64-NEXT: .LBB61_608: # %cond.load329 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 84 ; CHECK-RV64-NEXT: li a3, 83 @@ -14144,9 +13878,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_87 ; CHECK-RV64-NEXT: .LBB61_609: # %cond.load333 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 85 ; CHECK-RV64-NEXT: li a3, 84 @@ -14160,9 +13893,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_88 ; CHECK-RV64-NEXT: .LBB61_610: # %cond.load337 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 86 ; CHECK-RV64-NEXT: li a3, 85 @@ -14176,9 +13908,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; 
CHECK-RV64-NEXT: j .LBB61_89 ; CHECK-RV64-NEXT: .LBB61_611: # %cond.load341 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 87 ; CHECK-RV64-NEXT: li a3, 86 @@ -14192,9 +13923,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_90 ; CHECK-RV64-NEXT: .LBB61_612: # %cond.load345 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 88 ; CHECK-RV64-NEXT: li a3, 87 @@ -14208,9 +13938,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_91 ; CHECK-RV64-NEXT: .LBB61_613: # %cond.load349 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 89 ; CHECK-RV64-NEXT: li a3, 88 @@ -14224,9 +13953,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_92 ; CHECK-RV64-NEXT: .LBB61_614: # %cond.load353 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 90 ; CHECK-RV64-NEXT: li a3, 89 @@ -14240,9 +13968,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_93 ; CHECK-RV64-NEXT: .LBB61_615: # %cond.load357 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 91 ; CHECK-RV64-NEXT: li a3, 90 @@ -14256,9 +13983,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_94 ; CHECK-RV64-NEXT: .LBB61_616: # %cond.load361 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 92 ; CHECK-RV64-NEXT: li a3, 91 @@ -14272,9 +13998,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_95 ; CHECK-RV64-NEXT: .LBB61_617: # %cond.load365 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 93 ; CHECK-RV64-NEXT: li a3, 92 @@ -14288,9 +14013,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_96 ; CHECK-RV64-NEXT: .LBB61_618: # %cond.load369 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, 
ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 94 ; CHECK-RV64-NEXT: li a3, 93 @@ -14304,9 +14028,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_97 ; CHECK-RV64-NEXT: .LBB61_619: # %cond.load373 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 95 ; CHECK-RV64-NEXT: li a3, 94 @@ -14320,9 +14043,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_98 ; CHECK-RV64-NEXT: .LBB61_620: # %cond.load377 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 96 ; CHECK-RV64-NEXT: li a3, 95 @@ -14336,9 +14058,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_99 ; CHECK-RV64-NEXT: .LBB61_621: # %cond.load381 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 97 ; CHECK-RV64-NEXT: li a3, 96 @@ -14352,9 +14073,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_100 ; CHECK-RV64-NEXT: .LBB61_622: # %cond.load385 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 98 ; CHECK-RV64-NEXT: li a3, 97 @@ -14368,9 +14088,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_101 ; CHECK-RV64-NEXT: .LBB61_623: # %cond.load389 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 99 ; CHECK-RV64-NEXT: li a3, 98 @@ -14384,9 +14103,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_102 ; CHECK-RV64-NEXT: .LBB61_624: # %cond.load393 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 100 ; CHECK-RV64-NEXT: li a3, 99 @@ -14400,9 +14118,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_103 ; CHECK-RV64-NEXT: .LBB61_625: # %cond.load397 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 101 ; CHECK-RV64-NEXT: li 
a3, 100 @@ -14416,9 +14133,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_104 ; CHECK-RV64-NEXT: .LBB61_626: # %cond.load401 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 102 ; CHECK-RV64-NEXT: li a3, 101 @@ -14432,9 +14148,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_105 ; CHECK-RV64-NEXT: .LBB61_627: # %cond.load405 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 103 ; CHECK-RV64-NEXT: li a3, 102 @@ -14448,9 +14163,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_106 ; CHECK-RV64-NEXT: .LBB61_628: # %cond.load409 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 104 ; CHECK-RV64-NEXT: li a3, 103 @@ -14464,9 +14178,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_107 ; CHECK-RV64-NEXT: .LBB61_629: # %cond.load413 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 105 ; CHECK-RV64-NEXT: li a3, 104 @@ -14480,9 +14193,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_108 ; CHECK-RV64-NEXT: .LBB61_630: # %cond.load417 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 106 ; CHECK-RV64-NEXT: li a3, 105 @@ -14496,9 +14208,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_109 ; CHECK-RV64-NEXT: .LBB61_631: # %cond.load421 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 107 ; CHECK-RV64-NEXT: li a3, 106 @@ -14512,9 +14223,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_110 ; CHECK-RV64-NEXT: .LBB61_632: # %cond.load425 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 108 ; CHECK-RV64-NEXT: li a3, 107 @@ -14528,9 +14238,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_111 ; CHECK-RV64-NEXT: 
.LBB61_633: # %cond.load429 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 109 ; CHECK-RV64-NEXT: li a3, 108 @@ -14544,9 +14253,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_112 ; CHECK-RV64-NEXT: .LBB61_634: # %cond.load433 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 110 ; CHECK-RV64-NEXT: li a3, 109 @@ -14560,9 +14268,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_113 ; CHECK-RV64-NEXT: .LBB61_635: # %cond.load437 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 111 ; CHECK-RV64-NEXT: li a3, 110 @@ -14576,9 +14283,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_114 ; CHECK-RV64-NEXT: .LBB61_636: # %cond.load441 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 112 ; CHECK-RV64-NEXT: li a3, 111 @@ -14592,9 +14298,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_115 ; CHECK-RV64-NEXT: .LBB61_637: # %cond.load445 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 113 ; CHECK-RV64-NEXT: li a3, 112 @@ -14608,9 +14313,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_116 ; CHECK-RV64-NEXT: .LBB61_638: # %cond.load449 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 114 ; CHECK-RV64-NEXT: li a3, 113 @@ -14624,9 +14328,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_117 ; CHECK-RV64-NEXT: .LBB61_639: # %cond.load453 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 115 ; CHECK-RV64-NEXT: li a3, 114 @@ -14640,9 +14343,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_118 ; CHECK-RV64-NEXT: .LBB61_640: # %cond.load457 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: 
vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 116 ; CHECK-RV64-NEXT: li a3, 115 @@ -14656,9 +14358,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_119 ; CHECK-RV64-NEXT: .LBB61_641: # %cond.load461 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 117 ; CHECK-RV64-NEXT: li a3, 116 @@ -14672,9 +14373,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_120 ; CHECK-RV64-NEXT: .LBB61_642: # %cond.load465 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 118 ; CHECK-RV64-NEXT: li a3, 117 @@ -14688,9 +14388,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_121 ; CHECK-RV64-NEXT: .LBB61_643: # %cond.load469 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 119 ; CHECK-RV64-NEXT: li a3, 118 @@ -14704,9 +14403,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_122 ; CHECK-RV64-NEXT: .LBB61_644: # %cond.load473 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 120 ; CHECK-RV64-NEXT: li a3, 119 @@ -14720,9 +14418,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_123 ; CHECK-RV64-NEXT: .LBB61_645: # %cond.load477 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 121 ; CHECK-RV64-NEXT: li a3, 120 @@ -14736,9 +14433,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_124 ; CHECK-RV64-NEXT: .LBB61_646: # %cond.load481 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 122 ; CHECK-RV64-NEXT: li a3, 121 @@ -14752,9 +14448,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_125 ; CHECK-RV64-NEXT: .LBB61_647: # %cond.load485 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 123 ; CHECK-RV64-NEXT: li a3, 122 @@ 
-14768,9 +14463,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_126 ; CHECK-RV64-NEXT: .LBB61_648: # %cond.load489 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 124 ; CHECK-RV64-NEXT: li a3, 123 @@ -14784,9 +14478,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_127 ; CHECK-RV64-NEXT: .LBB61_649: # %cond.load493 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v10, a2 ; CHECK-RV64-NEXT: li a2, 125 ; CHECK-RV64-NEXT: li a3, 124 @@ -14816,9 +14509,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_133 ; CHECK-RV64-NEXT: .LBB61_651: # %cond.load509 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 129 ; CHECK-RV64-NEXT: li a3, 128 @@ -14832,9 +14524,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_134 ; CHECK-RV64-NEXT: .LBB61_652: # %cond.load513 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 130 ; CHECK-RV64-NEXT: li a3, 129 @@ -14848,9 +14539,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_135 ; CHECK-RV64-NEXT: .LBB61_653: # %cond.load517 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 131 ; CHECK-RV64-NEXT: li a3, 130 @@ -14864,9 +14554,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_136 ; CHECK-RV64-NEXT: .LBB61_654: # %cond.load521 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 132 ; CHECK-RV64-NEXT: li a3, 131 @@ -14880,9 +14569,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_137 ; CHECK-RV64-NEXT: .LBB61_655: # %cond.load525 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 133 ; CHECK-RV64-NEXT: li a3, 132 @@ -14896,9 +14584,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_138 ; CHECK-RV64-NEXT: .LBB61_656: # 
%cond.load529 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 134 ; CHECK-RV64-NEXT: li a3, 133 @@ -14912,9 +14599,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_139 ; CHECK-RV64-NEXT: .LBB61_657: # %cond.load533 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 135 ; CHECK-RV64-NEXT: li a3, 134 @@ -14928,9 +14614,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_140 ; CHECK-RV64-NEXT: .LBB61_658: # %cond.load537 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 136 ; CHECK-RV64-NEXT: li a3, 135 @@ -14944,9 +14629,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_141 ; CHECK-RV64-NEXT: .LBB61_659: # %cond.load541 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 137 ; CHECK-RV64-NEXT: li a3, 136 @@ -14960,9 +14644,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_142 ; CHECK-RV64-NEXT: .LBB61_660: # %cond.load545 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 138 ; CHECK-RV64-NEXT: li a3, 137 @@ -14976,9 +14659,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_143 ; CHECK-RV64-NEXT: .LBB61_661: # %cond.load549 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 139 ; CHECK-RV64-NEXT: li a3, 138 @@ -14992,9 +14674,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_144 ; CHECK-RV64-NEXT: .LBB61_662: # %cond.load553 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 140 ; CHECK-RV64-NEXT: li a3, 139 @@ -15008,9 +14689,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_145 ; CHECK-RV64-NEXT: .LBB61_663: # %cond.load557 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 
-; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 141 ; CHECK-RV64-NEXT: li a3, 140 @@ -15024,9 +14704,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_146 ; CHECK-RV64-NEXT: .LBB61_664: # %cond.load561 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 142 ; CHECK-RV64-NEXT: li a3, 141 @@ -15040,9 +14719,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_147 ; CHECK-RV64-NEXT: .LBB61_665: # %cond.load565 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 143 ; CHECK-RV64-NEXT: li a3, 142 @@ -15056,9 +14734,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_148 ; CHECK-RV64-NEXT: .LBB61_666: # %cond.load569 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 144 ; CHECK-RV64-NEXT: li a3, 143 @@ -15072,9 +14749,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_149 ; CHECK-RV64-NEXT: .LBB61_667: # %cond.load573 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 145 ; CHECK-RV64-NEXT: li a3, 144 @@ -15088,9 +14764,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_150 ; CHECK-RV64-NEXT: .LBB61_668: # %cond.load577 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 146 ; CHECK-RV64-NEXT: li a3, 145 @@ -15104,9 +14779,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_151 ; CHECK-RV64-NEXT: .LBB61_669: # %cond.load581 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 147 ; CHECK-RV64-NEXT: li a3, 146 @@ -15120,9 +14794,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_152 ; CHECK-RV64-NEXT: .LBB61_670: # %cond.load585 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 148 ; CHECK-RV64-NEXT: li a3, 147 @@ -15136,9 
+14809,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_153 ; CHECK-RV64-NEXT: .LBB61_671: # %cond.load589 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 149 ; CHECK-RV64-NEXT: li a3, 148 @@ -15152,9 +14824,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_154 ; CHECK-RV64-NEXT: .LBB61_672: # %cond.load593 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 150 ; CHECK-RV64-NEXT: li a3, 149 @@ -15168,9 +14839,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_155 ; CHECK-RV64-NEXT: .LBB61_673: # %cond.load597 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 151 ; CHECK-RV64-NEXT: li a3, 150 @@ -15184,9 +14854,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_156 ; CHECK-RV64-NEXT: .LBB61_674: # %cond.load601 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 152 ; CHECK-RV64-NEXT: li a3, 151 @@ -15200,9 +14869,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_157 ; CHECK-RV64-NEXT: .LBB61_675: # %cond.load605 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 153 ; CHECK-RV64-NEXT: li a3, 152 @@ -15216,9 +14884,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_158 ; CHECK-RV64-NEXT: .LBB61_676: # %cond.load609 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 154 ; CHECK-RV64-NEXT: li a3, 153 @@ -15232,9 +14899,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_159 ; CHECK-RV64-NEXT: .LBB61_677: # %cond.load613 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 155 ; CHECK-RV64-NEXT: li a3, 154 @@ -15248,9 +14914,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_160 ; CHECK-RV64-NEXT: .LBB61_678: # 
%cond.load617 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 156 ; CHECK-RV64-NEXT: li a3, 155 @@ -15264,9 +14929,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_161 ; CHECK-RV64-NEXT: .LBB61_679: # %cond.load621 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 157 ; CHECK-RV64-NEXT: li a3, 156 @@ -15280,9 +14944,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_162 ; CHECK-RV64-NEXT: .LBB61_680: # %cond.load625 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 158 ; CHECK-RV64-NEXT: li a3, 157 @@ -15296,9 +14959,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_163 ; CHECK-RV64-NEXT: .LBB61_681: # %cond.load629 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 159 ; CHECK-RV64-NEXT: li a3, 158 @@ -15312,9 +14974,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_164 ; CHECK-RV64-NEXT: .LBB61_682: # %cond.load633 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 160 ; CHECK-RV64-NEXT: li a3, 159 @@ -15328,9 +14989,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_165 ; CHECK-RV64-NEXT: .LBB61_683: # %cond.load637 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 161 ; CHECK-RV64-NEXT: li a3, 160 @@ -15344,9 +15004,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_166 ; CHECK-RV64-NEXT: .LBB61_684: # %cond.load641 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 162 ; CHECK-RV64-NEXT: li a3, 161 @@ -15360,9 +15019,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_167 ; CHECK-RV64-NEXT: .LBB61_685: # %cond.load645 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 
-; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 163 ; CHECK-RV64-NEXT: li a3, 162 @@ -15376,9 +15034,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_168 ; CHECK-RV64-NEXT: .LBB61_686: # %cond.load649 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 164 ; CHECK-RV64-NEXT: li a3, 163 @@ -15392,9 +15049,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_169 ; CHECK-RV64-NEXT: .LBB61_687: # %cond.load653 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 165 ; CHECK-RV64-NEXT: li a3, 164 @@ -15408,9 +15064,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_170 ; CHECK-RV64-NEXT: .LBB61_688: # %cond.load657 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 166 ; CHECK-RV64-NEXT: li a3, 165 @@ -15424,9 +15079,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_171 ; CHECK-RV64-NEXT: .LBB61_689: # %cond.load661 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 167 ; CHECK-RV64-NEXT: li a3, 166 @@ -15440,9 +15094,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_172 ; CHECK-RV64-NEXT: .LBB61_690: # %cond.load665 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 168 ; CHECK-RV64-NEXT: li a3, 167 @@ -15456,9 +15109,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_173 ; CHECK-RV64-NEXT: .LBB61_691: # %cond.load669 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 169 ; CHECK-RV64-NEXT: li a3, 168 @@ -15472,9 +15124,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_174 ; CHECK-RV64-NEXT: .LBB61_692: # %cond.load673 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 170 ; CHECK-RV64-NEXT: li a3, 169 @@ -15488,9 
+15139,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_175 ; CHECK-RV64-NEXT: .LBB61_693: # %cond.load677 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 171 ; CHECK-RV64-NEXT: li a3, 170 @@ -15504,9 +15154,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_176 ; CHECK-RV64-NEXT: .LBB61_694: # %cond.load681 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 172 ; CHECK-RV64-NEXT: li a3, 171 @@ -15520,9 +15169,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_177 ; CHECK-RV64-NEXT: .LBB61_695: # %cond.load685 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 173 ; CHECK-RV64-NEXT: li a3, 172 @@ -15536,9 +15184,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_178 ; CHECK-RV64-NEXT: .LBB61_696: # %cond.load689 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 174 ; CHECK-RV64-NEXT: li a3, 173 @@ -15552,9 +15199,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_179 ; CHECK-RV64-NEXT: .LBB61_697: # %cond.load693 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 175 ; CHECK-RV64-NEXT: li a3, 174 @@ -15568,9 +15214,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_180 ; CHECK-RV64-NEXT: .LBB61_698: # %cond.load697 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 176 ; CHECK-RV64-NEXT: li a3, 175 @@ -15584,9 +15229,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_181 ; CHECK-RV64-NEXT: .LBB61_699: # %cond.load701 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 177 ; CHECK-RV64-NEXT: li a3, 176 @@ -15600,9 +15244,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_182 ; CHECK-RV64-NEXT: .LBB61_700: # 
%cond.load705 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 178 ; CHECK-RV64-NEXT: li a3, 177 @@ -15616,9 +15259,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_183 ; CHECK-RV64-NEXT: .LBB61_701: # %cond.load709 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 179 ; CHECK-RV64-NEXT: li a3, 178 @@ -15632,9 +15274,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_184 ; CHECK-RV64-NEXT: .LBB61_702: # %cond.load713 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 180 ; CHECK-RV64-NEXT: li a3, 179 @@ -15648,9 +15289,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_185 ; CHECK-RV64-NEXT: .LBB61_703: # %cond.load717 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 181 ; CHECK-RV64-NEXT: li a3, 180 @@ -15664,9 +15304,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_186 ; CHECK-RV64-NEXT: .LBB61_704: # %cond.load721 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 182 ; CHECK-RV64-NEXT: li a3, 181 @@ -15680,9 +15319,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_187 ; CHECK-RV64-NEXT: .LBB61_705: # %cond.load725 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 183 ; CHECK-RV64-NEXT: li a3, 182 @@ -15696,9 +15334,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_188 ; CHECK-RV64-NEXT: .LBB61_706: # %cond.load729 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 184 ; CHECK-RV64-NEXT: li a3, 183 @@ -15712,9 +15349,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_189 ; CHECK-RV64-NEXT: .LBB61_707: # %cond.load733 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 
-; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 185 ; CHECK-RV64-NEXT: li a3, 184 @@ -15728,9 +15364,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_190 ; CHECK-RV64-NEXT: .LBB61_708: # %cond.load737 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 186 ; CHECK-RV64-NEXT: li a3, 185 @@ -15744,9 +15379,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_191 ; CHECK-RV64-NEXT: .LBB61_709: # %cond.load741 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 187 ; CHECK-RV64-NEXT: li a3, 186 @@ -15760,9 +15394,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_192 ; CHECK-RV64-NEXT: .LBB61_710: # %cond.load745 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 188 ; CHECK-RV64-NEXT: li a3, 187 @@ -15776,9 +15409,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_193 ; CHECK-RV64-NEXT: .LBB61_711: # %cond.load749 ; CHECK-RV64-NEXT: lbu a1, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a1 ; CHECK-RV64-NEXT: li a1, 189 ; CHECK-RV64-NEXT: li a3, 188 @@ -15808,9 +15440,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_199 ; CHECK-RV64-NEXT: .LBB61_713: # %cond.load765 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 193 ; CHECK-RV64-NEXT: li a3, 192 @@ -15824,9 +15455,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_200 ; CHECK-RV64-NEXT: .LBB61_714: # %cond.load769 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 194 ; CHECK-RV64-NEXT: li a3, 193 @@ -15840,9 +15470,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_201 ; CHECK-RV64-NEXT: .LBB61_715: # %cond.load773 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 195 ; CHECK-RV64-NEXT: li a3, 194 @@ -15856,9 
+15485,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_202 ; CHECK-RV64-NEXT: .LBB61_716: # %cond.load777 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 196 ; CHECK-RV64-NEXT: li a3, 195 @@ -15872,9 +15500,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_203 ; CHECK-RV64-NEXT: .LBB61_717: # %cond.load781 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 197 ; CHECK-RV64-NEXT: li a3, 196 @@ -15888,9 +15515,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_204 ; CHECK-RV64-NEXT: .LBB61_718: # %cond.load785 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 198 ; CHECK-RV64-NEXT: li a3, 197 @@ -15904,9 +15530,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_205 ; CHECK-RV64-NEXT: .LBB61_719: # %cond.load789 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 199 ; CHECK-RV64-NEXT: li a3, 198 @@ -15920,9 +15545,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_206 ; CHECK-RV64-NEXT: .LBB61_720: # %cond.load793 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 200 ; CHECK-RV64-NEXT: li a3, 199 @@ -15936,9 +15560,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_207 ; CHECK-RV64-NEXT: .LBB61_721: # %cond.load797 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 201 ; CHECK-RV64-NEXT: li a3, 200 @@ -15952,9 +15575,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_208 ; CHECK-RV64-NEXT: .LBB61_722: # %cond.load801 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 202 ; CHECK-RV64-NEXT: li a3, 201 @@ -15968,9 +15590,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_209 ; CHECK-RV64-NEXT: .LBB61_723: # 
%cond.load805 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 203 ; CHECK-RV64-NEXT: li a3, 202 @@ -15984,9 +15605,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_210 ; CHECK-RV64-NEXT: .LBB61_724: # %cond.load809 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 204 ; CHECK-RV64-NEXT: li a3, 203 @@ -16000,9 +15620,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_211 ; CHECK-RV64-NEXT: .LBB61_725: # %cond.load813 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 205 ; CHECK-RV64-NEXT: li a3, 204 @@ -16016,9 +15635,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_212 ; CHECK-RV64-NEXT: .LBB61_726: # %cond.load817 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 206 ; CHECK-RV64-NEXT: li a3, 205 @@ -16032,9 +15650,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_213 ; CHECK-RV64-NEXT: .LBB61_727: # %cond.load821 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 207 ; CHECK-RV64-NEXT: li a3, 206 @@ -16048,9 +15665,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_214 ; CHECK-RV64-NEXT: .LBB61_728: # %cond.load825 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 208 ; CHECK-RV64-NEXT: li a3, 207 @@ -16064,9 +15680,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_215 ; CHECK-RV64-NEXT: .LBB61_729: # %cond.load829 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 209 ; CHECK-RV64-NEXT: li a3, 208 @@ -16080,9 +15695,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_216 ; CHECK-RV64-NEXT: .LBB61_730: # %cond.load833 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 
-; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 210 ; CHECK-RV64-NEXT: li a3, 209 @@ -16096,9 +15710,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_217 ; CHECK-RV64-NEXT: .LBB61_731: # %cond.load837 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 211 ; CHECK-RV64-NEXT: li a3, 210 @@ -16112,9 +15725,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_218 ; CHECK-RV64-NEXT: .LBB61_732: # %cond.load841 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 212 ; CHECK-RV64-NEXT: li a3, 211 @@ -16128,9 +15740,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_219 ; CHECK-RV64-NEXT: .LBB61_733: # %cond.load845 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 213 ; CHECK-RV64-NEXT: li a3, 212 @@ -16144,9 +15755,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_220 ; CHECK-RV64-NEXT: .LBB61_734: # %cond.load849 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 214 ; CHECK-RV64-NEXT: li a3, 213 @@ -16160,9 +15770,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_221 ; CHECK-RV64-NEXT: .LBB61_735: # %cond.load853 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 215 ; CHECK-RV64-NEXT: li a3, 214 @@ -16176,9 +15785,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_222 ; CHECK-RV64-NEXT: .LBB61_736: # %cond.load857 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 216 ; CHECK-RV64-NEXT: li a3, 215 @@ -16192,9 +15800,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_223 ; CHECK-RV64-NEXT: .LBB61_737: # %cond.load861 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 217 ; CHECK-RV64-NEXT: li a3, 216 @@ -16208,9 
+15815,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_224 ; CHECK-RV64-NEXT: .LBB61_738: # %cond.load865 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 218 ; CHECK-RV64-NEXT: li a3, 217 @@ -16224,9 +15830,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_225 ; CHECK-RV64-NEXT: .LBB61_739: # %cond.load869 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 219 ; CHECK-RV64-NEXT: li a3, 218 @@ -16240,9 +15845,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_226 ; CHECK-RV64-NEXT: .LBB61_740: # %cond.load873 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 220 ; CHECK-RV64-NEXT: li a3, 219 @@ -16256,9 +15860,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_227 ; CHECK-RV64-NEXT: .LBB61_741: # %cond.load877 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 221 ; CHECK-RV64-NEXT: li a3, 220 @@ -16272,9 +15875,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_228 ; CHECK-RV64-NEXT: .LBB61_742: # %cond.load881 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 222 ; CHECK-RV64-NEXT: li a3, 221 @@ -16288,9 +15890,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_229 ; CHECK-RV64-NEXT: .LBB61_743: # %cond.load885 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 223 ; CHECK-RV64-NEXT: li a3, 222 @@ -16304,9 +15905,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_230 ; CHECK-RV64-NEXT: .LBB61_744: # %cond.load889 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 224 ; CHECK-RV64-NEXT: li a3, 223 @@ -16320,9 +15920,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_231 ; CHECK-RV64-NEXT: .LBB61_745: # 
%cond.load893 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 225 ; CHECK-RV64-NEXT: li a3, 224 @@ -16336,9 +15935,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_232 ; CHECK-RV64-NEXT: .LBB61_746: # %cond.load897 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 226 ; CHECK-RV64-NEXT: li a3, 225 @@ -16352,9 +15950,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_233 ; CHECK-RV64-NEXT: .LBB61_747: # %cond.load901 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 227 ; CHECK-RV64-NEXT: li a3, 226 @@ -16368,9 +15965,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_234 ; CHECK-RV64-NEXT: .LBB61_748: # %cond.load905 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 228 ; CHECK-RV64-NEXT: li a3, 227 @@ -16384,9 +15980,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_235 ; CHECK-RV64-NEXT: .LBB61_749: # %cond.load909 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 229 ; CHECK-RV64-NEXT: li a3, 228 @@ -16400,9 +15995,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_236 ; CHECK-RV64-NEXT: .LBB61_750: # %cond.load913 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 230 ; CHECK-RV64-NEXT: li a3, 229 @@ -16416,9 +16010,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_237 ; CHECK-RV64-NEXT: .LBB61_751: # %cond.load917 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 231 ; CHECK-RV64-NEXT: li a3, 230 @@ -16432,9 +16025,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_238 ; CHECK-RV64-NEXT: .LBB61_752: # %cond.load921 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 
-; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 232 ; CHECK-RV64-NEXT: li a3, 231 @@ -16448,9 +16040,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_239 ; CHECK-RV64-NEXT: .LBB61_753: # %cond.load925 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 233 ; CHECK-RV64-NEXT: li a3, 232 @@ -16464,9 +16055,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_240 ; CHECK-RV64-NEXT: .LBB61_754: # %cond.load929 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 234 ; CHECK-RV64-NEXT: li a3, 233 @@ -16480,9 +16070,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_241 ; CHECK-RV64-NEXT: .LBB61_755: # %cond.load933 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 235 ; CHECK-RV64-NEXT: li a3, 234 @@ -16496,9 +16085,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_242 ; CHECK-RV64-NEXT: .LBB61_756: # %cond.load937 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 236 ; CHECK-RV64-NEXT: li a3, 235 @@ -16512,9 +16100,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_243 ; CHECK-RV64-NEXT: .LBB61_757: # %cond.load941 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 237 ; CHECK-RV64-NEXT: li a3, 236 @@ -16528,9 +16115,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_244 ; CHECK-RV64-NEXT: .LBB61_758: # %cond.load945 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 238 ; CHECK-RV64-NEXT: li a3, 237 @@ -16544,9 +16130,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_245 ; CHECK-RV64-NEXT: .LBB61_759: # %cond.load949 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 239 ; CHECK-RV64-NEXT: li a3, 238 @@ -16560,9 
+16145,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_246 ; CHECK-RV64-NEXT: .LBB61_760: # %cond.load953 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 240 ; CHECK-RV64-NEXT: li a3, 239 @@ -16576,9 +16160,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_247 ; CHECK-RV64-NEXT: .LBB61_761: # %cond.load957 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 241 ; CHECK-RV64-NEXT: li a3, 240 @@ -16592,9 +16175,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_248 ; CHECK-RV64-NEXT: .LBB61_762: # %cond.load961 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 242 ; CHECK-RV64-NEXT: li a3, 241 @@ -16608,9 +16190,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_249 ; CHECK-RV64-NEXT: .LBB61_763: # %cond.load965 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 243 ; CHECK-RV64-NEXT: li a3, 242 @@ -16624,9 +16205,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_250 ; CHECK-RV64-NEXT: .LBB61_764: # %cond.load969 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 244 ; CHECK-RV64-NEXT: li a3, 243 @@ -16640,9 +16220,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_251 ; CHECK-RV64-NEXT: .LBB61_765: # %cond.load973 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 245 ; CHECK-RV64-NEXT: li a3, 244 @@ -16656,9 +16235,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_252 ; CHECK-RV64-NEXT: .LBB61_766: # %cond.load977 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 246 ; CHECK-RV64-NEXT: li a3, 245 @@ -16672,9 +16250,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_253 ; CHECK-RV64-NEXT: .LBB61_767: # 
%cond.load981 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 247 ; CHECK-RV64-NEXT: li a3, 246 @@ -16688,9 +16265,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_254 ; CHECK-RV64-NEXT: .LBB61_768: # %cond.load985 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 248 ; CHECK-RV64-NEXT: li a3, 247 @@ -16704,9 +16280,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_255 ; CHECK-RV64-NEXT: .LBB61_769: # %cond.load989 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 249 ; CHECK-RV64-NEXT: li a3, 248 @@ -16720,9 +16295,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_256 ; CHECK-RV64-NEXT: .LBB61_770: # %cond.load993 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 250 ; CHECK-RV64-NEXT: li a3, 249 @@ -16736,9 +16310,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_257 ; CHECK-RV64-NEXT: .LBB61_771: # %cond.load997 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 251 ; CHECK-RV64-NEXT: li a3, 250 @@ -16752,9 +16325,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_258 ; CHECK-RV64-NEXT: .LBB61_772: # %cond.load1001 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 252 ; CHECK-RV64-NEXT: li a3, 251 @@ -16768,9 +16340,8 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV64-NEXT: j .LBB61_259 ; CHECK-RV64-NEXT: .LBB61_773: # %cond.load1005 ; CHECK-RV64-NEXT: lbu a2, 0(a0) -; CHECK-RV64-NEXT: li a3, 512 +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv8r.v v16, v8 -; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v12, a2 ; CHECK-RV64-NEXT: li a2, 253 ; CHECK-RV64-NEXT: li a3, 252 diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll index 869478a1efa78..83637e4a71d45 100644 --- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll @@ -13,6 +13,7 @@ define @extract_nxv8i32_nxv4i32_0( %vec) { define 
@extract_nxv8i32_nxv4i32_4( %vec) { ; CHECK-LABEL: extract_nxv8i32_nxv4i32_4: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv4i32.nxv8i32( %vec, i64 4) @@ -30,6 +31,7 @@ define @extract_nxv8i32_nxv2i32_0( %vec) { define @extract_nxv8i32_nxv2i32_2( %vec) { ; CHECK-LABEL: extract_nxv8i32_nxv2i32_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i32.nxv8i32( %vec, i64 2) @@ -39,6 +41,7 @@ define @extract_nxv8i32_nxv2i32_2( %vec) { define @extract_nxv8i32_nxv2i32_4( %vec) { ; CHECK-LABEL: extract_nxv8i32_nxv2i32_4: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i32.nxv8i32( %vec, i64 4) @@ -48,6 +51,7 @@ define @extract_nxv8i32_nxv2i32_4( %vec) { define @extract_nxv8i32_nxv2i32_6( %vec) { ; CHECK-LABEL: extract_nxv8i32_nxv2i32_6: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i32.nxv8i32( %vec, i64 6) @@ -65,6 +69,7 @@ define @extract_nxv16i32_nxv8i32_0( %vec) define @extract_nxv16i32_nxv8i32_8( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv8i32_8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv8i32.nxv16i32( %vec, i64 8) @@ -82,6 +87,7 @@ define @extract_nxv16i32_nxv4i32_0( %vec) define @extract_nxv16i32_nxv4i32_4( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv4i32_4: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv4i32.nxv16i32( %vec, i64 4) @@ -91,6 +97,7 @@ define @extract_nxv16i32_nxv4i32_4( %vec) define @extract_nxv16i32_nxv4i32_8( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv4i32_8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv4i32.nxv16i32( %vec, i64 8) @@ -100,6 +107,7 @@ define @extract_nxv16i32_nxv4i32_8( %vec) define @extract_nxv16i32_nxv4i32_12( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv4i32_12: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv4i32.nxv16i32( %vec, i64 12) @@ -117,6 +125,7 @@ define @extract_nxv16i32_nxv2i32_0( %vec) define @extract_nxv16i32_nxv2i32_2( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv2i32_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 2) @@ -126,6 +135,7 @@ define @extract_nxv16i32_nxv2i32_2( %vec) define @extract_nxv16i32_nxv2i32_4( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv2i32_4: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 4) @@ -135,6 +145,7 @@ define @extract_nxv16i32_nxv2i32_4( %vec) define @extract_nxv16i32_nxv2i32_6( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv2i32_6: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 6) @@ -144,6 +155,7 @@ define @extract_nxv16i32_nxv2i32_6( %vec) define 
@extract_nxv16i32_nxv2i32_8( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv2i32_8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v12 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 8) @@ -153,6 +165,7 @@ define @extract_nxv16i32_nxv2i32_8( %vec) define @extract_nxv16i32_nxv2i32_10( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv2i32_10: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 10) @@ -162,6 +175,7 @@ define @extract_nxv16i32_nxv2i32_10( %vec) define @extract_nxv16i32_nxv2i32_12( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv2i32_12: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v14 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 12) @@ -171,6 +185,7 @@ define @extract_nxv16i32_nxv2i32_12( %vec) define @extract_nxv16i32_nxv2i32_14( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv2i32_14: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v15 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 14) @@ -224,6 +239,7 @@ define @extract_nxv16i32_nxv1i32_15( %vec) define @extract_nxv16i32_nxv1i32_2( %vec) { ; CHECK-LABEL: extract_nxv16i32_nxv1i32_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 2) @@ -287,6 +303,7 @@ define @extract_nxv32i8_nxv2i8_6( %vec) { define @extract_nxv32i8_nxv2i8_8( %vec) { ; CHECK-LABEL: extract_nxv32i8_nxv2i8_8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 8) @@ -357,6 +374,7 @@ define @extract_nxv2f16_nxv16f16_2( %vec define @extract_nxv2f16_nxv16f16_4( %vec) { ; CHECK-LABEL: extract_nxv2f16_nxv16f16_4: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2f16.nxv16f16( %vec, i64 4) @@ -504,6 +522,7 @@ define @extract_nxv2bf16_nxv16bf16_2( @extract_nxv2bf16_nxv16bf16_4( %vec) { ; CHECK-LABEL: extract_nxv2bf16_nxv16bf16_4: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2bf16.nxv16bf16( %vec, i64 4) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll index ce83e2d8a6220..1a5ca429b531f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll @@ -16,10 +16,10 @@ define <512 x i8> @single_source(<512 x i8> %a) { ; CHECK-NEXT: addi s0, sp, 1536 ; CHECK-NEXT: .cfi_def_cfa s0, 0 ; CHECK-NEXT: andi sp, sp, -512 +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: li a0, 512 ; CHECK-NEXT: addi a1, sp, 512 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.x.s a2, v16 ; CHECK-NEXT: vslidedown.vi v24, v16, 5 ; CHECK-NEXT: li a3, 432 @@ -104,10 +104,10 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) { ; CHECK-NEXT: addi s0, sp, 1536 ; CHECK-NEXT: .cfi_def_cfa s0, 0 ; CHECK-NEXT: andi sp, sp, -512 +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv8r.v v24, v8 
; CHECK-NEXT: li a0, 512 ; CHECK-NEXT: addi a1, sp, 512 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v24, 5 ; CHECK-NEXT: vmv.x.s a2, v24 ; CHECK-NEXT: li a3, 432 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll index 3eb5d36b4896a..5ea4924468595 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll @@ -1659,6 +1659,7 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: sub sp, sp, a1 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vmv8r.v v24, v8 ; RV32-NEXT: lui a2, 1044480 ; RV32-NEXT: lui a3, 61681 @@ -1677,7 +1678,6 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex ; RV32-NEXT: sw a3, 36(sp) ; RV32-NEXT: addi a3, sp, 16 ; RV32-NEXT: addi a4, a5, 1365 -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vx v16, v8, a1, v0.t ; RV32-NEXT: addi a5, a6, -256 ; RV32-NEXT: sw a4, 24(sp) @@ -2055,6 +2055,7 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: sub sp, sp, a1 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vmv8r.v v24, v8 ; RV32-NEXT: lui a2, 1044480 ; RV32-NEXT: lui a3, 61681 @@ -2073,7 +2074,6 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex ; RV32-NEXT: sw a3, 36(sp) ; RV32-NEXT: addi a3, sp, 16 ; RV32-NEXT: addi a4, a5, 1365 -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vx v16, v8, a1, v0.t ; RV32-NEXT: addi a5, a6, -256 ; RV32-NEXT: sw a4, 24(sp) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll index ee953a66a004f..60a9948198c8f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll @@ -180,6 +180,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset ra, -8 +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: li a1, 2 ; CHECK-NEXT: vmv8r.v v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll index 73e148edbe2d6..f42b4a3a26aad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll @@ -180,6 +180,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> % ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset ra, -8 +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: li a1, 2 ; CHECK-NEXT: vmv8r.v v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll index 511242aa677c2..5fe55583f314c 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll @@ -194,8 +194,8 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) ; ; ZVFHMIN-LABEL: vp_ceil_v8f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v9, v0 ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v9, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 ; ZVFHMIN-NEXT: lui a1, 307200 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -261,10 +261,10 @@ declare <16 x half> @llvm.vp.ceil.v16f16(<16 x half>, <16 x i1>, i32) define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -280,8 +280,8 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e ; ; ZVFHMIN-LABEL: vp_ceil_v16f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 ; ZVFHMIN-NEXT: lui a1, 307200 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -431,8 +431,8 @@ declare <8 x float> @llvm.vp.ceil.v8f32(<8 x float>, <8 x i1>, i32) define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -475,8 +475,8 @@ declare <16 x float> @llvm.vp.ceil.v16f32(<16 x float>, <16 x i1>, i32) define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -561,10 +561,10 @@ declare <4 x double> @llvm.vp.ceil.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI18_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -605,10 +605,10 @@ declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI20_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, 
zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -649,10 +649,10 @@ declare <15 x double> @llvm.vp.ceil.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI22_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -693,10 +693,10 @@ declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI24_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -743,6 +743,7 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -750,7 +751,6 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v0, 2 ; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll index 5e73e6df9170c..a5a1061842427 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll @@ -1796,6 +1796,7 @@ define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: sub sp, sp, a1 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv8r.v v24, v16 ; RV32-NEXT: lui a1, 349525 ; RV32-NEXT: lui a2, 209715 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll index 02e99ea513e69..49255320c40a6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll @@ -194,8 +194,8 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) ; ; ZVFHMIN-LABEL: vp_floor_v8f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v9, v0 ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v9, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 ; ZVFHMIN-NEXT: lui a1, 307200 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -261,10 +261,10 @@ declare <16 x 
half> @llvm.vp.floor.v16f16(<16 x half>, <16 x i1>, i32) define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -280,8 +280,8 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext % ; ; ZVFHMIN-LABEL: vp_floor_v16f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 ; ZVFHMIN-NEXT: lui a1, 307200 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -431,8 +431,8 @@ declare <8 x float> @llvm.vp.floor.v8f32(<8 x float>, <8 x i1>, i32) define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -475,8 +475,8 @@ declare <16 x float> @llvm.vp.floor.v16f32(<16 x float>, <16 x i1>, i32) define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -561,10 +561,10 @@ declare <4 x double> @llvm.vp.floor.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI18_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -605,10 +605,10 @@ declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI20_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -649,10 +649,10 @@ declare <15 x double> @llvm.vp.floor.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, 
%hi(.LCPI22_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -693,10 +693,10 @@ declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI24_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -743,6 +743,7 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -750,7 +751,6 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v0, 2 ; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll index f43934afc370d..11f92555f56cd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll @@ -13,8 +13,8 @@ declare <2 x half> @llvm.vp.maximum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -26,8 +26,8 @@ define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i ; ; ZVFHMIN-LABEL: vfmax_vv_v2f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; ZVFHMIN-NEXT: vmfeq.vv v0, v11, v11, v0.t @@ -83,8 +83,8 @@ declare <4 x half> @llvm.vp.maximum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -96,8 +96,8 @@ define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i ; ; ZVFHMIN-LABEL: vfmax_vv_v4f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8 ; 
ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; ZVFHMIN-NEXT: vmfeq.vv v0, v11, v11, v0.t @@ -153,8 +153,8 @@ declare <8 x half> @llvm.vp.maximum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -166,8 +166,8 @@ define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i ; ; ZVFHMIN-LABEL: vfmax_vv_v8f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t @@ -225,8 +225,8 @@ declare <16 x half> @llvm.vp.maximum.v16f16(<16 x half>, <16 x half>, <16 x i1>, define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v12, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v12, v0 ; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t ; ZVFH-NEXT: vmv1r.v v0, v13 ; ZVFH-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -240,8 +240,8 @@ define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> ; ; ZVFHMIN-LABEL: vfmax_vv_v16f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v12, v0 ; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v12, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t @@ -299,8 +299,8 @@ declare <2 x float> @llvm.vp.maximum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i define <2 x float> @vfmax_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -332,8 +332,8 @@ declare <4 x float> @llvm.vp.maximum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -365,8 +365,8 @@ declare <8 x float> @llvm.vp.maximum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -400,8 +400,8 @@ declare <16 x float> @llvm.vp.maximum.v16f32(<16 x float>, <16 x float>, <16 x i define <16 x float> @vfmax_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0 @@ -435,8 +435,8 @@ declare <2 x double> @llvm.vp.maximum.v2f64(<2 x double>, <2 x double>, <2 x i1> define <2 x double> @vfmax_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -468,8 +468,8 @@ declare <4 x double> @llvm.vp.maximum.v4f64(<4 x double>, <4 x double>, <4 x i1> define <4 x double> @vfmax_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -503,8 +503,8 @@ declare <8 x double> @llvm.vp.maximum.v8f64(<8 x double>, <8 x double>, <8 x i1> define <8 x double> @vfmax_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0 @@ -544,8 +544,8 @@ define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0 @@ -595,6 +595,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 ; CHECK-NEXT: slli a1, a1, 5 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 @@ -608,7 +609,6 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v16, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll index 7067cc21ab56d..3fb586b67a21b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll @@ -13,8 +13,8 @@ declare <2 x half> @llvm.vp.minimum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; 
ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -26,8 +26,8 @@ define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i ; ; ZVFHMIN-LABEL: vfmin_vv_v2f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; ZVFHMIN-NEXT: vmfeq.vv v0, v11, v11, v0.t @@ -83,8 +83,8 @@ declare <4 x half> @llvm.vp.minimum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -96,8 +96,8 @@ define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i ; ; ZVFHMIN-LABEL: vfmin_vv_v4f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; ZVFHMIN-NEXT: vmfeq.vv v0, v11, v11, v0.t @@ -153,8 +153,8 @@ declare <8 x half> @llvm.vp.minimum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -166,8 +166,8 @@ define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i ; ; ZVFHMIN-LABEL: vfmin_vv_v8f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t @@ -225,8 +225,8 @@ declare <16 x half> @llvm.vp.minimum.v16f16(<16 x half>, <16 x half>, <16 x i1>, define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v12, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v12, v0 ; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t ; ZVFH-NEXT: vmv1r.v v0, v13 ; ZVFH-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -240,8 +240,8 @@ define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> ; ; ZVFHMIN-LABEL: vfmin_vv_v16f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v12, v0 ; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v12, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t @@ -299,8 +299,8 @@ declare <2 x float> @llvm.vp.minimum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; 
CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -332,8 +332,8 @@ declare <4 x float> @llvm.vp.minimum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -365,8 +365,8 @@ declare <8 x float> @llvm.vp.minimum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -400,8 +400,8 @@ declare <16 x float> @llvm.vp.minimum.v16f32(<16 x float>, <16 x float>, <16 x i define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0 @@ -435,8 +435,8 @@ declare <2 x double> @llvm.vp.minimum.v2f64(<2 x double>, <2 x double>, <2 x i1> define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -468,8 +468,8 @@ declare <4 x double> @llvm.vp.minimum.v4f64(<4 x double>, <4 x double>, <4 x i1> define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -503,8 +503,8 @@ declare <8 x double> @llvm.vp.minimum.v8f64(<8 x double>, <8 x double>, <8 x i1> define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0 @@ -544,8 +544,8 @@ define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: vmerge.vvm v24, v8, v16, v0 @@ -595,6 +595,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 ; 
CHECK-NEXT: slli a1, a1, 5 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 @@ -608,7 +609,6 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v16, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll index 97e458e70565c..99aaecf4c6843 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll @@ -38,8 +38,8 @@ define <4 x float> @interleave_v2f32(<2 x float> %x, <2 x float> %y) { define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) { ; V128-LABEL: interleave_v2f64: ; V128: # %bb.0: -; V128-NEXT: vmv1r.v v12, v9 ; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; V128-NEXT: vmv1r.v v12, v9 ; V128-NEXT: vid.v v9 ; V128-NEXT: vmv.v.i v0, 10 ; V128-NEXT: vsrl.vi v14, v9, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll index e64c7c87132ee..582706e4dfa18 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll @@ -97,9 +97,9 @@ declare <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double>, <32 x i1>, i3 define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v32f32_v32f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v12, v0, 2 ; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB7_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll index a68dc11f3d21e..fc7cd94ca3de8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll @@ -712,8 +712,8 @@ define <16 x i64> @fshl_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: vle64.v v24, (a0) ; CHECK-NEXT: li a0, 63 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll index 1fbc8dfd688c4..62e7e3b109902 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -133,6 +133,7 @@ define @insert_nxv8i32_v4i32_0( %vec, <4 x ; ; VLS-LABEL: insert_nxv8i32_v4i32_0: ; VLS: # %bb.0: +; VLS-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; VLS-NEXT: vmv1r.v v8, v9 ; VLS-NEXT: 
ret %v = call @llvm.vector.insert.nxv2i32.v4i32( %vec, <4 x i32> %subvec, i64 0) @@ -143,6 +144,7 @@ define @insert_nxv8i32_v4i32_0( %vec, <4 x define <4 x i32> @insert_v4i32_v4i32_0(<4 x i32> %vec, <4 x i32> %subvec) { ; CHECK-LABEL: insert_v4i32_v4i32_0: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vector.insert.v4i32.v4i32(<4 x i32> %vec, <4 x i32> %subvec, i64 0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll index a8eb1f97fd1a2..7500198f14002 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll @@ -51,8 +51,8 @@ define <4 x i32> @interleave_v2i32(<2 x i32> %x, <2 x i32> %y) { define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) { ; V128-LABEL: interleave_v2i64: ; V128: # %bb.0: -; V128-NEXT: vmv1r.v v12, v9 ; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; V128-NEXT: vmv1r.v v12, v9 ; V128-NEXT: vid.v v9 ; V128-NEXT: vmv.v.i v0, 10 ; V128-NEXT: vsrl.vi v14, v9, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll index 6cc3f7e76797b..141d54cf585f2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -556,11 +556,13 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) { define <4 x i8> @mgather_falsemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) { ; RV32-LABEL: mgather_falsemask_v4i8: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64V-LABEL: mgather_falsemask_v4i8: ; RV64V: # %bb.0: +; RV64V-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64V-NEXT: vmv1r.v v8, v10 ; RV64V-NEXT: ret ; @@ -733,13 +735,13 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB12_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB12_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB12_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB12_15 ; RV64ZVE32F-NEXT: .LBB12_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB12_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB12_16 ; RV64ZVE32F-NEXT: .LBB12_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB12_9 @@ -756,14 +758,31 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB12_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB12_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lbu a2, 0(a2) +; RV64ZVE32F-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-NEXT: .LBB12_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB12_16 -; RV64ZVE32F-NEXT: .LBB12_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB12_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; 
RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lbu a0, 0(a0) +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-NEXT: .LBB12_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB12_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB12_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lbu a2, 0(a2) @@ -772,7 +791,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8 ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB12_6 -; RV64ZVE32F-NEXT: .LBB12_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB12_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -783,7 +802,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8 ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB12_7 -; RV64ZVE32F-NEXT: .LBB12_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB12_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -793,26 +812,6 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB12_8 ; RV64ZVE32F-NEXT: j .LBB12_9 -; RV64ZVE32F-NEXT: .LBB12_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lbu a2, 0(a2) -; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB12_11 -; RV64ZVE32F-NEXT: .LBB12_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lbu a0, 0(a0) -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds i8, ptr %base, <8 x i8> %idxs %v = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> %m, <8 x i8> %passthru) ret <8 x i8> %v @@ -1253,11 +1252,13 @@ define <4 x i16> @mgather_truemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) { define <4 x i16> @mgather_falsemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) { ; RV32-LABEL: mgather_falsemask_v4i16: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64V-LABEL: mgather_falsemask_v4i16: ; RV64V: # %bb.0: +; RV64V-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64V-NEXT: vmv1r.v v8, v10 ; RV64V-NEXT: ret ; @@ -1435,13 +1436,13 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB23_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB23_14 ; RV64ZVE32F-NEXT: # %bb.5: # 
%else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB23_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB23_15 ; RV64ZVE32F-NEXT: .LBB23_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB23_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB23_16 ; RV64ZVE32F-NEXT: .LBB23_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB23_9 @@ -1460,14 +1461,35 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB23_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB23_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 1 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lh a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-NEXT: .LBB23_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB23_16 -; RV64ZVE32F-NEXT: .LBB23_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB23_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 1 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lh a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-NEXT: .LBB23_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB23_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB23_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -1478,7 +1500,7 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB23_6 -; RV64ZVE32F-NEXT: .LBB23_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB23_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -1491,7 +1513,7 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB23_7 -; RV64ZVE32F-NEXT: .LBB23_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB23_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 @@ -1504,30 +1526,6 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB23_8 ; RV64ZVE32F-NEXT: j .LBB23_9 -; RV64ZVE32F-NEXT: .LBB23_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 1 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi 
v9, v10, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB23_11 -; RV64ZVE32F-NEXT: .LBB23_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 1 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds i16, ptr %base, <8 x i8> %idxs %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru) ret <8 x i16> %v @@ -1587,13 +1585,13 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB24_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB24_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB24_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB24_15 ; RV64ZVE32F-NEXT: .LBB24_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB24_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB24_16 ; RV64ZVE32F-NEXT: .LBB24_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB24_9 @@ -1612,14 +1610,35 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB24_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB24_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 1 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lh a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-NEXT: .LBB24_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB24_16 -; RV64ZVE32F-NEXT: .LBB24_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB24_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 1 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lh a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-NEXT: .LBB24_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB24_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB24_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -1630,7 +1649,7 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB24_6 -; RV64ZVE32F-NEXT: .LBB24_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB24_15: 
# %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -1643,7 +1662,7 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB24_7 -; RV64ZVE32F-NEXT: .LBB24_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB24_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 @@ -1656,30 +1675,6 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB24_8 ; RV64ZVE32F-NEXT: j .LBB24_9 -; RV64ZVE32F-NEXT: .LBB24_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 1 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB24_11 -; RV64ZVE32F-NEXT: .LBB24_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 1 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> %eidxs %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru) @@ -1740,13 +1735,13 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB25_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB25_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB25_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB25_15 ; RV64ZVE32F-NEXT: .LBB25_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB25_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB25_16 ; RV64ZVE32F-NEXT: .LBB25_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB25_9 @@ -1766,14 +1761,37 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB25_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB25_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: andi a2, a2, 255 +; RV64ZVE32F-NEXT: slli a2, a2, 1 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lh a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-NEXT: .LBB25_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; 
RV64ZVE32F-NEXT: bnez a1, .LBB25_16 -; RV64ZVE32F-NEXT: .LBB25_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB25_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: andi a1, a1, 255 +; RV64ZVE32F-NEXT: slli a1, a1, 1 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lh a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-NEXT: .LBB25_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB25_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB25_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 @@ -1785,7 +1803,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB25_6 -; RV64ZVE32F-NEXT: .LBB25_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB25_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -1799,7 +1817,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB25_7 -; RV64ZVE32F-NEXT: .LBB25_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB25_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 @@ -1813,32 +1831,6 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB25_8 ; RV64ZVE32F-NEXT: j .LBB25_9 -; RV64ZVE32F-NEXT: .LBB25_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: andi a2, a2, 255 -; RV64ZVE32F-NEXT: slli a2, a2, 1 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB25_11 -; RV64ZVE32F-NEXT: .LBB25_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: andi a1, a1, 255 -; RV64ZVE32F-NEXT: slli a1, a1, 1 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> %eidxs %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru) @@ -1896,13 +1888,13 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m, ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; 
RV64ZVE32F-NEXT: bnez a2, .LBB26_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB26_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB26_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB26_15 ; RV64ZVE32F-NEXT: .LBB26_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB26_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB26_16 ; RV64ZVE32F-NEXT: .LBB26_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB26_9 @@ -1920,14 +1912,33 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m, ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB26_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB26_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 1 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lh a2, 0(a2) +; RV64ZVE32F-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-NEXT: .LBB26_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB26_16 -; RV64ZVE32F-NEXT: .LBB26_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB26_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 1 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lh a0, 0(a0) +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-NEXT: .LBB26_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB26_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB26_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -1937,7 +1948,7 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m, ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB26_6 -; RV64ZVE32F-NEXT: .LBB26_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB26_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -1949,7 +1960,7 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m, ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB26_7 -; RV64ZVE32F-NEXT: .LBB26_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB26_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 @@ -1960,28 +1971,6 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m, ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB26_8 ; RV64ZVE32F-NEXT: j .LBB26_9 -; RV64ZVE32F-NEXT: .LBB26_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 1 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; 
RV64ZVE32F-NEXT: beqz a1, .LBB26_11 -; RV64ZVE32F-NEXT: .LBB26_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 1 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> %idxs %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru) ret <8 x i16> %v @@ -2311,11 +2300,13 @@ define <4 x i32> @mgather_truemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) { define <4 x i32> @mgather_falsemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) { ; RV32-LABEL: mgather_falsemask_v4i32: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64V-LABEL: mgather_falsemask_v4i32: ; RV64V: # %bb.0: +; RV64V-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64V-NEXT: vmv1r.v v8, v10 ; RV64V-NEXT: ret ; @@ -2492,13 +2483,13 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB35_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB35_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB35_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB35_15 ; RV64ZVE32F-NEXT: .LBB35_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB35_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB35_16 ; RV64ZVE32F-NEXT: .LBB35_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_9 @@ -2517,14 +2508,35 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB35_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB35_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lw a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB35_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB35_16 -; RV64ZVE32F-NEXT: .LBB35_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB35_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lw a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB35_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB35_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB35_14: # %cond.load4 ; 
RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -2535,7 +2547,7 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_6 -; RV64ZVE32F-NEXT: .LBB35_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB35_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -2548,7 +2560,7 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_7 -; RV64ZVE32F-NEXT: .LBB35_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB35_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 @@ -2561,30 +2573,6 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB35_8 ; RV64ZVE32F-NEXT: j .LBB35_9 -; RV64ZVE32F-NEXT: .LBB35_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB35_11 -; RV64ZVE32F-NEXT: .LBB35_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds i32, ptr %base, <8 x i8> %idxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) ret <8 x i32> %v @@ -2643,13 +2631,13 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB36_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB36_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB36_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB36_15 ; RV64ZVE32F-NEXT: .LBB36_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB36_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB36_16 ; RV64ZVE32F-NEXT: .LBB36_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB36_9 @@ -2668,14 +2656,35 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB36_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB36_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: 
add a2, a0, a2 +; RV64ZVE32F-NEXT: lw a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB36_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB36_16 -; RV64ZVE32F-NEXT: .LBB36_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB36_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lw a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB36_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB36_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB36_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -2686,7 +2695,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB36_6 -; RV64ZVE32F-NEXT: .LBB36_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB36_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -2699,7 +2708,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB36_7 -; RV64ZVE32F-NEXT: .LBB36_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB36_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 @@ -2712,30 +2721,6 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB36_8 ; RV64ZVE32F-NEXT: j .LBB36_9 -; RV64ZVE32F-NEXT: .LBB36_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB36_11 -; RV64ZVE32F-NEXT: .LBB36_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %eidxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) @@ -2798,13 +2783,13 @@ define <8 x i32> 
@mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB37_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB37_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB37_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB37_15 ; RV64ZVE32F-NEXT: .LBB37_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB37_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB37_16 ; RV64ZVE32F-NEXT: .LBB37_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB37_9 @@ -2824,14 +2809,37 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB37_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB37_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: andi a2, a2, 255 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lw a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB37_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB37_16 -; RV64ZVE32F-NEXT: .LBB37_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB37_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: andi a1, a1, 255 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lw a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB37_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB37_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB37_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 @@ -2843,7 +2851,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB37_6 -; RV64ZVE32F-NEXT: .LBB37_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB37_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -2857,7 +2865,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB37_7 -; RV64ZVE32F-NEXT: .LBB37_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB37_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 @@ -2871,32 +2879,6 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB37_8 ; RV64ZVE32F-NEXT: 
j .LBB37_9 -; RV64ZVE32F-NEXT: .LBB37_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: andi a2, a2, 255 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB37_11 -; RV64ZVE32F-NEXT: .LBB37_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: andi a1, a1, 255 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %eidxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) @@ -2957,13 +2939,13 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB38_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB38_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB38_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB38_15 ; RV64ZVE32F-NEXT: .LBB38_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB38_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB38_16 ; RV64ZVE32F-NEXT: .LBB38_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB38_9 @@ -2982,14 +2964,35 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB38_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB38_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lw a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB38_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB38_16 -; RV64ZVE32F-NEXT: .LBB38_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB38_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lw a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB38_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret 
-; RV64ZVE32F-NEXT: .LBB38_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB38_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -3000,7 +3003,7 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB38_6 -; RV64ZVE32F-NEXT: .LBB38_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB38_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -3013,7 +3016,7 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB38_7 -; RV64ZVE32F-NEXT: .LBB38_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB38_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 @@ -3026,30 +3029,6 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB38_8 ; RV64ZVE32F-NEXT: j .LBB38_9 -; RV64ZVE32F-NEXT: .LBB38_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB38_11 -; RV64ZVE32F-NEXT: .LBB38_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds i32, ptr %base, <8 x i16> %idxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) ret <8 x i32> %v @@ -3109,13 +3088,13 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB39_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB39_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB39_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB39_15 ; RV64ZVE32F-NEXT: .LBB39_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB39_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB39_16 ; RV64ZVE32F-NEXT: .LBB39_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB39_9 @@ -3134,14 +3113,35 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB39_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB39_11 +; RV64ZVE32F-NEXT: # %bb.10: # 
%cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lw a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB39_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB39_16 -; RV64ZVE32F-NEXT: .LBB39_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB39_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lw a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB39_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB39_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB39_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -3152,7 +3152,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB39_6 -; RV64ZVE32F-NEXT: .LBB39_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB39_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -3165,7 +3165,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB39_7 -; RV64ZVE32F-NEXT: .LBB39_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB39_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 @@ -3178,30 +3178,6 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB39_8 ; RV64ZVE32F-NEXT: j .LBB39_9 -; RV64ZVE32F-NEXT: .LBB39_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB39_11 -; RV64ZVE32F-NEXT: .LBB39_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %eidxs %v = call <8 x i32> 
@llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) @@ -3265,13 +3241,13 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a3, .LBB40_12 +; RV64ZVE32F-NEXT: bnez a3, .LBB40_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a3, a2, 8 -; RV64ZVE32F-NEXT: bnez a3, .LBB40_13 +; RV64ZVE32F-NEXT: bnez a3, .LBB40_15 ; RV64ZVE32F-NEXT: .LBB40_6: # %else8 ; RV64ZVE32F-NEXT: andi a3, a2, 16 -; RV64ZVE32F-NEXT: bnez a3, .LBB40_14 +; RV64ZVE32F-NEXT: bnez a3, .LBB40_16 ; RV64ZVE32F-NEXT: .LBB40_7: # %else11 ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB40_9 @@ -3291,14 +3267,37 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a3, .LBB40_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a3, .LBB40_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a3, v8 +; RV64ZVE32F-NEXT: and a3, a3, a1 +; RV64ZVE32F-NEXT: slli a3, a3, 2 +; RV64ZVE32F-NEXT: add a3, a0, a3 +; RV64ZVE32F-NEXT: lw a3, 0(a3) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v12, a3 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB40_11: # %else17 ; RV64ZVE32F-NEXT: andi a2, a2, -128 -; RV64ZVE32F-NEXT: bnez a2, .LBB40_16 -; RV64ZVE32F-NEXT: .LBB40_11: # %else20 +; RV64ZVE32F-NEXT: beqz a2, .LBB40_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: and a1, a2, a1 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lw a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB40_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB40_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB40_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 @@ -3310,7 +3309,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: beqz a3, .LBB40_6 -; RV64ZVE32F-NEXT: .LBB40_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB40_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 @@ -3324,7 +3323,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB40_7 -; RV64ZVE32F-NEXT: .LBB40_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB40_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: and a3, a3, a1 @@ -3338,32 +3337,6 @@ define <8 x i32> 
@mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: bnez a3, .LBB40_8 ; RV64ZVE32F-NEXT: j .LBB40_9 -; RV64ZVE32F-NEXT: .LBB40_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a3, v8 -; RV64ZVE32F-NEXT: and a3, a3, a1 -; RV64ZVE32F-NEXT: slli a3, a3, 2 -; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v12, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a2, a2, -128 -; RV64ZVE32F-NEXT: beqz a2, .LBB40_11 -; RV64ZVE32F-NEXT: .LBB40_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: and a1, a2, a1 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %eidxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) @@ -3420,13 +3393,13 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m, ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB41_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB41_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB41_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB41_15 ; RV64ZVE32F-NEXT: .LBB41_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB41_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB41_16 ; RV64ZVE32F-NEXT: .LBB41_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB41_9 @@ -3444,14 +3417,33 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m, ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB41_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB41_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lw a2, 0(a2) +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB41_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB41_16 -; RV64ZVE32F-NEXT: .LBB41_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB41_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lw a0, 0(a0) +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB41_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: 
vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB41_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB41_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -3461,7 +3453,7 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m, ; RV64ZVE32F-NEXT: vslideup.vi v10, v9, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB41_6 -; RV64ZVE32F-NEXT: .LBB41_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB41_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -3472,7 +3464,7 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m, ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB41_7 -; RV64ZVE32F-NEXT: .LBB41_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB41_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: slli a2, a2, 2 @@ -3483,28 +3475,6 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m, ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB41_8 ; RV64ZVE32F-NEXT: j .LBB41_9 -; RV64ZVE32F-NEXT: .LBB41_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB41_11 -; RV64ZVE32F-NEXT: .LBB41_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %idxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) ret <8 x i32> %v @@ -3822,11 +3792,13 @@ define <4 x i64> @mgather_truemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) { define <4 x i64> @mgather_falsemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) { ; RV32V-LABEL: mgather_falsemask_v4i64: ; RV32V: # %bb.0: +; RV32V-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32V-NEXT: vmv2r.v v8, v10 ; RV32V-NEXT: ret ; ; RV64V-LABEL: mgather_falsemask_v4i64: ; RV64V: # %bb.0: +; RV64V-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64V-NEXT: vmv2r.v v8, v10 ; RV64V-NEXT: ret ; @@ -7113,11 +7085,13 @@ define <4 x bfloat> @mgather_truemask_v4bf16(<4 x ptr> %ptrs, <4 x bfloat> %pass define <4 x bfloat> @mgather_falsemask_v4bf16(<4 x ptr> %ptrs, <4 x bfloat> %passthru) { ; RV32-LABEL: mgather_falsemask_v4bf16: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64V-LABEL: mgather_falsemask_v4bf16: ; RV64V: # %bb.0: +; RV64V-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64V-NEXT: vmv1r.v v8, v10 ; RV64V-NEXT: ret ; @@ -7295,13 +7269,13 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma 
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB64_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB64_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB64_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB64_15 ; RV64ZVE32F-NEXT: .LBB64_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB64_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB64_16 ; RV64ZVE32F-NEXT: .LBB64_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB64_9 @@ -7320,14 +7294,35 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB64_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB64_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 1 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lh a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-NEXT: .LBB64_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB64_16 -; RV64ZVE32F-NEXT: .LBB64_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB64_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 1 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lh a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-NEXT: .LBB64_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB64_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB64_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -7338,7 +7333,7 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB64_6 -; RV64ZVE32F-NEXT: .LBB64_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB64_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -7351,7 +7346,7 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB64_7 -; RV64ZVE32F-NEXT: .LBB64_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB64_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 @@ -7364,30 +7359,6 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB64_8 ; RV64ZVE32F-NEXT: j .LBB64_9 -; RV64ZVE32F-NEXT: .LBB64_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 1 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli 
zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB64_11 -; RV64ZVE32F-NEXT: .LBB64_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 1 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds bfloat, ptr %base, <8 x i8> %idxs %v = call <8 x bfloat> @llvm.masked.gather.v8bf16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x bfloat> %passthru) ret <8 x bfloat> %v @@ -7447,13 +7418,13 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB65_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB65_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB65_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB65_15 ; RV64ZVE32F-NEXT: .LBB65_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB65_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB65_16 ; RV64ZVE32F-NEXT: .LBB65_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB65_9 @@ -7472,14 +7443,35 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB65_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB65_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 1 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lh a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-NEXT: .LBB65_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB65_16 -; RV64ZVE32F-NEXT: .LBB65_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB65_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 1 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lh a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-NEXT: .LBB65_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB65_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB65_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -7490,7 +7482,7 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, ; RV64ZVE32F-NEXT: 
vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB65_6 -; RV64ZVE32F-NEXT: .LBB65_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB65_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -7503,7 +7495,7 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB65_7 -; RV64ZVE32F-NEXT: .LBB65_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB65_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 @@ -7516,30 +7508,6 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB65_8 ; RV64ZVE32F-NEXT: j .LBB65_9 -; RV64ZVE32F-NEXT: .LBB65_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 1 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB65_11 -; RV64ZVE32F-NEXT: .LBB65_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 1 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds bfloat, ptr %base, <8 x i16> %eidxs %v = call <8 x bfloat> @llvm.masked.gather.v8bf16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x bfloat> %passthru) @@ -7600,13 +7568,13 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB66_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB66_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB66_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB66_15 ; RV64ZVE32F-NEXT: .LBB66_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB66_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB66_16 ; RV64ZVE32F-NEXT: .LBB66_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB66_9 @@ -7626,14 +7594,37 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB66_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB66_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: andi a2, a2, 255 +; RV64ZVE32F-NEXT: slli a2, a2, 1 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lh a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v10, a2 +; 
RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-NEXT: .LBB66_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB66_16 -; RV64ZVE32F-NEXT: .LBB66_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB66_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: andi a1, a1, 255 +; RV64ZVE32F-NEXT: slli a1, a1, 1 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lh a0, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-NEXT: .LBB66_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB66_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB66_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 @@ -7645,7 +7636,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB66_6 -; RV64ZVE32F-NEXT: .LBB66_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB66_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -7658,47 +7649,21 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, ; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: beqz a2, .LBB66_7 -; RV64ZVE32F-NEXT: .LBB66_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.x.s a2, v10 -; RV64ZVE32F-NEXT: andi a2, a2, 255 -; RV64ZVE32F-NEXT: slli a2, a2, 1 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 4 -; RV64ZVE32F-NEXT: andi a2, a1, 32 -; RV64ZVE32F-NEXT: bnez a2, .LBB66_8 -; RV64ZVE32F-NEXT: j .LBB66_9 -; RV64ZVE32F-NEXT: .LBB66_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: andi a2, a2, 255 -; RV64ZVE32F-NEXT: slli a2, a2, 1 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB66_11 -; RV64ZVE32F-NEXT: .LBB66_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: andi a1, a1, 255 -; RV64ZVE32F-NEXT: slli a1, a1, 1 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-NEXT: ret +; RV64ZVE32F-NEXT: beqz a2, .LBB66_7 +; RV64ZVE32F-NEXT: .LBB66_16: # %cond.load10 
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.x.s a2, v10 +; RV64ZVE32F-NEXT: andi a2, a2, 255 +; RV64ZVE32F-NEXT: slli a2, a2, 1 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lh a2, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; RV64ZVE32F-NEXT: vmv.s.x v8, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 4 +; RV64ZVE32F-NEXT: andi a2, a1, 32 +; RV64ZVE32F-NEXT: bnez a2, .LBB66_8 +; RV64ZVE32F-NEXT: j .LBB66_9 %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds bfloat, ptr %base, <8 x i16> %eidxs %v = call <8 x bfloat> @llvm.masked.gather.v8bf16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x bfloat> %passthru) @@ -7756,13 +7721,13 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB67_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB67_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB67_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB67_15 ; RV64ZVE32F-NEXT: .LBB67_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB67_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB67_16 ; RV64ZVE32F-NEXT: .LBB67_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB67_9 @@ -7780,14 +7745,33 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB67_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB67_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 1 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: lh a2, 0(a2) +; RV64ZVE32F-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-NEXT: .LBB67_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB67_16 -; RV64ZVE32F-NEXT: .LBB67_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB67_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 1 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: lh a0, 0(a0) +; RV64ZVE32F-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-NEXT: .LBB67_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB67_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB67_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -7797,7 +7781,7 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB67_6 -; RV64ZVE32F-NEXT: .LBB67_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB67_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -7809,7 +7793,7 @@ 
define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB67_7 -; RV64ZVE32F-NEXT: .LBB67_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB67_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 @@ -7820,28 +7804,6 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB67_8 ; RV64ZVE32F-NEXT: j .LBB67_9 -; RV64ZVE32F-NEXT: .LBB67_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 1 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB67_11 -; RV64ZVE32F-NEXT: .LBB67_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 1 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds bfloat, ptr %base, <8 x i16> %idxs %v = call <8 x bfloat> @llvm.masked.gather.v8bf16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x bfloat> %passthru) ret <8 x bfloat> %v @@ -8135,11 +8097,13 @@ define <4 x half> @mgather_truemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru) define <4 x half> @mgather_falsemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru) { ; RV32-LABEL: mgather_falsemask_v4f16: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64V-LABEL: mgather_falsemask_v4f16: ; RV64V: # %bb.0: +; RV64V-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64V-NEXT: vmv1r.v v8, v10 ; RV64V-NEXT: ret ; @@ -8410,13 +8374,13 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 4 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_12 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_14 ; RV64ZVE32F-ZVFH-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_13 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_15 ; RV64ZVE32F-ZVFH-NEXT: .LBB74_6: # %else8 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_14 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_16 ; RV64ZVE32F-ZVFH-NEXT: .LBB74_7: # %else11 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB74_9 @@ -8435,14 +8399,35 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 64 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_15 -; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB74_11 +; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 +; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2 +; 
RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2) +; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-ZVFH-NEXT: .LBB74_11: # %else17 ; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFH-NEXT: bnez a1, .LBB74_16 -; RV64ZVE32F-ZVFH-NEXT: .LBB74_11: # %else20 +; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB74_13 +; RV64ZVE32F-ZVFH-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1 +; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1 +; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0) +; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-ZVFH-NEXT: .LBB74_13: # %else20 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-ZVFH-NEXT: ret -; RV64ZVE32F-ZVFH-NEXT: .LBB74_12: # %cond.load4 +; RV64ZVE32F-ZVFH-NEXT: .LBB74_14: # %cond.load4 ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 ; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2 @@ -8453,7 +8438,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1 ; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB74_6 -; RV64ZVE32F-ZVFH-NEXT: .LBB74_13: # %cond.load7 +; RV64ZVE32F-ZVFH-NEXT: .LBB74_15: # %cond.load7 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 @@ -8466,7 +8451,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1 ; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB74_7 -; RV64ZVE32F-ZVFH-NEXT: .LBB74_14: # %cond.load10 +; RV64ZVE32F-ZVFH-NEXT: .LBB74_16: # %cond.load10 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 @@ -8479,30 +8464,6 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_8 ; RV64ZVE32F-ZVFH-NEXT: j .LBB74_9 -; RV64ZVE32F-ZVFH-NEXT: .LBB74_15: # %cond.load16 -; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 -; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2 -; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2) -; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB74_11 -; RV64ZVE32F-ZVFH-NEXT: .LBB74_16: # %cond.load19 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1 -; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1 -; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0) -; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; 
RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-ZVFH-NEXT: ret ; ; RV64ZVE32F-ZVFHMIN-LABEL: mgather_baseidx_v8i8_v8f16: ; RV64ZVE32F-ZVFHMIN: # %bb.0: @@ -8537,13 +8498,13 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 4 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_12 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_14 ; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_13 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_15 ; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_6: # %else8 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_14 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_16 ; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_7: # %else11 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB74_9 @@ -8562,14 +8523,35 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 64 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_15 -; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB74_11 +; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2 +; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2) +; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_11: # %else17 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a1, .LBB74_16 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_11: # %else20 +; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB74_13 +; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1 +; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0) +; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_13: # %else20 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-ZVFHMIN-NEXT: ret -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_12: # %cond.load4 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_14: # %cond.load4 ; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 ; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2 @@ -8580,7 +8562,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1 ; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB74_6 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_13: # %cond.load7 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_15: # %cond.load7 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: 
vslidedown.vi v8, v8, 1 ; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 @@ -8593,7 +8575,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1 ; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB74_7 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_14: # %cond.load10 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_16: # %cond.load10 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 @@ -8606,30 +8588,6 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_8 ; RV64ZVE32F-ZVFHMIN-NEXT: j .LBB74_9 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_15: # %cond.load16 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2 -; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2) -; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB74_11 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_16: # %cond.load19 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1 -; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0) -; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-ZVFHMIN-NEXT: ret %ptrs = getelementptr inbounds half, ptr %base, <8 x i8> %idxs %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru) ret <8 x half> %v @@ -8689,13 +8647,13 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 4 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_12 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_14 ; RV64ZVE32F-ZVFH-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_13 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_15 ; RV64ZVE32F-ZVFH-NEXT: .LBB75_6: # %else8 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_14 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_16 ; RV64ZVE32F-ZVFH-NEXT: .LBB75_7: # %else11 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB75_9 @@ -8714,14 +8672,35 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 64 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_15 -; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB75_11 +; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 +; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2 +; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2) +; 
RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-ZVFH-NEXT: .LBB75_11: # %else17 ; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFH-NEXT: bnez a1, .LBB75_16 -; RV64ZVE32F-ZVFH-NEXT: .LBB75_11: # %else20 +; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB75_13 +; RV64ZVE32F-ZVFH-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1 +; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1 +; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0) +; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-ZVFH-NEXT: .LBB75_13: # %else20 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-ZVFH-NEXT: ret -; RV64ZVE32F-ZVFH-NEXT: .LBB75_12: # %cond.load4 +; RV64ZVE32F-ZVFH-NEXT: .LBB75_14: # %cond.load4 ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 ; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2 @@ -8732,7 +8711,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB75_6 -; RV64ZVE32F-ZVFH-NEXT: .LBB75_13: # %cond.load7 +; RV64ZVE32F-ZVFH-NEXT: .LBB75_15: # %cond.load7 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 @@ -8745,7 +8724,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB75_7 -; RV64ZVE32F-ZVFH-NEXT: .LBB75_14: # %cond.load10 +; RV64ZVE32F-ZVFH-NEXT: .LBB75_16: # %cond.load10 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 @@ -8758,30 +8737,6 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_8 ; RV64ZVE32F-ZVFH-NEXT: j .LBB75_9 -; RV64ZVE32F-ZVFH-NEXT: .LBB75_15: # %cond.load16 -; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 -; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2 -; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2) -; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB75_11 -; RV64ZVE32F-ZVFH-NEXT: .LBB75_16: # %cond.load19 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1 -; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1 -; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0) -; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7 -; 
RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-ZVFH-NEXT: ret ; ; RV64ZVE32F-ZVFHMIN-LABEL: mgather_baseidx_sext_v8i8_v8f16: ; RV64ZVE32F-ZVFHMIN: # %bb.0: @@ -8816,13 +8771,13 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 4 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_12 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_14 ; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_13 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_15 ; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_6: # %else8 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_14 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_16 ; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_7: # %else11 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB75_9 @@ -8841,14 +8796,35 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 64 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_15 -; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB75_11 +; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2 +; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2) +; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_11: # %else17 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a1, .LBB75_16 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_11: # %else20 +; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB75_13 +; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1 +; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0) +; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_13: # %else20 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-ZVFHMIN-NEXT: ret -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_12: # %cond.load4 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_14: # %cond.load4 ; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 ; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2 @@ -8859,7 +8835,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB75_6 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_13: # %cond.load7 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_15: # %cond.load7 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-ZVFHMIN-NEXT: 
vmv.x.s a2, v8 @@ -8872,7 +8848,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB75_7 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_14: # %cond.load10 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_16: # %cond.load10 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 @@ -8885,30 +8861,6 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_8 ; RV64ZVE32F-ZVFHMIN-NEXT: j .LBB75_9 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_15: # %cond.load16 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2 -; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2) -; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB75_11 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_16: # %cond.load19 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1 -; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0) -; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-ZVFHMIN-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, ptr %base, <8 x i16> %eidxs %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru) @@ -8969,13 +8921,13 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 4 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_12 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_14 ; RV64ZVE32F-ZVFH-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_13 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_15 ; RV64ZVE32F-ZVFH-NEXT: .LBB76_6: # %else8 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_14 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_16 ; RV64ZVE32F-ZVFH-NEXT: .LBB76_7: # %else11 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB76_9 @@ -8995,14 +8947,37 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 64 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_15 -; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB76_11 +; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255 +; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 +; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2 +; RV64ZVE32F-ZVFH-NEXT: 
flh fa5, 0(a2) +; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-ZVFH-NEXT: .LBB76_11: # %else17 ; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFH-NEXT: bnez a1, .LBB76_16 -; RV64ZVE32F-ZVFH-NEXT: .LBB76_11: # %else20 +; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB76_13 +; RV64ZVE32F-ZVFH-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, 255 +; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1 +; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1 +; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0) +; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-ZVFH-NEXT: .LBB76_13: # %else20 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-ZVFH-NEXT: ret -; RV64ZVE32F-ZVFH-NEXT: .LBB76_12: # %cond.load4 +; RV64ZVE32F-ZVFH-NEXT: .LBB76_14: # %cond.load4 ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255 ; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 @@ -9014,7 +8989,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB76_6 -; RV64ZVE32F-ZVFH-NEXT: .LBB76_13: # %cond.load7 +; RV64ZVE32F-ZVFH-NEXT: .LBB76_15: # %cond.load7 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 @@ -9028,7 +9003,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB76_7 -; RV64ZVE32F-ZVFH-NEXT: .LBB76_14: # %cond.load10 +; RV64ZVE32F-ZVFH-NEXT: .LBB76_16: # %cond.load10 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255 @@ -9042,32 +9017,6 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_8 ; RV64ZVE32F-ZVFH-NEXT: j .LBB76_9 -; RV64ZVE32F-ZVFH-NEXT: .LBB76_15: # %cond.load16 -; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255 -; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 -; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2 -; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2) -; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB76_11 -; RV64ZVE32F-ZVFH-NEXT: .LBB76_16: # %cond.load19 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, 255 -; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1 -; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1 -; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0) -; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; 
RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-ZVFH-NEXT: ret ; ; RV64ZVE32F-ZVFHMIN-LABEL: mgather_baseidx_zext_v8i8_v8f16: ; RV64ZVE32F-ZVFHMIN: # %bb.0: @@ -9104,13 +9053,13 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 4 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_12 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_14 ; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_13 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_15 ; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_6: # %else8 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_14 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_16 ; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_7: # %else11 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB76_9 @@ -9130,14 +9079,37 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 64 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_15 -; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB76_11 +; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255 +; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2 +; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2) +; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_11: # %else17 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a1, .LBB76_16 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_11: # %else20 +; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB76_13 +; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, 255 +; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1 +; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0) +; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_13: # %else20 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-ZVFHMIN-NEXT: ret -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_12: # %cond.load4 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_14: # %cond.load4 ; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255 ; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 @@ -9149,7 +9121,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB76_6 -; 
RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_13: # %cond.load7 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_15: # %cond.load7 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 @@ -9163,7 +9135,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB76_7 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_14: # %cond.load10 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_16: # %cond.load10 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255 @@ -9177,32 +9149,6 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_8 ; RV64ZVE32F-ZVFHMIN-NEXT: j .LBB76_9 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_15: # %cond.load16 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255 -; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2 -; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2) -; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB76_11 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_16: # %cond.load19 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, 255 -; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1 -; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0) -; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-ZVFHMIN-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, ptr %base, <8 x i16> %eidxs %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru) @@ -9260,13 +9206,13 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 4 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_12 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_14 ; RV64ZVE32F-ZVFH-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_13 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_15 ; RV64ZVE32F-ZVFH-NEXT: .LBB77_6: # %else8 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_14 +; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_16 ; RV64ZVE32F-ZVFH-NEXT: .LBB77_7: # %else11 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB77_9 @@ -9284,14 +9230,33 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 64 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-ZVFH-NEXT: bnez a2, 
.LBB77_15 -; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB77_11 +; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 +; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2 +; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2) +; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-ZVFH-NEXT: .LBB77_11: # %else17 ; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFH-NEXT: bnez a1, .LBB77_16 -; RV64ZVE32F-ZVFH-NEXT: .LBB77_11: # %else20 +; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB77_13 +; RV64ZVE32F-ZVFH-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1 +; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1 +; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0) +; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-ZVFH-NEXT: .LBB77_13: # %else20 +; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-ZVFH-NEXT: ret -; RV64ZVE32F-ZVFH-NEXT: .LBB77_12: # %cond.load4 +; RV64ZVE32F-ZVFH-NEXT: .LBB77_14: # %cond.load4 ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 ; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2 @@ -9301,7 +9266,7 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB77_6 -; RV64ZVE32F-ZVFH-NEXT: .LBB77_13: # %cond.load7 +; RV64ZVE32F-ZVFH-NEXT: .LBB77_15: # %cond.load7 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 @@ -9313,7 +9278,7 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16 ; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB77_7 -; RV64ZVE32F-ZVFH-NEXT: .LBB77_14: # %cond.load10 +; RV64ZVE32F-ZVFH-NEXT: .LBB77_16: # %cond.load10 ; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 @@ -9324,28 +9289,6 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_8 ; RV64ZVE32F-ZVFH-NEXT: j .LBB77_9 -; RV64ZVE32F-ZVFH-NEXT: .LBB77_15: # %cond.load16 -; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1 -; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2 -; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2) -; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB77_11 -; RV64ZVE32F-ZVFH-NEXT: .LBB77_16: # %cond.load19 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1 -; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1 -; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0) -; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 
8, e16, m1, ta, ma -; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-ZVFH-NEXT: ret ; ; RV64ZVE32F-ZVFHMIN-LABEL: mgather_baseidx_v8f16: ; RV64ZVE32F-ZVFHMIN: # %bb.0: @@ -9379,13 +9322,13 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 4 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_12 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_14 ; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_13 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_15 ; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_6: # %else8 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_14 +; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_16 ; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_7: # %else11 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB77_9 @@ -9403,14 +9346,33 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 64 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v10, 2 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_15 -; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB77_11 +; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2 +; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2) +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_11: # %else17 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFHMIN-NEXT: bnez a1, .LBB77_16 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_11: # %else20 +; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB77_13 +; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1 +; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1 +; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0) +; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_13: # %else20 +; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-ZVFHMIN-NEXT: ret -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_12: # %cond.load4 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_14: # %cond.load4 ; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 ; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2 @@ -9420,7 +9382,7 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB77_6 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_13: # %cond.load7 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_15: # %cond.load7 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 @@ -9432,7 +9394,7 @@ define <8 x half> 
@mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 3 ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16 ; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB77_7 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_14: # %cond.load10 +; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_16: # %cond.load10 ; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 @@ -9443,28 +9405,6 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32 ; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_8 ; RV64ZVE32F-ZVFHMIN-NEXT: j .LBB77_9 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_15: # %cond.load16 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2 -; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2) -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6 -; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128 -; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB77_11 -; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_16: # %cond.load19 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1 -; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1 -; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0) -; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7 -; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9 -; RV64ZVE32F-ZVFHMIN-NEXT: ret %ptrs = getelementptr inbounds half, ptr %base, <8 x i16> %idxs %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru) ret <8 x half> %v @@ -9666,11 +9606,13 @@ define <4 x float> @mgather_truemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthr define <4 x float> @mgather_falsemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthru) { ; RV32-LABEL: mgather_falsemask_v4f32: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64V-LABEL: mgather_falsemask_v4f32: ; RV64V: # %bb.0: +; RV64V-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64V-NEXT: vmv1r.v v8, v10 ; RV64V-NEXT: ret ; @@ -9847,13 +9789,13 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB84_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB84_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB84_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB84_15 ; RV64ZVE32F-NEXT: .LBB84_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB84_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB84_16 ; RV64ZVE32F-NEXT: .LBB84_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB84_9 @@ -9872,14 +9814,35 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB84_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB84_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; 
RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: flw fa5, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB84_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB84_16 -; RV64ZVE32F-NEXT: .LBB84_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB84_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: flw fa5, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB84_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB84_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB84_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -9890,7 +9853,7 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB84_6 -; RV64ZVE32F-NEXT: .LBB84_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB84_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -9903,7 +9866,7 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB84_7 -; RV64ZVE32F-NEXT: .LBB84_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB84_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 @@ -9916,30 +9879,6 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB84_8 ; RV64ZVE32F-NEXT: j .LBB84_9 -; RV64ZVE32F-NEXT: .LBB84_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: flw fa5, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB84_11 -; RV64ZVE32F-NEXT: .LBB84_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: flw fa5, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds float, ptr %base, <8 x i8> %idxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> 
%m, <8 x float> %passthru) ret <8 x float> %v @@ -9998,13 +9937,13 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, < ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB85_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB85_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB85_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB85_15 ; RV64ZVE32F-NEXT: .LBB85_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB85_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB85_16 ; RV64ZVE32F-NEXT: .LBB85_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB85_9 @@ -10023,74 +9962,71 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, < ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB85_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB85_16 -; RV64ZVE32F-NEXT: .LBB85_11: # %else20 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB85_12: # %cond.load4 +; RV64ZVE32F-NEXT: beqz a2, .LBB85_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 -; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: beqz a2, .LBB85_6 -; RV64ZVE32F-NEXT: .LBB85_13: # %cond.load7 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB85_11: # %else17 +; RV64ZVE32F-NEXT: andi a1, a1, -128 +; RV64ZVE32F-NEXT: beqz a1, .LBB85_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: flw fa5, 0(a2) +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: flw fa5, 0(a0) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 -; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: beqz a2, .LBB85_7 -; RV64ZVE32F-NEXT: .LBB85_14: # %cond.load10 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB85_13: # %else20 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.x.s a2, v9 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: flw fa5, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 -; RV64ZVE32F-NEXT: andi a2, a1, 32 -; RV64ZVE32F-NEXT: bnez a2, .LBB85_8 -; RV64ZVE32F-NEXT: j .LBB85_9 -; RV64ZVE32F-NEXT: .LBB85_15: # %cond.load16 +; RV64ZVE32F-NEXT: vmv2r.v v8, v10 +; RV64ZVE32F-NEXT: ret +; RV64ZVE32F-NEXT: .LBB85_14: # 
%cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB85_11 -; RV64ZVE32F-NEXT: .LBB85_16: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 +; RV64ZVE32F-NEXT: andi a2, a1, 8 +; RV64ZVE32F-NEXT: beqz a2, .LBB85_6 +; RV64ZVE32F-NEXT: .LBB85_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: flw fa5, 0(a0) +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 +; RV64ZVE32F-NEXT: andi a2, a1, 16 +; RV64ZVE32F-NEXT: beqz a2, .LBB85_7 +; RV64ZVE32F-NEXT: .LBB85_16: # %cond.load10 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; RV64ZVE32F-NEXT: vmv.x.s a2, v9 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: flw fa5, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 +; RV64ZVE32F-NEXT: andi a2, a1, 32 +; RV64ZVE32F-NEXT: bnez a2, .LBB85_8 +; RV64ZVE32F-NEXT: j .LBB85_9 %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %eidxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) @@ -10153,13 +10089,13 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, < ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB86_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB86_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB86_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB86_15 ; RV64ZVE32F-NEXT: .LBB86_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB86_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB86_16 ; RV64ZVE32F-NEXT: .LBB86_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB86_9 @@ -10179,14 +10115,37 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, < ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB86_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB86_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: andi a2, a2, 255 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: flw fa5, 0(a2) +; RV64ZVE32F-NEXT: 
vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB86_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB86_16 -; RV64ZVE32F-NEXT: .LBB86_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB86_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: andi a1, a1, 255 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: flw fa5, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB86_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB86_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB86_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 @@ -10198,7 +10157,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, < ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB86_6 -; RV64ZVE32F-NEXT: .LBB86_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB86_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -10212,7 +10171,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, < ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB86_7 -; RV64ZVE32F-NEXT: .LBB86_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB86_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 @@ -10226,32 +10185,6 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, < ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB86_8 ; RV64ZVE32F-NEXT: j .LBB86_9 -; RV64ZVE32F-NEXT: .LBB86_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: andi a2, a2, 255 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: flw fa5, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB86_11 -; RV64ZVE32F-NEXT: .LBB86_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: andi a1, a1, 255 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: flw fa5, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %eidxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, 
<8 x i1> %m, <8 x float> %passthru) @@ -10312,13 +10245,13 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB87_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB87_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB87_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB87_15 ; RV64ZVE32F-NEXT: .LBB87_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB87_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB87_16 ; RV64ZVE32F-NEXT: .LBB87_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB87_9 @@ -10337,14 +10270,35 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB87_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB87_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: flw fa5, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB87_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB87_16 -; RV64ZVE32F-NEXT: .LBB87_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB87_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: flw fa5, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB87_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB87_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB87_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -10355,7 +10309,7 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB87_6 -; RV64ZVE32F-NEXT: .LBB87_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB87_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -10368,7 +10322,7 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB87_7 -; RV64ZVE32F-NEXT: .LBB87_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB87_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 @@ -10381,30 +10335,6 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, 
.LBB87_8 ; RV64ZVE32F-NEXT: j .LBB87_9 -; RV64ZVE32F-NEXT: .LBB87_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: flw fa5, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB87_11 -; RV64ZVE32F-NEXT: .LBB87_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: flw fa5, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds float, ptr %base, <8 x i16> %idxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) ret <8 x float> %v @@ -10464,13 +10394,13 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB88_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB88_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB88_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB88_15 ; RV64ZVE32F-NEXT: .LBB88_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB88_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB88_16 ; RV64ZVE32F-NEXT: .LBB88_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB88_9 @@ -10489,14 +10419,35 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB88_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB88_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: flw fa5, 0(a2) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB88_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB88_16 -; RV64ZVE32F-NEXT: .LBB88_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB88_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: flw fa5, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB88_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB88_12: # %cond.load4 
+; RV64ZVE32F-NEXT: .LBB88_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -10507,7 +10458,7 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB88_6 -; RV64ZVE32F-NEXT: .LBB88_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB88_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -10520,7 +10471,7 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB88_7 -; RV64ZVE32F-NEXT: .LBB88_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB88_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 @@ -10533,30 +10484,6 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB88_8 ; RV64ZVE32F-NEXT: j .LBB88_9 -; RV64ZVE32F-NEXT: .LBB88_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: flw fa5, 0(a2) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB88_11 -; RV64ZVE32F-NEXT: .LBB88_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: flw fa5, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %eidxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) @@ -10620,13 +10547,13 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a3, .LBB89_12 +; RV64ZVE32F-NEXT: bnez a3, .LBB89_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a3, a2, 8 -; RV64ZVE32F-NEXT: bnez a3, .LBB89_13 +; RV64ZVE32F-NEXT: bnez a3, .LBB89_15 ; RV64ZVE32F-NEXT: .LBB89_6: # %else8 ; RV64ZVE32F-NEXT: andi a3, a2, 16 -; RV64ZVE32F-NEXT: bnez a3, .LBB89_14 +; RV64ZVE32F-NEXT: bnez a3, .LBB89_16 ; RV64ZVE32F-NEXT: .LBB89_7: # %else11 ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB89_9 @@ -10646,14 +10573,37 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2 -; RV64ZVE32F-NEXT: bnez a3, .LBB89_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a3, .LBB89_11 +; RV64ZVE32F-NEXT: # %bb.10: # 
%cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a3, v8 +; RV64ZVE32F-NEXT: and a3, a3, a1 +; RV64ZVE32F-NEXT: slli a3, a3, 2 +; RV64ZVE32F-NEXT: add a3, a0, a3 +; RV64ZVE32F-NEXT: flw fa5, 0(a3) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB89_11: # %else17 ; RV64ZVE32F-NEXT: andi a2, a2, -128 -; RV64ZVE32F-NEXT: bnez a2, .LBB89_16 -; RV64ZVE32F-NEXT: .LBB89_11: # %else20 +; RV64ZVE32F-NEXT: beqz a2, .LBB89_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: and a1, a2, a1 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: flw fa5, 0(a0) +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB89_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB89_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB89_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 @@ -10665,7 +10615,7 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: beqz a3, .LBB89_6 -; RV64ZVE32F-NEXT: .LBB89_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB89_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 @@ -10679,7 +10629,7 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB89_7 -; RV64ZVE32F-NEXT: .LBB89_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB89_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: and a3, a3, a1 @@ -10693,32 +10643,6 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: bnez a3, .LBB89_8 ; RV64ZVE32F-NEXT: j .LBB89_9 -; RV64ZVE32F-NEXT: .LBB89_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a3, v8 -; RV64ZVE32F-NEXT: and a3, a3, a1 -; RV64ZVE32F-NEXT: slli a3, a3, 2 -; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: flw fa5, 0(a3) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a2, a2, -128 -; RV64ZVE32F-NEXT: beqz a2, .LBB89_11 -; RV64ZVE32F-NEXT: .LBB89_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: and a1, a2, a1 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: flw fa5, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; 
RV64ZVE32F-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %eidxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) @@ -10775,13 +10699,13 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> % ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB90_12 +; RV64ZVE32F-NEXT: bnez a2, .LBB90_14 ; RV64ZVE32F-NEXT: # %bb.5: # %else5 ; RV64ZVE32F-NEXT: andi a2, a1, 8 -; RV64ZVE32F-NEXT: bnez a2, .LBB90_13 +; RV64ZVE32F-NEXT: bnez a2, .LBB90_15 ; RV64ZVE32F-NEXT: .LBB90_6: # %else8 ; RV64ZVE32F-NEXT: andi a2, a1, 16 -; RV64ZVE32F-NEXT: bnez a2, .LBB90_14 +; RV64ZVE32F-NEXT: bnez a2, .LBB90_16 ; RV64ZVE32F-NEXT: .LBB90_7: # %else11 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB90_9 @@ -10799,14 +10723,33 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> % ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 2 -; RV64ZVE32F-NEXT: bnez a2, .LBB90_15 -; RV64ZVE32F-NEXT: # %bb.10: # %else17 +; RV64ZVE32F-NEXT: beqz a2, .LBB90_11 +; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16 +; RV64ZVE32F-NEXT: vmv.x.s a2, v8 +; RV64ZVE32F-NEXT: slli a2, a2, 2 +; RV64ZVE32F-NEXT: add a2, a0, a2 +; RV64ZVE32F-NEXT: flw fa5, 0(a2) +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 +; RV64ZVE32F-NEXT: .LBB90_11: # %else17 ; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: bnez a1, .LBB90_16 -; RV64ZVE32F-NEXT: .LBB90_11: # %else20 +; RV64ZVE32F-NEXT: beqz a1, .LBB90_13 +; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 +; RV64ZVE32F-NEXT: vmv.x.s a1, v8 +; RV64ZVE32F-NEXT: slli a1, a1, 2 +; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: flw fa5, 0(a0) +; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 +; RV64ZVE32F-NEXT: .LBB90_13: # %else20 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret -; RV64ZVE32F-NEXT: .LBB90_12: # %cond.load4 +; RV64ZVE32F-NEXT: .LBB90_14: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -10816,7 +10759,7 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> % ; RV64ZVE32F-NEXT: vslideup.vi v10, v9, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB90_6 -; RV64ZVE32F-NEXT: .LBB90_13: # %cond.load7 +; RV64ZVE32F-NEXT: .LBB90_15: # %cond.load7 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -10827,7 +10770,7 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> % ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB90_7 -; RV64ZVE32F-NEXT: .LBB90_14: # %cond.load10 +; RV64ZVE32F-NEXT: .LBB90_16: # %cond.load10 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: slli a2, a2, 2 @@ -10838,28 +10781,6 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, 
<8 x i32> %idxs, <8 x i1> % ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB90_8 ; RV64ZVE32F-NEXT: j .LBB90_9 -; RV64ZVE32F-NEXT: .LBB90_15: # %cond.load16 -; RV64ZVE32F-NEXT: vmv.x.s a2, v8 -; RV64ZVE32F-NEXT: slli a2, a2, 2 -; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: flw fa5, 0(a2) -; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 -; RV64ZVE32F-NEXT: andi a1, a1, -128 -; RV64ZVE32F-NEXT: beqz a1, .LBB90_11 -; RV64ZVE32F-NEXT: .LBB90_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 -; RV64ZVE32F-NEXT: vmv.x.s a1, v8 -; RV64ZVE32F-NEXT: slli a1, a1, 2 -; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: flw fa5, 0(a0) -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 -; RV64ZVE32F-NEXT: vmv2r.v v8, v10 -; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %idxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) ret <8 x float> %v @@ -11135,11 +11056,13 @@ define <4 x double> @mgather_truemask_v4f64(<4 x ptr> %ptrs, <4 x double> %passt define <4 x double> @mgather_falsemask_v4f64(<4 x ptr> %ptrs, <4 x double> %passthru) { ; RV32V-LABEL: mgather_falsemask_v4f64: ; RV32V: # %bb.0: +; RV32V-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32V-NEXT: vmv2r.v v8, v10 ; RV32V-NEXT: ret ; ; RV64V-LABEL: mgather_falsemask_v4f64: ; RV64V: # %bb.0: +; RV64V-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64V-NEXT: vmv2r.v v8, v10 ; RV64V-NEXT: ret ; @@ -13700,6 +13623,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m ; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 15 ; RV64ZVE32F-NEXT: .LBB107_24: # %else44 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB107_25: # %cond.load4 @@ -14086,6 +14010,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m ; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 31 ; RV64ZVE32F-NEXT: .LBB108_48: # %else92 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB108_49: # %cond.load4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll index e0cf39c75da24..69903d77084bf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll @@ -318,6 +318,7 @@ define <128 x i16> @masked_load_v128i16(ptr %a, <128 x i1> %mask) { define <256 x i8> @masked_load_v256i8(ptr %a, <256 x i1> %mask) { ; CHECK-LABEL: masked_load_v256i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll index 46c2033d28b38..80a9143d1ad8b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll @@ -135,10 +135,10 @@ declare <16 x half> @llvm.vp.nearbyint.v16f16(<16 x 
half>, <16 x i1>, i32) define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI6_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI6_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -263,8 +263,8 @@ declare <8 x float> @llvm.vp.nearbyint.v8f32(<8 x float>, <8 x i1>, i32) define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -307,8 +307,8 @@ declare <16 x float> @llvm.vp.nearbyint.v16f32(<16 x float>, <16 x i1>, i32) define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -393,10 +393,10 @@ declare <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI18_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -437,10 +437,10 @@ declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI20_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -481,10 +481,10 @@ declare <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI22_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -525,10 +525,10 @@ declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 
x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI24_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -569,9 +569,9 @@ declare <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double>, <32 x i1>, i32) define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v32f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v7, v0, 2 ; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll index ad358d7320240..276f6b077931b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll @@ -23,9 +23,9 @@ declare i1 @llvm.vp.reduce.or.v1i1(i1, <1 x i1>, <1 x i1>, i32) define zeroext i1 @vpreduce_or_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v1i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -39,9 +39,9 @@ declare i1 @llvm.vp.reduce.xor.v1i1(i1, <1 x i1>, <1 x i1>, i32) define zeroext i1 @vpreduce_xor_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v1i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -71,9 +71,9 @@ declare i1 @llvm.vp.reduce.or.v2i1(i1, <2 x i1>, <2 x i1>, i32) define zeroext i1 @vpreduce_or_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -87,9 +87,9 @@ declare i1 @llvm.vp.reduce.xor.v2i1(i1, <2 x i1>, <2 x i1>, i32) define zeroext i1 @vpreduce_xor_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -119,9 +119,9 @@ declare i1 @llvm.vp.reduce.or.v4i1(i1, <4 x i1>, <4 x i1>, i32) define zeroext i1 @vpreduce_or_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, 
v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -135,9 +135,9 @@ declare i1 @llvm.vp.reduce.xor.v4i1(i1, <4 x i1>, <4 x i1>, i32) define zeroext i1 @vpreduce_xor_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -167,9 +167,9 @@ declare i1 @llvm.vp.reduce.or.v8i1(i1, <8 x i1>, <8 x i1>, i32) define zeroext i1 @vpreduce_or_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -183,9 +183,9 @@ declare i1 @llvm.vp.reduce.xor.v8i1(i1, <8 x i1>, <8 x i1>, i32) define zeroext i1 @vpreduce_xor_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -231,6 +231,7 @@ declare i1 @llvm.vp.reduce.and.v256i1(i1, <256 x i1>, <256 x i1>, i32) define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v256i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v11, v9 ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: li a3, 128 @@ -265,9 +266,9 @@ declare i1 @llvm.vp.reduce.or.v16i1(i1, <16 x i1>, <16 x i1>, i32) define zeroext i1 @vpreduce_or_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -281,9 +282,9 @@ declare i1 @llvm.vp.reduce.xor.v16i1(i1, <16 x i1>, <16 x i1>, i32) define zeroext i1 @vpreduce_xor_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -297,9 +298,9 @@ declare i1 @llvm.vp.reduce.add.v1i1(i1, <1 x i1>, <1 x i1>, i32) define zeroext i1 @vpreduce_add_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v1i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -313,9 +314,9 @@ declare i1 @llvm.vp.reduce.add.v2i1(i1, <2 x i1>, <2 x i1>, i32) define zeroext i1 @vpreduce_add_v2i1(i1 zeroext 
%s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -329,9 +330,9 @@ declare i1 @llvm.vp.reduce.add.v4i1(i1, <4 x i1>, <4 x i1>, i32) define zeroext i1 @vpreduce_add_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -345,9 +346,9 @@ declare i1 @llvm.vp.reduce.add.v8i1(i1, <8 x i1>, <8 x i1>, i32) define zeroext i1 @vpreduce_add_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -361,9 +362,9 @@ declare i1 @llvm.vp.reduce.add.v16i1(i1, <16 x i1>, <16 x i1>, i32) define zeroext i1 @vpreduce_add_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -489,9 +490,9 @@ declare i1 @llvm.vp.reduce.smin.v1i1(i1, <1 x i1>, <1 x i1>, i32) define zeroext i1 @vpreduce_smin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v1i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -505,9 +506,9 @@ declare i1 @llvm.vp.reduce.smin.v2i1(i1, <2 x i1>, <2 x i1>, i32) define zeroext i1 @vpreduce_smin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -521,9 +522,9 @@ declare i1 @llvm.vp.reduce.smin.v4i1(i1, <4 x i1>, <4 x i1>, i32) define zeroext i1 @vpreduce_smin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -537,9 +538,9 @@ declare i1 @llvm.vp.reduce.smin.v8i1(i1, <8 x i1>, <8 x i1>, i32) define zeroext i1 @vpreduce_smin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; 
CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -553,9 +554,9 @@ declare i1 @llvm.vp.reduce.smin.v16i1(i1, <16 x i1>, <16 x i1>, i32) define zeroext i1 @vpreduce_smin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -569,9 +570,9 @@ declare i1 @llvm.vp.reduce.smin.v32i1(i1, <32 x i1>, <32 x i1>, i32) define zeroext i1 @vpreduce_smin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v32i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -585,9 +586,9 @@ declare i1 @llvm.vp.reduce.smin.v64i1(i1, <64 x i1>, <64 x i1>, i32) define zeroext i1 @vpreduce_smin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v64i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -601,9 +602,9 @@ declare i1 @llvm.vp.reduce.umax.v1i1(i1, <1 x i1>, <1 x i1>, i32) define zeroext i1 @vpreduce_umax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v1i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -617,9 +618,9 @@ declare i1 @llvm.vp.reduce.umax.v2i1(i1, <2 x i1>, <2 x i1>, i32) define zeroext i1 @vpreduce_umax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -633,9 +634,9 @@ declare i1 @llvm.vp.reduce.umax.v4i1(i1, <4 x i1>, <4 x i1>, i32) define zeroext i1 @vpreduce_umax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -649,9 +650,9 @@ declare i1 @llvm.vp.reduce.umax.v8i1(i1, <8 x i1>, <8 x i1>, i32) define zeroext i1 @vpreduce_umax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez 
a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -665,9 +666,9 @@ declare i1 @llvm.vp.reduce.umax.v16i1(i1, <16 x i1>, <16 x i1>, i32) define zeroext i1 @vpreduce_umax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -681,9 +682,9 @@ declare i1 @llvm.vp.reduce.umax.v32i1(i1, <32 x i1>, <32 x i1>, i32) define zeroext i1 @vpreduce_umax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v32i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -697,9 +698,9 @@ declare i1 @llvm.vp.reduce.umax.v64i1(i1, <64 x i1>, <64 x i1>, i32) define zeroext i1 @vpreduce_umax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v64i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll index b8617fda3aa7e..266772d36ee9c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll @@ -123,10 +123,10 @@ declare <16 x half> @llvm.vp.rint.v16f16(<16 x half>, <16 x i1>, i32) define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI6_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI6_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -239,8 +239,8 @@ declare <8 x float> @llvm.vp.rint.v8f32(<8 x float>, <8 x i1>, i32) define <8 x float> @vp_rint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -279,8 +279,8 @@ declare <16 x float> @llvm.vp.rint.v16f32(<16 x float>, <16 x i1>, i32) define <16 x float> @vp_rint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -357,10 +357,10 @@ declare <4 x double> @llvm.vp.rint.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, 
%hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI18_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -397,10 +397,10 @@ declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI20_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -437,10 +437,10 @@ declare <15 x double> @llvm.vp.rint.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI22_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -477,10 +477,10 @@ declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI24_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -517,9 +517,9 @@ declare <32 x double> @llvm.vp.rint.v32f64(<32 x double>, <32 x i1>, i32) define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v32f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v7, v0, 2 ; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll index 820a05e3d6042..232a8a4827cb1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll @@ -194,8 +194,8 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) ; ; ZVFHMIN-LABEL: vp_round_v8f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v9, v0 ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v9, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 ; ZVFHMIN-NEXT: lui a1, 307200 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -261,10 +261,10 @@ declare <16 x half> @llvm.vp.round.v16f16(<16 x half>, <16 x i1>, i32) define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x 
i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -280,8 +280,8 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext % ; ; ZVFHMIN-LABEL: vp_round_v16f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 ; ZVFHMIN-NEXT: lui a1, 307200 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -431,8 +431,8 @@ declare <8 x float> @llvm.vp.round.v8f32(<8 x float>, <8 x i1>, i32) define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -475,8 +475,8 @@ declare <16 x float> @llvm.vp.round.v16f32(<16 x float>, <16 x i1>, i32) define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -561,10 +561,10 @@ declare <4 x double> @llvm.vp.round.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI18_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -605,10 +605,10 @@ declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI20_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -649,10 +649,10 @@ declare <15 x double> @llvm.vp.round.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI22_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, 
zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -693,10 +693,10 @@ declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI24_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -743,6 +743,7 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -750,7 +751,6 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v0, 2 ; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll index 8391c7939180a..7c80c037403c2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll @@ -194,8 +194,8 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext % ; ; ZVFHMIN-LABEL: vp_roundeven_v8f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v9, v0 ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v9, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 ; ZVFHMIN-NEXT: lui a1, 307200 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -261,10 +261,10 @@ declare <16 x half> @llvm.vp.roundeven.v16f16(<16 x half>, <16 x i1>, i32) define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -280,8 +280,8 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe ; ; ZVFHMIN-LABEL: vp_roundeven_v16f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 ; ZVFHMIN-NEXT: lui a1, 307200 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -431,8 +431,8 @@ declare <8 x float> @llvm.vp.roundeven.v8f32(<8 x float>, <8 x i1>, i32) define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v 
v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -475,8 +475,8 @@ declare <16 x float> @llvm.vp.roundeven.v16f32(<16 x float>, <16 x i1>, i32) define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -561,10 +561,10 @@ declare <4 x double> @llvm.vp.roundeven.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI18_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -605,10 +605,10 @@ declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI20_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -649,10 +649,10 @@ declare <15 x double> @llvm.vp.roundeven.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI22_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -693,10 +693,10 @@ declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI24_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -743,6 +743,7 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: csrr a1, vlenb ; 
CHECK-NEXT: slli a1, a1, 3 @@ -750,7 +751,6 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v0, 2 ; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll index 8c38d24460265..65a4725267cd3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll @@ -194,8 +194,8 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext ; ; ZVFHMIN-LABEL: vp_roundtozero_v8f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v9, v0 ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v9, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8 ; ZVFHMIN-NEXT: lui a1, 307200 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -261,10 +261,10 @@ declare <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half>, <16 x i1>, i32) define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -280,8 +280,8 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer ; ; ZVFHMIN-LABEL: vp_roundtozero_v16f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v10, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8 ; ZVFHMIN-NEXT: lui a1, 307200 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -431,8 +431,8 @@ declare <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float>, <8 x i1>, i32) define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -475,8 +475,8 @@ declare <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float>, <16 x i1>, i32) define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -561,10 +561,10 @@ declare <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI18_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) ; 
CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -605,10 +605,10 @@ declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI20_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -649,10 +649,10 @@ declare <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI22_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -693,10 +693,10 @@ declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI24_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -743,6 +743,7 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32 ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -750,7 +751,6 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v0, 2 ; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll index d52c42891fcc3..69d6ffa9f300c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll @@ -598,6 +598,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v 
v7, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -648,6 +649,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vx_v256i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma @@ -677,6 +679,7 @@ define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 z define <256 x i1> @icmp_eq_vx_swap_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vx_swap_v256i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll index 38026bb591f79..f2353e7d028bd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll @@ -8,8 +8,8 @@ define <8 x i32> @concat_2xv4i32(<4 x i32> %a, <4 x i32> %b) { ; VLA-LABEL: concat_2xv4i32: ; VLA: # %bb.0: -; VLA-NEXT: vmv1r.v v10, v9 ; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; VLA-NEXT: vmv1r.v v10, v9 ; VLA-NEXT: vslideup.vi v8, v10, 4 ; VLA-NEXT: ret ; @@ -32,9 +32,9 @@ define <8 x i32> @concat_4xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x ; ; VLS-LABEL: concat_4xv2i32: ; VLS: # %bb.0: +; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; VLS-NEXT: vmv1r.v v13, v10 ; VLS-NEXT: vmv1r.v v12, v8 -; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; VLS-NEXT: vslideup.vi v13, v11, 2 ; VLS-NEXT: vslideup.vi v12, v9, 2 ; VLS-NEXT: vmv2r.v v8, v12 @@ -62,9 +62,9 @@ define <8 x i32> @concat_8xv1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %c, <1 x ; ; VLS-LABEL: concat_8xv1i32: ; VLS: # %bb.0: +; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; VLS-NEXT: vmv1r.v v17, v12 ; VLS-NEXT: vmv1r.v v16, v8 -; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; VLS-NEXT: vslideup.vi v14, v15, 1 ; VLS-NEXT: vslideup.vi v17, v13, 1 ; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma @@ -89,8 +89,8 @@ define <8 x i32> @concat_8xv1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %c, <1 x define <16 x i32> @concat_2xv8i32(<8 x i32> %a, <8 x i32> %b) { ; VLA-LABEL: concat_2xv8i32: ; VLA: # %bb.0: -; VLA-NEXT: vmv2r.v v12, v10 ; VLA-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; VLA-NEXT: vmv2r.v v12, v10 ; VLA-NEXT: vslideup.vi v8, v12, 8 ; VLA-NEXT: ret ; @@ -104,10 +104,10 @@ define <16 x i32> @concat_2xv8i32(<8 x i32> %a, <8 x i32> %b) { define <16 x i32> @concat_4xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) { ; VLA-LABEL: concat_4xv4i32: ; VLA: # %bb.0: +; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; VLA-NEXT: vmv1r.v v14, v11 ; VLA-NEXT: vmv1r.v v12, v10 ; VLA-NEXT: vmv1r.v v10, v9 -; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; VLA-NEXT: vslideup.vi v12, v14, 4 ; VLA-NEXT: vslideup.vi v8, v10, 4 ; VLA-NEXT: vsetivli zero, 16, e32, m4, ta, ma @@ -140,11 +140,11 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x ; ; VLS-LABEL: concat_8xv2i32: ; VLS: # %bb.0: +; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; VLS-NEXT: vmv1r.v v19, v14 ; VLS-NEXT: vmv1r.v v18, v12 ; VLS-NEXT: vmv1r.v v17, v10 ; VLS-NEXT: vmv1r.v v16, v8 -; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; VLS-NEXT: vslideup.vi v19, v15, 2 
; VLS-NEXT: vslideup.vi v18, v13, 2 ; VLS-NEXT: vslideup.vi v17, v11, 2 @@ -164,6 +164,7 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x define <32 x i32> @concat_2xv16i32(<16 x i32> %a, <16 x i32> %b) { ; VLA-LABEL: concat_2xv16i32: ; VLA: # %bb.0: +; VLA-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; VLA-NEXT: vmv4r.v v16, v12 ; VLA-NEXT: li a0, 32 ; VLA-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -180,11 +181,11 @@ define <32 x i32> @concat_2xv16i32(<16 x i32> %a, <16 x i32> %b) { define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) { ; VLA-LABEL: concat_4xv8i32: ; VLA: # %bb.0: +; VLA-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; VLA-NEXT: vmv2r.v v20, v14 ; VLA-NEXT: vmv2r.v v16, v12 ; VLA-NEXT: vmv2r.v v12, v10 ; VLA-NEXT: li a0, 32 -; VLA-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; VLA-NEXT: vslideup.vi v16, v20, 8 ; VLA-NEXT: vslideup.vi v8, v12, 8 ; VLA-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -203,6 +204,7 @@ define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x define <32 x i32> @concat_8xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %e, <4 x i32> %f, <4 x i32> %g, <4 x i32> %h) { ; VLA-LABEL: concat_8xv4i32: ; VLA: # %bb.0: +; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; VLA-NEXT: vmv1r.v v18, v15 ; VLA-NEXT: vmv1r.v v20, v14 ; VLA-NEXT: vmv1r.v v14, v13 @@ -211,7 +213,6 @@ define <32 x i32> @concat_8xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x ; VLA-NEXT: vmv1r.v v12, v10 ; VLA-NEXT: vmv1r.v v10, v9 ; VLA-NEXT: li a0, 32 -; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; VLA-NEXT: vslideup.vi v20, v18, 4 ; VLA-NEXT: vslideup.vi v16, v14, 4 ; VLA-NEXT: vslideup.vi v12, v22, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll index d461fa8378cff..cadee8acf27d6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll @@ -108,6 +108,7 @@ define <4 x i64> @m2_splat_into_identity(<4 x i64> %v1) vscale_range(2,2) { define <4 x i64> @m2_broadcast_i128(<4 x i64> %v1) vscale_range(2,2) { ; CHECK-LABEL: m2_broadcast_i128: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: ret %res = shufflevector <4 x i64> %v1, <4 x i64> poison, <4 x i32> @@ -117,6 +118,7 @@ define <4 x i64> @m2_broadcast_i128(<4 x i64> %v1) vscale_range(2,2) { define <8 x i64> @m4_broadcast_i128(<8 x i64> %v1) vscale_range(2,2) { ; CHECK-LABEL: m4_broadcast_i128: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll index 407535831aeda..f7647ff38c8a0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll @@ -966,9 +966,9 @@ define <16 x i8> @reverse_v16i8_2(<8 x i8> %a, <8 x i8> %b) { define <32 x i8> @reverse_v32i8_2(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: reverse_v32i8_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: addi a1, a0, -1 ; CHECK-NEXT: vrsub.vx v12, v12, a1 @@ 
-1035,9 +1035,9 @@ define <8 x i16> @reverse_v8i16_2(<4 x i16> %a, <4 x i16> %b) { define <16 x i16> @reverse_v16i16_2(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: reverse_v16i16_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: srli a1, a0, 1 ; CHECK-NEXT: addi a1, a1, -1 @@ -1060,9 +1060,9 @@ define <16 x i16> @reverse_v16i16_2(<8 x i16> %a, <8 x i16> %b) { define <32 x i16> @reverse_v32i16_2(<16 x i16> %a, <16 x i16> %b) { ; CHECK-LABEL: reverse_v32i16_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: lui a1, 16 ; CHECK-NEXT: addi a1, a1, -1 @@ -1116,9 +1116,9 @@ define <4 x i32> @reverse_v4i32_2(<2 x i32> %a, < 2 x i32> %b) { define <8 x i32> @reverse_v8i32_2(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: reverse_v8i32_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: srli a1, a0, 2 ; CHECK-NEXT: addi a1, a1, -1 @@ -1142,9 +1142,9 @@ define <8 x i32> @reverse_v8i32_2(<4 x i32> %a, <4 x i32> %b) { define <16 x i32> @reverse_v16i32_2(<8 x i32> %a, <8 x i32> %b) { ; CHECK-LABEL: reverse_v16i32_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: srli a1, a0, 2 ; CHECK-NEXT: addi a1, a1, -1 @@ -1170,9 +1170,9 @@ define <16 x i32> @reverse_v16i32_2(<8 x i32> %a, <8 x i32> %b) { define <32 x i32> @reverse_v32i32_2(<16 x i32> %a, <16 x i32> %b) { ; CHECK-LABEL: reverse_v32i32_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: srli a1, a0, 2 ; CHECK-NEXT: addi a1, a1, -1 @@ -1219,9 +1219,9 @@ define <4 x i64> @reverse_v4i64_2(<2 x i64> %a, < 2 x i64> %b) { define <8 x i64> @reverse_v8i64_2(<4 x i64> %a, <4 x i64> %b) { ; CHECK-LABEL: reverse_v8i64_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: addi a1, a1, -1 @@ -1289,9 +1289,9 @@ define <8 x half> @reverse_v8f16_2(<4 x half> %a, <4 x half> %b) { define <16 x half> @reverse_v16f16_2(<8 x half> %a, <8 x half> %b) { ; CHECK-LABEL: reverse_v16f16_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: srli a1, a0, 1 ; CHECK-NEXT: addi a1, a1, -1 @@ -1361,9 +1361,9 @@ define <4 x float> @reverse_v4f32_2(<2 x float> %a, <2 x float> %b) { define <8 x float> @reverse_v8f32_2(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: reverse_v8f32_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: srli a1, a0, 2 ; CHECK-NEXT: addi a1, a1, 
-1 @@ -1387,9 +1387,9 @@ define <8 x float> @reverse_v8f32_2(<4 x float> %a, <4 x float> %b) { define <16 x float> @reverse_v16f32_2(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: reverse_v16f32_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: srli a1, a0, 2 ; CHECK-NEXT: addi a1, a1, -1 @@ -1430,9 +1430,9 @@ define <4 x double> @reverse_v4f64_2(<2 x double> %a, < 2 x double> %b) { define <8 x double> @reverse_v8f64_2(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: reverse_v8f64_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: addi a1, a1, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll index c37c3a9ee0ea0..49f6acf9ba8c9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll @@ -415,8 +415,8 @@ define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert3(<4 x i8> %v, i8 %b) { define <2 x i8> @vslide1up_4xi8_neg_length_changing(<4 x i8> %v, i8 %b) { ; CHECK-LABEL: vslide1up_4xi8_neg_length_changing: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, tu, ma ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll index 1a08c613ca36a..29d9a8a9b060c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll @@ -62,8 +62,8 @@ define void @gather_masked(ptr noalias nocapture %A, ptr noalias nocapture reado ; CHECK-NEXT: li a4, 5 ; CHECK-NEXT: .LBB1_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu +; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vlse8.v v9, (a1), a4, v0.t ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vadd.vv v9, v10, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll index 1c2c90478a1f7..4b7f82f94f5e4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll @@ -542,6 +542,7 @@ declare <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0.i32(ptr, i32, < define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x i1> %m, i32 zeroext %evl) nounwind { ; CHECK-LABEL: strided_vpload_v32f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: li a4, 16 ; CHECK-NEXT: mv a3, a2 @@ -598,6 +599,7 @@ declare <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr, i32, define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_load_v33f64: ; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv1r.v v8, v0 ; 
CHECK-RV32-NEXT: li a5, 32 ; CHECK-RV32-NEXT: mv a3, a4 @@ -648,6 +650,7 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask ; ; CHECK-RV64-LABEL: strided_load_v33f64: ; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV64-NEXT: vmv1r.v v8, v0 ; CHECK-RV64-NEXT: li a5, 32 ; CHECK-RV64-NEXT: mv a4, a3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll index 12893ec55cda7..a91dee1cb245f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll @@ -53,9 +53,9 @@ declare <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16>, <128 x i1>, i32) define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v128i7_v128i16: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v12, v0, 8 ; CHECK-NEXT: mv a2, a0 ; CHECK-NEXT: bltu a0, a1, .LBB4_2 @@ -231,6 +231,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze ; CHECK-NEXT: mul a2, a2, a3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 72 * vlenb +; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: li a3, 24 @@ -243,7 +244,6 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze ; CHECK-NEXT: add a2, sp, a2 ; CHECK-NEXT: addi a2, a2, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill -; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v6, v0, 8 ; CHECK-NEXT: addi a2, a1, 512 ; CHECK-NEXT: addi a3, a1, 640 @@ -541,9 +541,9 @@ declare <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64>, <32 x i1>, i32) define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v32i32_v32i64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v12, v0, 2 ; CHECK-NEXT: mv a1, a0 ; CHECK-NEXT: bltu a0, a2, .LBB17_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll index db03dc3d5ab1e..6d9f69f436fc4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -80,14 +80,8 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> % ; RV32-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-SLOW-NEXT: vmv.x.s a0, v0 ; RV32-SLOW-NEXT: andi a1, a0, 1 -; RV32-SLOW-NEXT: bnez a1, .LBB4_3 -; RV32-SLOW-NEXT: # %bb.1: # %else -; RV32-SLOW-NEXT: andi a0, a0, 2 -; RV32-SLOW-NEXT: bnez a0, .LBB4_4 -; RV32-SLOW-NEXT: .LBB4_2: # %else2 -; RV32-SLOW-NEXT: vmv1r.v v8, v9 -; RV32-SLOW-NEXT: ret -; RV32-SLOW-NEXT: .LBB4_3: # %cond.load +; RV32-SLOW-NEXT: beqz a1, .LBB4_2 +; RV32-SLOW-NEXT: # %bb.1: # %cond.load ; RV32-SLOW-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV32-SLOW-NEXT: vmv.x.s a1, v8 ; RV32-SLOW-NEXT: lbu a2, 1(a1) @@ -96,9 +90,10 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> % ; RV32-SLOW-NEXT: or a1, a2, a1 ; 
RV32-SLOW-NEXT: vsetvli zero, zero, e16, m2, tu, ma ; RV32-SLOW-NEXT: vmv.s.x v9, a1 +; RV32-SLOW-NEXT: .LBB4_2: # %else ; RV32-SLOW-NEXT: andi a0, a0, 2 -; RV32-SLOW-NEXT: beqz a0, .LBB4_2 -; RV32-SLOW-NEXT: .LBB4_4: # %cond.load1 +; RV32-SLOW-NEXT: beqz a0, .LBB4_4 +; RV32-SLOW-NEXT: # %bb.3: # %cond.load1 ; RV32-SLOW-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-SLOW-NEXT: vslidedown.vi v8, v8, 1 ; RV32-SLOW-NEXT: vmv.x.s a0, v8 @@ -109,6 +104,8 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> % ; RV32-SLOW-NEXT: vmv.s.x v8, a0 ; RV32-SLOW-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32-SLOW-NEXT: vslideup.vi v9, v8, 1 +; RV32-SLOW-NEXT: .LBB4_4: # %else2 +; RV32-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-SLOW-NEXT: vmv1r.v v8, v9 ; RV32-SLOW-NEXT: ret ; @@ -117,14 +114,8 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> % ; RV64-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-SLOW-NEXT: vmv.x.s a0, v0 ; RV64-SLOW-NEXT: andi a1, a0, 1 -; RV64-SLOW-NEXT: bnez a1, .LBB4_3 -; RV64-SLOW-NEXT: # %bb.1: # %else -; RV64-SLOW-NEXT: andi a0, a0, 2 -; RV64-SLOW-NEXT: bnez a0, .LBB4_4 -; RV64-SLOW-NEXT: .LBB4_2: # %else2 -; RV64-SLOW-NEXT: vmv1r.v v8, v9 -; RV64-SLOW-NEXT: ret -; RV64-SLOW-NEXT: .LBB4_3: # %cond.load +; RV64-SLOW-NEXT: beqz a1, .LBB4_2 +; RV64-SLOW-NEXT: # %bb.1: # %cond.load ; RV64-SLOW-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV64-SLOW-NEXT: vmv.x.s a1, v8 ; RV64-SLOW-NEXT: lbu a2, 1(a1) @@ -133,9 +124,10 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> % ; RV64-SLOW-NEXT: or a1, a2, a1 ; RV64-SLOW-NEXT: vsetvli zero, zero, e16, m2, tu, ma ; RV64-SLOW-NEXT: vmv.s.x v9, a1 +; RV64-SLOW-NEXT: .LBB4_2: # %else ; RV64-SLOW-NEXT: andi a0, a0, 2 -; RV64-SLOW-NEXT: beqz a0, .LBB4_2 -; RV64-SLOW-NEXT: .LBB4_4: # %cond.load1 +; RV64-SLOW-NEXT: beqz a0, .LBB4_4 +; RV64-SLOW-NEXT: # %bb.3: # %cond.load1 ; RV64-SLOW-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-SLOW-NEXT: vslidedown.vi v8, v8, 1 ; RV64-SLOW-NEXT: vmv.x.s a0, v8 @@ -146,6 +138,8 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> % ; RV64-SLOW-NEXT: vmv.s.x v8, a0 ; RV64-SLOW-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-SLOW-NEXT: vslideup.vi v9, v8, 1 +; RV64-SLOW-NEXT: .LBB4_4: # %else2 +; RV64-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-SLOW-NEXT: vmv1r.v v8, v9 ; RV64-SLOW-NEXT: ret ; @@ -174,23 +168,18 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> % ; RV32-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-SLOW-NEXT: vmv.x.s a0, v0 ; RV32-SLOW-NEXT: andi a1, a0, 1 -; RV32-SLOW-NEXT: bnez a1, .LBB5_3 -; RV32-SLOW-NEXT: # %bb.1: # %else -; RV32-SLOW-NEXT: andi a0, a0, 2 -; RV32-SLOW-NEXT: bnez a0, .LBB5_4 -; RV32-SLOW-NEXT: .LBB5_2: # %else2 -; RV32-SLOW-NEXT: vmv1r.v v8, v9 -; RV32-SLOW-NEXT: ret -; RV32-SLOW-NEXT: .LBB5_3: # %cond.load +; RV32-SLOW-NEXT: beqz a1, .LBB5_2 +; RV32-SLOW-NEXT: # %bb.1: # %cond.load ; RV32-SLOW-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV32-SLOW-NEXT: vmv.x.s a1, v8 ; RV32-SLOW-NEXT: lw a2, 0(a1) ; RV32-SLOW-NEXT: lw a1, 4(a1) ; RV32-SLOW-NEXT: vslide1down.vx v9, v9, a2 ; RV32-SLOW-NEXT: vslide1down.vx v9, v9, a1 +; RV32-SLOW-NEXT: .LBB5_2: # %else ; RV32-SLOW-NEXT: andi a0, a0, 2 -; RV32-SLOW-NEXT: beqz a0, .LBB5_2 -; RV32-SLOW-NEXT: .LBB5_4: # %cond.load1 +; RV32-SLOW-NEXT: beqz a0, .LBB5_4 +; RV32-SLOW-NEXT: # %bb.3: # %cond.load1 ; RV32-SLOW-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-SLOW-NEXT: 
vslidedown.vi v8, v8, 1 ; RV32-SLOW-NEXT: vmv.x.s a0, v8 @@ -201,6 +190,8 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> % ; RV32-SLOW-NEXT: vslide1down.vx v8, v8, a0 ; RV32-SLOW-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-SLOW-NEXT: vslideup.vi v9, v8, 1 +; RV32-SLOW-NEXT: .LBB5_4: # %else2 +; RV32-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-SLOW-NEXT: vmv1r.v v8, v9 ; RV32-SLOW-NEXT: ret ; @@ -209,14 +200,8 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> % ; RV64-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-SLOW-NEXT: vmv.x.s a0, v0 ; RV64-SLOW-NEXT: andi a1, a0, 1 -; RV64-SLOW-NEXT: bnez a1, .LBB5_3 -; RV64-SLOW-NEXT: # %bb.1: # %else -; RV64-SLOW-NEXT: andi a0, a0, 2 -; RV64-SLOW-NEXT: bnez a0, .LBB5_4 -; RV64-SLOW-NEXT: .LBB5_2: # %else2 -; RV64-SLOW-NEXT: vmv1r.v v8, v9 -; RV64-SLOW-NEXT: ret -; RV64-SLOW-NEXT: .LBB5_3: # %cond.load +; RV64-SLOW-NEXT: beqz a1, .LBB5_2 +; RV64-SLOW-NEXT: # %bb.1: # %cond.load ; RV64-SLOW-NEXT: vsetvli zero, zero, e64, m8, tu, ma ; RV64-SLOW-NEXT: vmv.x.s a1, v8 ; RV64-SLOW-NEXT: lwu a2, 4(a1) @@ -224,9 +209,10 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> % ; RV64-SLOW-NEXT: slli a2, a2, 32 ; RV64-SLOW-NEXT: or a1, a2, a1 ; RV64-SLOW-NEXT: vmv.s.x v9, a1 +; RV64-SLOW-NEXT: .LBB5_2: # %else ; RV64-SLOW-NEXT: andi a0, a0, 2 -; RV64-SLOW-NEXT: beqz a0, .LBB5_2 -; RV64-SLOW-NEXT: .LBB5_4: # %cond.load1 +; RV64-SLOW-NEXT: beqz a0, .LBB5_4 +; RV64-SLOW-NEXT: # %bb.3: # %cond.load1 ; RV64-SLOW-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-SLOW-NEXT: vslidedown.vi v8, v8, 1 ; RV64-SLOW-NEXT: vmv.x.s a0, v8 @@ -236,6 +222,8 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> % ; RV64-SLOW-NEXT: or a0, a1, a0 ; RV64-SLOW-NEXT: vmv.s.x v8, a0 ; RV64-SLOW-NEXT: vslideup.vi v9, v8, 1 +; RV64-SLOW-NEXT: .LBB5_4: # %else2 +; RV64-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-SLOW-NEXT: vmv1r.v v8, v9 ; RV64-SLOW-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll index 5be1a771eb279..7ee8179acfdb9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll @@ -363,6 +363,7 @@ declare <256 x i8> @llvm.vp.add.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll index ac48542ca9ebb..fec54b36042fa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll @@ -267,6 +267,7 @@ declare <256 x i8> @llvm.vp.smax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) define <256 x i8> @vmax_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll index 794eef6ed40b2..7ca0dbd9adffc 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll @@ -266,6 +266,7 @@ declare <256 x i8> @llvm.vp.umax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) define <256 x i8> @vmaxu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll index 34011f6bd8acd..ea75742ca6e43 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll @@ -267,6 +267,7 @@ declare <256 x i8> @llvm.vp.smin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) define <256 x i8> @vmin_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll index 79e72b7d9cac9..f4f54db64018d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll @@ -266,6 +266,7 @@ declare <256 x i8> @llvm.vp.umin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) define <256 x i8> @vminu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll index 24e75cde2ce91..df9ff0fc39a7e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -2617,8 +2617,8 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32> define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v32f64: ; RV32: # %bb.0: -; RV32-NEXT: vmv1r.v v7, v0 ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv1r.v v7, v0 ; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: li a2, 32 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll index 71f497e4c7be4..6c9989775f790 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll @@ -394,6 +394,7 @@ declare <33 x double> @llvm.vp.load.v33f64.p0(ptr, <33 x i1>, i32) define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v33f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v0 ; CHECK-NEXT: li a4, 32 ; CHECK-NEXT: mv a3, a2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll index a11c2b6bca12e..a53d33e6120d5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll @@ -1181,9 +1181,9 @@ define <32 x double> 
@vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <3 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: li a1, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll index 888fc79f0122d..7afd31fdd663c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll @@ -372,6 +372,7 @@ declare <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vi_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll index e1d57ae1e6741..f61b112fd8024 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll @@ -368,6 +368,7 @@ declare <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vi_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll index 1d8af4c46cc07..5d407caf71514 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll @@ -163,6 +163,7 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v6, v8 ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: li a2, 128 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll index 8fad3db55f9bc..6ddf2e464750e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll @@ -384,6 +384,7 @@ declare <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vi_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: addi a3, a1, -128 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll index ca35aa6c4a94c..c403593894794 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll @@ -379,6 +379,7 @@ declare <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vi_v258i8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a2, 128 ; CHECK-NEXT: addi a3, a1, -128 diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll index e6dfe5e78cdb4..74a00c655d526 100644 --- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll @@ -117,8 +117,8 @@ declare @llvm.vp.floor.nxv4bf16(, @vp_floor_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv4bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -169,8 +169,8 @@ declare @llvm.vp.floor.nxv8bf16(, @vp_floor_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv8bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -221,8 +221,8 @@ declare @llvm.vp.floor.nxv16bf16(, define @vp_floor_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv16bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -279,9 +279,9 @@ define @vp_floor_nxv32bf16( %va, @vp_floor_nxv4f16( %va, @llvm.vp.floor.nxv8f16(, @vp_floor_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI18_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -668,8 +668,8 @@ define @vp_floor_nxv8f16( %va, @llvm.vp.floor.nxv16f16(, @vp_floor_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v12, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI20_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv1r.v v12, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0) ; ZVFH-NEXT: vfabs.v v16, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -754,8 +754,8 @@ define @vp_floor_nxv16f16( %va, @llvm.vp.floor.nxv32f16(, @vp_floor_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v16, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI22_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vmv1r.v v16, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0) ; ZVFH-NEXT: vfabs.v v24, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -846,9 +846,9 @@ 
define @vp_floor_nxv32f16( %va, @llvm.vp.floor.nxv4f32(, @vp_floor_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1112,8 +1112,8 @@ declare @llvm.vp.floor.nxv8f32(, @vp_floor_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1156,8 +1156,8 @@ declare @llvm.vp.floor.nxv16f32(, @vp_floor_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1242,10 +1242,10 @@ declare @llvm.vp.floor.nxv2f64(, @vp_floor_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI36_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -1286,10 +1286,10 @@ declare @llvm.vp.floor.nxv4f64(, @vp_floor_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI38_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -1330,10 +1330,10 @@ declare @llvm.vp.floor.nxv7f64(, @vp_floor_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv7f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI40_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1374,10 +1374,10 @@ declare @llvm.vp.floor.nxv8f64(, @vp_floor_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI42_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1425,13 +1425,13 @@ define @vp_floor_nxv16f64( %va, @vfmax_nxv32bf16_vv( %a, @vfmax_nxv32bf16_vv( %a, @vfmax_nxv32bf16_vv( %a, @vfmax_nxv32bf16_vv( %a, @vfmax_nxv32f16_vv( %a, @vfmax_nxv32f16_vv( %a, @llvm.vp.maximum.nxv1bf16(, < define 
@vfmax_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v11, v11, v0.t @@ -66,8 +66,8 @@ declare @llvm.vp.maximum.nxv2bf16(, < define @vfmax_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v11, v11, v0.t @@ -113,8 +113,8 @@ declare @llvm.vp.maximum.nxv4bf16(, < define @vfmax_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmfeq.vv v8, v12, v12, v0.t @@ -162,8 +162,8 @@ declare @llvm.vp.maximum.nxv8bf16(, < define @vfmax_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vv v8, v16, v16, v0.t @@ -217,8 +217,8 @@ define @vfmax_vv_nxv16bf16( %va, @llvm.vp.maximum.nxv1f16(, @vfmax_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -582,8 +582,8 @@ define @vfmax_vv_nxv1f16( %va, @llvm.vp.maximum.nxv2f16(, @vfmax_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -652,8 +652,8 @@ define @vfmax_vv_nxv2f16( %va, @llvm.vp.maximum.nxv4f16(, @vfmax_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -722,8 +722,8 @@ define @vfmax_vv_nxv4f16( %va, @llvm.vp.maximum.nxv8f16(, @vfmax_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v12, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v12, v0 ; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t ; ZVFH-NEXT: vmv1r.v v0, v13 ; ZVFH-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -796,8 +796,8 @@ define @vfmax_vv_nxv8f16( %va, @llvm.vp.maximum.nxv16f16(, @vfmax_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v16, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv1r.v v16, v0 ; ZVFH-NEXT: vmfeq.vv v17, v8, v8, v0.t ; ZVFH-NEXT: vmv1r.v v0, v17 ; ZVFH-NEXT: vmerge.vvm v20, v8, 
v12, v0 @@ -876,8 +876,8 @@ define @vfmax_vv_nxv16f16( %va, @vfmax_vv_nxv32f16( %va, @llvm.vp.maximum.nxv1f32(, @vfmax_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1313,8 +1313,8 @@ declare @llvm.vp.maximum.nxv2f32(, @vfmax_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1346,8 +1346,8 @@ declare @llvm.vp.maximum.nxv4f32(, @vfmax_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -1381,8 +1381,8 @@ declare @llvm.vp.maximum.nxv8f32(, @vfmax_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0 @@ -1416,8 +1416,8 @@ declare @llvm.vp.maximum.nxv1f64(, @vfmax_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1449,8 +1449,8 @@ declare @llvm.vp.maximum.nxv2f64(, @vfmax_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -1484,8 +1484,8 @@ declare @llvm.vp.maximum.nxv4f64(, @vfmax_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0 @@ -1525,8 +1525,8 @@ define @vfmax_vv_nxv8f64( %va, @vfmax_vv_nxv16f64( %va, @vfmax_vv_nxv16f64( %va, @vfmin_nxv32bf16_vv( %a, @vfmin_nxv32bf16_vv( %a, @vfmin_nxv32bf16_vv( %a, @vfmin_nxv32bf16_vv( %a, @vfmin_nxv32f16_vv( %a, @vfmin_nxv32f16_vv( %a, @llvm.vp.minimum.nxv1bf16(, < define @vfmin_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v11, v11, v0.t @@ -66,8 +66,8 @@ declare @llvm.vp.minimum.nxv2bf16(, < define @vfmin_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2bf16: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v11, v11, v0.t @@ -113,8 +113,8 @@ declare @llvm.vp.minimum.nxv4bf16(, < define @vfmin_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmfeq.vv v8, v12, v12, v0.t @@ -162,8 +162,8 @@ declare @llvm.vp.minimum.nxv8bf16(, < define @vfmin_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vv v8, v16, v16, v0.t @@ -217,8 +217,8 @@ define @vfmin_vv_nxv16bf16( %va, @llvm.vp.minimum.nxv1f16(, @vfmin_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -582,8 +582,8 @@ define @vfmin_vv_nxv1f16( %va, @llvm.vp.minimum.nxv2f16(, @vfmin_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -652,8 +652,8 @@ define @vfmin_vv_nxv2f16( %va, @llvm.vp.minimum.nxv4f16(, @vfmin_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 ; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t ; ZVFH-NEXT: vmerge.vvm v11, v8, v9, v0 ; ZVFH-NEXT: vmv1r.v v0, v10 @@ -722,8 +722,8 @@ define @vfmin_vv_nxv4f16( %va, @llvm.vp.minimum.nxv8f16(, @vfmin_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v12, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v12, v0 ; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t ; ZVFH-NEXT: vmv1r.v v0, v13 ; ZVFH-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -796,8 +796,8 @@ define @vfmin_vv_nxv8f16( %va, @llvm.vp.minimum.nxv16f16(, @vfmin_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v16, v0 ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv1r.v v16, v0 ; ZVFH-NEXT: vmfeq.vv v17, v8, v8, v0.t ; ZVFH-NEXT: vmv1r.v v0, v17 ; ZVFH-NEXT: vmerge.vvm v20, v8, v12, v0 @@ -876,8 +876,8 @@ define @vfmin_vv_nxv16f16( %va, @vfmin_vv_nxv32f16( %va, @llvm.vp.minimum.nxv1f32(, @vfmin_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1313,8 +1313,8 @@ declare @llvm.vp.minimum.nxv2f32(, @vfmin_vv_nxv2f32( %va, 
%vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1346,8 +1346,8 @@ declare @llvm.vp.minimum.nxv4f32(, @vfmin_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -1381,8 +1381,8 @@ declare @llvm.vp.minimum.nxv8f32(, @vfmin_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0 @@ -1416,8 +1416,8 @@ declare @llvm.vp.minimum.nxv1f64(, @vfmin_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t ; CHECK-NEXT: vmerge.vvm v11, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1449,8 +1449,8 @@ declare @llvm.vp.minimum.nxv2f64(, @vfmin_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: vmerge.vvm v14, v8, v10, v0 @@ -1484,8 +1484,8 @@ declare @llvm.vp.minimum.nxv4f64(, @vfmin_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: vmerge.vvm v20, v8, v12, v0 @@ -1525,8 +1525,8 @@ define @vfmin_vv_nxv8f64( %va, @vfmin_vv_nxv16f64( %va, @vfmin_vv_nxv16f64( %va, This Inner Loop Header: Depth=1 ; RV32-NEXT: th.lrb a0, a1, a0, 0 +; RV32-NEXT: vsetivli zero, 1, e8, m1, tu, ma ; RV32-NEXT: vmv1r.v v9, v8 -; RV32-NEXT: vsetivli zero, 8, e8, m1, tu, ma ; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-NEXT: vmseq.vi v9, v9, 0 @@ -45,8 +45,8 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) { ; RV64-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: th.lrb a0, a1, a0, 0 +; RV64-NEXT: vsetivli zero, 1, e8, m1, tu, ma ; RV64-NEXT: vmv1r.v v9, v8 -; RV64-NEXT: vsetivli zero, 8, e8, m1, tu, ma ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-NEXT: vmseq.vi v9, v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll index c7e3c8cb51982..b569efc7447da 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll @@ -703,10 +703,10 @@ define @fshl_v16i32( %a, @fshl_v7i64( %a, ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size 
Folded Spill +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: li a0, 63 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v24, a0, v0.t ; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t ; CHECK-NEXT: vnot.v v16, v24, v0.t @@ -953,10 +953,10 @@ define @fshl_v8i64( %a, ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: li a0, 63 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v24, a0, v0.t ; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t ; CHECK-NEXT: vnot.v v16, v24, v0.t @@ -988,6 +988,7 @@ define @fshr_v16i64( %a, @fshr_v16i64( %a, @fshl_v16i64( %a, @fshl_v16i64( %a, @test_specify_reg_mf2( %in, %in2) nounwind { ; CHECK-LABEL: test_specify_reg_mf2: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v2, v9 ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: #APP ; CHECK-NEXT: vadd.vv v0, v1, v2 ; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v0 ; CHECK-NEXT: ret entry: @@ -380,11 +382,13 @@ entry: define @test_specify_reg_m1( %in, %in2) nounwind { ; CHECK-LABEL: test_specify_reg_m1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v2, v9 ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: #APP ; CHECK-NEXT: vadd.vv v0, v1, v2 ; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v0 ; CHECK-NEXT: ret entry: @@ -395,11 +399,13 @@ entry: define @test_specify_reg_m2( %in, %in2) nounwind { ; CHECK-LABEL: test_specify_reg_m2: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v4, v10 ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: #APP ; CHECK-NEXT: vadd.vv v0, v2, v4 ; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v8, v0 ; CHECK-NEXT: ret entry: @@ -410,6 +416,7 @@ entry: define @test_specify_reg_mask( %in, %in2) nounwind { ; CHECK-LABEL: test_specify_reg_mask: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v2, v8 ; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: #APP diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll index 8925a9e0cee32..ca9cec921b3cd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll @@ -5,6 +5,7 @@ define @insert_nxv8i32_nxv4i32_0( %vec, %subvec) { ; CHECK-LABEL: insert_nxv8i32_nxv4i32_0: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv4i32.nxv8i32( %vec, %subvec, i64 0) @@ -14,6 +15,7 @@ define @insert_nxv8i32_nxv4i32_0( %vec, @insert_nxv8i32_nxv4i32_4( %vec, %subvec) { ; CHECK-LABEL: insert_nxv8i32_nxv4i32_4: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv4i32.nxv8i32( %vec, %subvec, i64 4) @@ -23,6 +25,7 @@ define @insert_nxv8i32_nxv4i32_4( %vec, @insert_nxv8i32_nxv2i32_0( %vec, %subvec) { ; CHECK-LABEL: insert_nxv8i32_nxv2i32_0: ; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v12 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 0) @@ -32,6 +35,7 @@ define @insert_nxv8i32_nxv2i32_0( %vec, @insert_nxv8i32_nxv2i32_2( %vec, %subvec) { ; CHECK-LABEL: insert_nxv8i32_nxv2i32_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v12 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 2) @@ -41,6 +45,7 @@ define @insert_nxv8i32_nxv2i32_2( %vec, @insert_nxv8i32_nxv2i32_4( %vec, %subvec) { ; CHECK-LABEL: insert_nxv8i32_nxv2i32_4: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v12 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 4) @@ -50,6 +55,7 @@ define @insert_nxv8i32_nxv2i32_4( %vec, @insert_nxv8i32_nxv2i32_6( %vec, %subvec) { ; CHECK-LABEL: insert_nxv8i32_nxv2i32_6: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 6) @@ -86,6 +92,7 @@ define @insert_nxv1i8_nxv4i8_3( %vec, @insert_nxv16i32_nxv8i32_0( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv8i32_0: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv8i32.nxv16i32( %vec, %subvec, i64 0) @@ -95,6 +102,7 @@ define @insert_nxv16i32_nxv8i32_0( %vec, define @insert_nxv16i32_nxv8i32_8( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv8i32_8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv4r.v v12, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv8i32.nxv16i32( %vec, %subvec, i64 8) @@ -104,6 +112,7 @@ define @insert_nxv16i32_nxv8i32_8( %vec, define @insert_nxv16i32_nxv4i32_0( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv4i32_0: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v8, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 0) @@ -113,6 +122,7 @@ define @insert_nxv16i32_nxv4i32_0( %vec, define @insert_nxv16i32_nxv4i32_4( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv4i32_4: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v10, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 4) @@ -122,6 +132,7 @@ define @insert_nxv16i32_nxv4i32_4( %vec, define @insert_nxv16i32_nxv4i32_8( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv4i32_8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v12, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 8) @@ -131,6 +142,7 @@ define @insert_nxv16i32_nxv4i32_8( %vec, define @insert_nxv16i32_nxv4i32_12( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv4i32_12: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v14, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 12) @@ -140,6 +152,7 @@ define @insert_nxv16i32_nxv4i32_12( %vec, define @insert_nxv16i32_nxv2i32_0( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv2i32_0: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 0) @@ -149,6 +162,7 @@ define @insert_nxv16i32_nxv2i32_0( 
%vec, define @insert_nxv16i32_nxv2i32_2( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv2i32_2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 2) @@ -158,6 +172,7 @@ define @insert_nxv16i32_nxv2i32_2( %vec, define @insert_nxv16i32_nxv2i32_4( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv2i32_4: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 4) @@ -167,6 +182,7 @@ define @insert_nxv16i32_nxv2i32_4( %vec, define @insert_nxv16i32_nxv2i32_6( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv2i32_6: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v11, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 6) @@ -176,6 +192,7 @@ define @insert_nxv16i32_nxv2i32_6( %vec, define @insert_nxv16i32_nxv2i32_8( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv2i32_8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v12, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 8) @@ -185,6 +202,7 @@ define @insert_nxv16i32_nxv2i32_8( %vec, define @insert_nxv16i32_nxv2i32_10( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv2i32_10: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v13, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 10) @@ -194,6 +212,7 @@ define @insert_nxv16i32_nxv2i32_10( %vec, define @insert_nxv16i32_nxv2i32_12( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv2i32_12: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v14, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 12) @@ -203,6 +222,7 @@ define @insert_nxv16i32_nxv2i32_12( %vec, define @insert_nxv16i32_nxv2i32_14( %vec, %subvec) { ; CHECK-LABEL: insert_nxv16i32_nxv2i32_14: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v15, v16 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 14) @@ -512,6 +532,7 @@ define @insert_nxv2i64_nxv3i64(<3 x i64> %sv) #0 { define @insert_insert_combine(<2 x i32> %subvec) { ; CHECK-LABEL: insert_insert_combine: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: ret %inner = call @llvm.vector.insert.nxv4i32.v2i32( undef, <2 x i32> %subvec, i64 0) @@ -524,6 +545,7 @@ define @insert_insert_combine(<2 x i32> %subvec) { define @insert_insert_combine2( %subvec) { ; CHECK-LABEL: insert_insert_combine2: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: ret %inner = call @llvm.vector.insert.nxv2i32.nxv4i32( undef, %subvec, i64 0) diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll index ffb9bf76fb4fa..166dba6a56524 100644 --- a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll @@ -55,11 +55,11 @@ declare @llvm.vp.llrint.nxv8i64.nxv8f32(, define @llrint_nxv16i64_nxv16f32( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv16i64_nxv16f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, 
vlenb ; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sltu a2, a0, a3 ; CHECK-NEXT: addi a2, a2, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll index 9991bbc9725ba..21045b69a8b5d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll @@ -117,11 +117,11 @@ define @lrint_nxv16f32( %x, @llvm.riscv.viota.mask.nxv1i8( define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: viota.m v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1312,9 +1312,9 @@ declare @llvm.riscv.vmsbf.mask.nxv1i1( define @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsbf.m v8, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -1443,9 +1443,9 @@ declare @llvm.riscv.vmsbf.mask.nxv64i1( define @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmsbf.m v8, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll index a3eddbcc2baed..9ee2324f615dd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -221,11 +221,13 @@ define @mgather_truemask_nxv4i8( %ptrs, @mgather_falsemask_nxv4i8( %ptrs, %passthru) { ; RV32-LABEL: mgather_falsemask_nxv4i8: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4i8: ; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i8.nxv4p0( %ptrs, i32 1, zeroinitializer, %passthru) @@ -442,11 +444,13 @@ define @mgather_truemask_nxv4i16( %ptrs, @mgather_falsemask_nxv4i16( %ptrs, %passthru) { ; RV32-LABEL: mgather_falsemask_nxv4i16: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4i16: ; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i16.nxv4p0( %ptrs, i32 2, zeroinitializer, %passthru) @@ -686,11 +690,13 @@ define @mgather_truemask_nxv4i32( %ptrs, @mgather_falsemask_nxv4i32( %ptrs, %passthru) { ; RV32-LABEL: mgather_falsemask_nxv4i32: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4i32: ; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-NEXT: vmv2r.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i32.nxv4p0( %ptrs, i32 4, zeroinitializer, 
%passthru) @@ -949,6 +955,7 @@ define @mgather_truemask_nxv4i64( %ptrs, @mgather_falsemask_nxv4i64( %ptrs, %passthru) { ; CHECK-LABEL: mgather_falsemask_nxv4i64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %v = call @llvm.masked.gather.nxv4i64.nxv4p0( %ptrs, i32 8, zeroinitializer, %passthru) @@ -1232,12 +1239,12 @@ define void @mgather_nxv16i64( %ptrs0, %ptr ; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; RV64-NEXT: addi a3, sp, 16 ; RV64-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; RV64-NEXT: vmv8r.v v16, v8 ; RV64-NEXT: vl8re64.v v24, (a0) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: vl8re64.v v8, (a1) ; RV64-NEXT: srli a1, a0, 3 -; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vx v7, v0, a1 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v24, (zero), v16, v0.t @@ -1348,11 +1355,13 @@ define @mgather_truemask_nxv4bf16( %ptrs define @mgather_falsemask_nxv4bf16( %ptrs, %passthru) { ; RV32-LABEL: mgather_falsemask_nxv4bf16: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4bf16: ; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4bf16.nxv4p0( %ptrs, i32 2, zeroinitializer, %passthru) @@ -1549,11 +1558,13 @@ define @mgather_truemask_nxv4f16( %ptrs, < define @mgather_falsemask_nxv4f16( %ptrs, %passthru) { ; RV32-LABEL: mgather_falsemask_nxv4f16: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4f16: ; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4f16.nxv4p0( %ptrs, i32 2, zeroinitializer, %passthru) @@ -1749,11 +1760,13 @@ define @mgather_truemask_nxv4f32( %ptrs, define @mgather_falsemask_nxv4f32( %ptrs, %passthru) { ; RV32-LABEL: mgather_falsemask_nxv4f32: ; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4f32: ; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-NEXT: vmv2r.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4f32.nxv4p0( %ptrs, i32 4, zeroinitializer, %passthru) @@ -2012,6 +2025,7 @@ define @mgather_truemask_nxv4f64( %ptrs, define @mgather_falsemask_nxv4f64( %ptrs, %passthru) { ; CHECK-LABEL: mgather_falsemask_nxv4f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %v = call @llvm.masked.gather.nxv4f64.nxv4p0( %ptrs, i32 8, zeroinitializer, %passthru) @@ -2317,8 +2331,8 @@ define @mgather_baseidx_nxv32i8(ptr %base, ; ; RV64-LABEL: mgather_baseidx_nxv32i8: ; RV64: # %bb.0: -; RV64-NEXT: vmv1r.v v16, v0 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v16, v0 ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll index 72c251ce985cb..77a1f508d2218 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll @@ -2009,11 +2009,11 @@ 
define void @mscatter_baseidx_nxv16i16_nxv16f64( %val0, @reverse_nxv32i8( %a) { define @reverse_nxv64i8( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv64i8: ; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv8r.v v16, v8 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v8 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v24, v8, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma @@ -1188,10 +1188,10 @@ define @reverse_nxv64i8( %a) { ; ; RV32-BITS-256-LABEL: reverse_nxv64i8: ; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vmv8r.v v16, v8 ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: addi a0, a0, -1 -; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vid.v v8 ; RV32-BITS-256-NEXT: vrsub.vx v24, v8, a0 ; RV32-BITS-256-NEXT: vrgather.vv v15, v16, v24 @@ -1206,10 +1206,10 @@ define @reverse_nxv64i8( %a) { ; ; RV32-BITS-512-LABEL: reverse_nxv64i8: ; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vmv8r.v v16, v8 ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: addi a0, a0, -1 -; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vid.v v8 ; RV32-BITS-512-NEXT: vrsub.vx v24, v8, a0 ; RV32-BITS-512-NEXT: vrgather.vv v15, v16, v24 @@ -1224,10 +1224,10 @@ define @reverse_nxv64i8( %a) { ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i8: ; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv8r.v v16, v8 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v8 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v24, v8, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma @@ -1243,10 +1243,10 @@ define @reverse_nxv64i8( %a) { ; ; RV64-BITS-256-LABEL: reverse_nxv64i8: ; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vmv8r.v v16, v8 ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: addi a0, a0, -1 -; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vid.v v8 ; RV64-BITS-256-NEXT: vrsub.vx v24, v8, a0 ; RV64-BITS-256-NEXT: vrgather.vv v15, v16, v24 @@ -1261,10 +1261,10 @@ define @reverse_nxv64i8( %a) { ; ; RV64-BITS-512-LABEL: reverse_nxv64i8: ; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vmv8r.v v16, v8 ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: addi a0, a0, -1 -; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vid.v v8 ; RV64-BITS-512-NEXT: vrsub.vx v24, v8, a0 ; RV64-BITS-512-NEXT: vrgather.vv v15, v16, v24 @@ -1367,11 +1367,11 @@ define @reverse_nxv16i16( %a) { define @reverse_nxv32i16( %a) { ; CHECK-LABEL: reverse_nxv32i16: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vx v24, v8, a0 ; CHECK-NEXT: vrgather.vv v15, v16, v24 @@ -1458,11 +1458,11 @@ define @reverse_nxv8i32( %a) { define @reverse_nxv16i32( %a) { ; CHECK-LABEL: reverse_nxv16i32: ; 
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vx v24, v8, a0 ; CHECK-NEXT: vrgather.vv v15, v16, v24 @@ -1533,11 +1533,11 @@ define @reverse_nxv4i64( %a) { define @reverse_nxv8i64( %a) { ; CHECK-LABEL: reverse_nxv8i64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vx v24, v8, a0 ; CHECK-NEXT: vrgather.vv v15, v16, v24 @@ -1644,11 +1644,11 @@ define @reverse_nxv16bf16( %a) { define @reverse_nxv32bf16( %a) { ; CHECK-LABEL: reverse_nxv32bf16: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vx v24, v8, a0 ; CHECK-NEXT: vrgather.vv v15, v16, v24 @@ -1751,11 +1751,11 @@ define @reverse_nxv16f16( %a) { define @reverse_nxv32f16( %a) { ; CHECK-LABEL: reverse_nxv32f16: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vx v24, v8, a0 ; CHECK-NEXT: vrgather.vv v15, v16, v24 @@ -1842,11 +1842,11 @@ define @reverse_nxv8f32( %a) { define @reverse_nxv16f32( %a) { ; CHECK-LABEL: reverse_nxv16f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vx v24, v8, a0 ; CHECK-NEXT: vrgather.vv v15, v16, v24 @@ -1917,11 +1917,11 @@ define @reverse_nxv4f64( %a) { define @reverse_nxv8f64( %a) { ; CHECK-LABEL: reverse_nxv8f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vx v24, v8, a0 ; CHECK-NEXT: vrgather.vv v15, v16, v24 diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll index 94fce80ad3b8e..9aa26e59c6a03 100644 --- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll @@ -117,8 +117,8 @@ declare @llvm.vp.nearbyint.nxv4bf16(, define @vp_nearbyint_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv4bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -169,8 +169,8 @@ declare @llvm.vp.nearbyint.nxv8bf16(, define @vp_nearbyint_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv8bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v12, 
v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -221,8 +221,8 @@ declare @llvm.vp.nearbyint.nxv16bf16( @vp_nearbyint_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv16bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -273,9 +273,9 @@ declare @llvm.vp.nearbyint.nxv32bf16( @vp_nearbyint_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv32bf16: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12 ; CHECK-NEXT: lui a3, 307200 ; CHECK-NEXT: slli a1, a2, 1 @@ -566,8 +566,8 @@ define @vp_nearbyint_nxv4f16( %va, @llvm.vp.nearbyint.nxv8f16(, @vp_nearbyint_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI18_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -652,8 +652,8 @@ define @vp_nearbyint_nxv8f16( %va, @llvm.vp.nearbyint.nxv16f16(, < define @vp_nearbyint_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v12, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI20_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv1r.v v12, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0) ; ZVFH-NEXT: vfabs.v v16, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -738,8 +738,8 @@ define @vp_nearbyint_nxv16f16( %va, @llvm.vp.nearbyint.nxv32f16(, < define @vp_nearbyint_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v16, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI22_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vmv1r.v v16, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0) ; ZVFH-NEXT: vfabs.v v24, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -824,9 +824,9 @@ define @vp_nearbyint_nxv32f16( %va, @llvm.vp.nearbyint.nxv4f32(, @vp_nearbyint_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1080,8 +1080,8 @@ declare @llvm.vp.nearbyint.nxv8f32(, @vp_nearbyint_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1124,8 +1124,8 @@ declare @llvm.vp.nearbyint.nxv16f32(, define @vp_nearbyint_nxv16f32( %va, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vp_nearbyint_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1210,10 +1210,10 @@ declare @llvm.vp.nearbyint.nxv2f64(, define @vp_nearbyint_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI36_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -1254,10 +1254,10 @@ declare @llvm.vp.nearbyint.nxv4f64(, define @vp_nearbyint_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI38_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -1298,10 +1298,10 @@ declare @llvm.vp.nearbyint.nxv7f64(, define @vp_nearbyint_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv7f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI40_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1342,10 +1342,10 @@ declare @llvm.vp.nearbyint.nxv8f64(, define @vp_nearbyint_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI42_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1387,13 +1387,13 @@ declare @llvm.vp.nearbyint.nxv16f64( @vp_nearbyint_nxv16f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv16f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: lui a2, %hi(.LCPI44_0) ; CHECK-NEXT: srli a3, a1, 3 ; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2) ; CHECK-NEXT: sub a2, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v6, v0, a3 ; CHECK-NEXT: sltu a3, a0, a2 ; CHECK-NEXT: addi a3, a3, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/pr88576.ll b/llvm/test/CodeGen/RISCV/rvv/pr88576.ll index 37c67b9ff2f6a..dd7debd3ab046 100644 --- a/llvm/test/CodeGen/RISCV/rvv/pr88576.ll +++ b/llvm/test/CodeGen/RISCV/rvv/pr88576.ll @@ -23,10 +23,10 @@ define i1 @foo( %x, i64 %y) { ; CHECK-NEXT: slli a2, a2, 4 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: andi sp, sp, -64 +; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma ; CHECK-NEXT: 
vmv1r.v v0, v9 ; CHECK-NEXT: addi a2, sp, 64 ; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: add a0, a2, a0 ; CHECK-NEXT: add a1, a2, a1 @@ -53,8 +53,8 @@ define i1 @foo( %x, i64 %y) { define i8 @bar( %x, i64 %y) { ; CHECK-LABEL: bar: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vslidedown.vx v8, v0, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll index 2a69dd31118bd..70ea1bc78d2e5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll @@ -109,8 +109,8 @@ declare @llvm.vp.rint.nxv4bf16(, @vp_rint_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv4bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -157,8 +157,8 @@ declare @llvm.vp.rint.nxv8bf16(, @vp_rint_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv8bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -205,8 +205,8 @@ declare @llvm.vp.rint.nxv16bf16(, < define @vp_rint_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv16bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -259,9 +259,9 @@ define @vp_rint_nxv32bf16( %va, @vp_rint_nxv4f16( %va, @llvm.vp.rint.nxv8f16(, @vp_rint_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI18_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -613,8 +613,8 @@ define @vp_rint_nxv8f16( %va, @llvm.vp.rint.nxv16f16(, @vp_rint_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v12, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI20_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv1r.v v12, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0) ; ZVFH-NEXT: vfabs.v v16, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -691,8 +691,8 @@ define @vp_rint_nxv16f16( %va, @llvm.vp.rint.nxv32f16(, @vp_rint_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v16, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI22_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vmv1r.v v16, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0) ; ZVFH-NEXT: vfabs.v v24, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, 
mu ; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -775,9 +775,9 @@ define @vp_rint_nxv32f16( %va, @llvm.vp.rint.nxv4f32(, @vp_rint_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1018,8 +1018,8 @@ declare @llvm.vp.rint.nxv8f32(, @vp_rint_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1058,8 +1058,8 @@ declare @llvm.vp.rint.nxv16f32(, @vp_rint_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1136,10 +1136,10 @@ declare @llvm.vp.rint.nxv2f64(, @vp_rint_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI36_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -1176,10 +1176,10 @@ declare @llvm.vp.rint.nxv4f64(, @vp_rint_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI38_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -1216,10 +1216,10 @@ declare @llvm.vp.rint.nxv7f64(, @vp_rint_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv7f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI40_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1256,10 +1256,10 @@ declare @llvm.vp.rint.nxv8f64(, @vp_rint_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI42_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1303,13 +1303,13 @@ define @vp_rint_nxv16f64( %va, @llvm.vp.round.nxv4bf16(, @vp_round_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv4bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -169,8 +169,8 @@ declare @llvm.vp.round.nxv8bf16(, @vp_round_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv8bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -221,8 +221,8 @@ declare @llvm.vp.round.nxv16bf16(, define @vp_round_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv16bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -279,9 +279,9 @@ define @vp_round_nxv32bf16( %va, @vp_round_nxv4f16( %va, @llvm.vp.round.nxv8f16(, @vp_round_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI18_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -668,8 +668,8 @@ define @vp_round_nxv8f16( %va, @llvm.vp.round.nxv16f16(, @vp_round_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v12, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI20_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv1r.v v12, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0) ; ZVFH-NEXT: vfabs.v v16, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -754,8 +754,8 @@ define @vp_round_nxv16f16( %va, @llvm.vp.round.nxv32f16(, @vp_round_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v16, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI22_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vmv1r.v v16, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0) ; ZVFH-NEXT: vfabs.v v24, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -846,9 +846,9 @@ define @vp_round_nxv32f16( %va, @llvm.vp.round.nxv4f32(, @vp_round_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1112,8 +1112,8 @@ declare @llvm.vp.round.nxv8f32(, @vp_round_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1156,8 +1156,8 @@ declare @llvm.vp.round.nxv16f32(, @vp_round_nxv16f32( %va, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vp_round_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1242,10 +1242,10 @@ declare @llvm.vp.round.nxv2f64(, @vp_round_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI36_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -1286,10 +1286,10 @@ declare @llvm.vp.round.nxv4f64(, @vp_round_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI38_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -1330,10 +1330,10 @@ declare @llvm.vp.round.nxv7f64(, @vp_round_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv7f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI40_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1374,10 +1374,10 @@ declare @llvm.vp.round.nxv8f64(, @vp_round_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI42_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1425,13 +1425,13 @@ define @vp_round_nxv16f64( %va, @llvm.vp.roundeven.nxv4bf16(, define @vp_roundeven_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv4bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -169,8 +169,8 @@ declare @llvm.vp.roundeven.nxv8bf16(, define @vp_roundeven_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv8bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -221,8 +221,8 @@ declare @llvm.vp.roundeven.nxv16bf16( @vp_roundeven_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv16bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, 
ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -279,9 +279,9 @@ define @vp_roundeven_nxv32bf16( %va ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12 ; CHECK-NEXT: lui a3, 307200 ; CHECK-NEXT: slli a1, a2, 1 @@ -582,8 +582,8 @@ define @vp_roundeven_nxv4f16( %va, @llvm.vp.roundeven.nxv8f16(, @vp_roundeven_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI18_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -668,8 +668,8 @@ define @vp_roundeven_nxv8f16( %va, @llvm.vp.roundeven.nxv16f16(, < define @vp_roundeven_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v12, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI20_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv1r.v v12, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0) ; ZVFH-NEXT: vfabs.v v16, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -754,8 +754,8 @@ define @vp_roundeven_nxv16f16( %va, @llvm.vp.roundeven.nxv32f16(, < define @vp_roundeven_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v16, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI22_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vmv1r.v v16, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0) ; ZVFH-NEXT: vfabs.v v24, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -846,9 +846,9 @@ define @vp_roundeven_nxv32f16( %va, @llvm.vp.roundeven.nxv4f32(, @vp_roundeven_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1112,8 +1112,8 @@ declare @llvm.vp.roundeven.nxv8f32(, @vp_roundeven_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1156,8 +1156,8 @@ declare @llvm.vp.roundeven.nxv16f32(, define @vp_roundeven_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: 
fmv.w.x fa5, a0 @@ -1242,10 +1242,10 @@ declare @llvm.vp.roundeven.nxv2f64(, define @vp_roundeven_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI36_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -1286,10 +1286,10 @@ declare @llvm.vp.roundeven.nxv4f64(, define @vp_roundeven_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI38_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -1330,10 +1330,10 @@ declare @llvm.vp.roundeven.nxv7f64(, define @vp_roundeven_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv7f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI40_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1374,10 +1374,10 @@ declare @llvm.vp.roundeven.nxv8f64(, define @vp_roundeven_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI42_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1425,13 +1425,13 @@ define @vp_roundeven_nxv16f64( %va, ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: lui a2, %hi(.LCPI44_0) ; CHECK-NEXT: srli a3, a1, 3 ; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2) ; CHECK-NEXT: sub a2, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v6, v0, a3 ; CHECK-NEXT: sltu a3, a0, a2 ; CHECK-NEXT: addi a3, a3, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll index 96c821a76ae84..dd7db58ccdf34 100644 --- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll @@ -117,8 +117,8 @@ declare @llvm.vp.roundtozero.nxv4bf16( @vp_roundtozero_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv4bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 ; CHECK-NEXT: 
lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma @@ -169,8 +169,8 @@ declare @llvm.vp.roundtozero.nxv8bf16( @vp_roundtozero_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv8bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma @@ -221,8 +221,8 @@ declare @llvm.vp.roundtozero.nxv16bf16( @vp_roundtozero_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv16bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: lui a1, 307200 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -279,9 +279,9 @@ define @vp_roundtozero_nxv32bf16( % ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12 ; CHECK-NEXT: lui a3, 307200 ; CHECK-NEXT: slli a1, a2, 1 @@ -582,8 +582,8 @@ define @vp_roundtozero_nxv4f16( %va, @llvm.vp.roundtozero.nxv8f16(, @vp_roundtozero_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI18_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv1r.v v10, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -668,8 +668,8 @@ define @vp_roundtozero_nxv8f16( %va, @llvm.vp.roundtozero.nxv16f16(, define @vp_roundtozero_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v12, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI20_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv1r.v v12, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0) ; ZVFH-NEXT: vfabs.v v16, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -754,8 +754,8 @@ define @vp_roundtozero_nxv16f16( %va, < ; ; ZVFHMIN-LABEL: vp_roundtozero_nxv16f16: ; ZVFHMIN: # %bb.0: -; ZVFHMIN-NEXT: vmv1r.v v12, v0 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v12, v0 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 ; ZVFHMIN-NEXT: lui a1, 307200 ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma @@ -821,10 +821,10 @@ declare @llvm.vp.roundtozero.nxv32f16(, define @vp_roundtozero_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: vmv1r.v v16, v0 -; ZVFH-NEXT: lui a1, %hi(.LCPI22_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; ZVFH-NEXT: vmv1r.v v16, v0 +; ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0) ; ZVFH-NEXT: vfabs.v v24, v8, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -846,9 +846,9 @@ define @vp_roundtozero_nxv32f16( %va, < ; 
ZVFHMIN-NEXT: slli a1, a1, 3 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vmv1r.v v7, v0 ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 ; ZVFHMIN-NEXT: lui a3, 307200 ; ZVFHMIN-NEXT: slli a1, a2, 1 @@ -1068,8 +1068,8 @@ declare @llvm.vp.roundtozero.nxv4f32(, define @vp_roundtozero_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1112,8 +1112,8 @@ declare @llvm.vp.roundtozero.nxv8f32(, define @vp_roundtozero_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1156,8 +1156,8 @@ declare @llvm.vp.roundtozero.nxv16f32( @vp_roundtozero_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: lui a0, 307200 ; CHECK-NEXT: fmv.w.x fa5, a0 @@ -1242,10 +1242,10 @@ declare @llvm.vp.roundtozero.nxv2f64( define @vp_roundtozero_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI36_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t @@ -1286,10 +1286,10 @@ declare @llvm.vp.roundtozero.nxv4f64( define @vp_roundtozero_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmv1r.v v12, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI38_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) ; CHECK-NEXT: vfabs.v v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t @@ -1330,10 +1330,10 @@ declare @llvm.vp.roundtozero.nxv7f64( define @vp_roundtozero_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv7f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI40_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1374,10 +1374,10 @@ declare @llvm.vp.roundtozero.nxv8f64( define @vp_roundtozero_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vmv1r.v v16, v0 -; CHECK-NEXT: lui a1, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v16, v0 +; CHECK-NEXT: lui a0, %hi(.LCPI42_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) ; CHECK-NEXT: vfabs.v v24, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t @@ -1425,13 +1425,13 @@ define @vp_roundtozero_nxv16f64( %v ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: lui a2, %hi(.LCPI44_0) ; CHECK-NEXT: srli a3, a1, 3 ; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2) ; CHECK-NEXT: sub a2, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v6, v0, a3 ; CHECK-NEXT: sltu a3, a0, a2 ; CHECK-NEXT: addi a3, a3, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll index aef160049106b..f636ab9ebd0ce 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll @@ -17,6 +17,7 @@ define @foo( %a, @spill_zvlsseg_nxv1i32(ptr %base, i32 %vl) nounwind { ; SPILL-O0-NEXT: # implicit-def: $v8_v9 ; SPILL-O0-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) +; SPILL-O0-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; SPILL-O0-NEXT: vmv1r.v v8, v9 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill @@ -90,6 +91,7 @@ define @spill_zvlsseg_nxv2i32(ptr %base, i32 %vl) nounwind { ; SPILL-O0-NEXT: # implicit-def: $v8_v9 ; SPILL-O0-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; SPILL-O0-NEXT: vmv1r.v v8, v9 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill @@ -166,6 +168,7 @@ define @spill_zvlsseg_nxv4i32(ptr %base, i32 %vl) nounwind { ; SPILL-O0-NEXT: # implicit-def: $v8m2_v10m2 ; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; SPILL-O0-NEXT: vmv2r.v v8, v10 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill @@ -246,6 +249,7 @@ define @spill_zvlsseg_nxv8i32(ptr %base, i32 %vl) nounwind { ; SPILL-O0-NEXT: # implicit-def: $v8m4_v12m4 ; SPILL-O0-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; SPILL-O0-NEXT: vmv4r.v v8, v12 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill @@ -326,6 +330,7 @@ define @spill_zvlsseg3_nxv4i32(ptr %base, i32 %vl) nounwind { ; SPILL-O0-NEXT: # implicit-def: $v8m2_v10m2_v12m2 ; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; SPILL-O0-NEXT: vlseg3e32.v v8, (a0) +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; SPILL-O0-NEXT: vmv2r.v v8, v10 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll index c7c44fb0e1215..2cd80ef79bd82 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll @@ -20,6 +20,7 @@ define @foo( %a, @spill_zvlsseg_nxv1i32(ptr %base, i64 %vl) nounwind { ; SPILL-O0-NEXT: # implicit-def: $v8_v9 ; SPILL-O0-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) +; SPILL-O0-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; SPILL-O0-NEXT: vmv1r.v v8, v9 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill @@ -90,6 +91,7 @@ define @spill_zvlsseg_nxv2i32(ptr %base, i64 %vl) nounwind { ; SPILL-O0-NEXT: # implicit-def: $v8_v9 ; SPILL-O0-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; SPILL-O0-NEXT: vmv1r.v v8, v9 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill @@ -166,6 +168,7 @@ define @spill_zvlsseg_nxv4i32(ptr %base, i64 %vl) nounwind { ; SPILL-O0-NEXT: # implicit-def: $v8m2_v10m2 ; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; SPILL-O0-NEXT: vmv2r.v v8, v10 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill @@ -246,6 +249,7 @@ define @spill_zvlsseg_nxv8i32(ptr %base, i64 %vl) nounwind { ; SPILL-O0-NEXT: # implicit-def: $v8m4_v12m4 ; SPILL-O0-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; SPILL-O0-NEXT: vmv4r.v v8, v12 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill @@ -326,6 +330,7 @@ define @spill_zvlsseg3_nxv4i32(ptr %base, i64 %vl) nounwind { ; SPILL-O0-NEXT: # implicit-def: $v8m2_v10m2_v12m2 ; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; SPILL-O0-NEXT: vlseg3e32.v v8, (a0) +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; SPILL-O0-NEXT: vmv2r.v v8, v10 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll index b27ba14e85c83..53d1666c30e96 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll @@ -47,6 +47,7 @@ define @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, ; CHECK-NEXT: vs8r.v v8, (t1) ; CHECK-NEXT: sd t1, 0(sp) ; CHECK-NEXT: sd t0, 8(sp) +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: call bar ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll index 23ebfade6f6b0..d329979857a6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll @@ -941,8 +941,8 @@ declare @llvm.riscv.vredsum.nxv2i32.nxv2i32( define @vredsum( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vredsum: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vredsum.vs v11, v9, v10 ; CHECK-NEXT: vmerge.vvm v8, v8, v11, v0 ; CHECK-NEXT: ret @@ -965,8 +965,8 @@ define @vfredusum( %passthru, @vfredusum_allones_mask( %passth define @unfoldable_vredsum_allones_mask_diff_vl( %passthru, %x, %y) { ; CHECK-LABEL: unfoldable_vredsum_allones_mask_diff_vl: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, ma +; 
CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vredsum.vs v11, v9, v10 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, tu, ma ; CHECK-NEXT: vmv.v.v v8, v11 diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll index 6c11e9413525e..70b53841bff4c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll @@ -1473,6 +1473,7 @@ define @fcmp_oeq_vv_nxv64bf16( %va, @fcmp_oeq_vv_nxv64bf16( %va, @fcmp_oeq_vv_nxv64f16( %va, @fcmp_oeq_vv_nxv64f16( %va, @fcmp_oeq_vv_nxv64f16( %va, @fcmp_oeq_vv_nxv64f16( %va, @icmp_eq_vv_nxv128i8( %va, @icmp_eq_vv_nxv128i8( %va, @icmp_eq_vv_nxv128i8( %va, @icmp_eq_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -1173,8 +1173,8 @@ define @icmp_eq_vx_nxv128i8( %va, i8 %b, define @icmp_eq_vx_swap_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vx_swap_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -2244,6 +2244,7 @@ define @icmp_eq_vv_nxv32i32( %va, @icmp_eq_vv_nxv32i32( %va, @icmp_eq_vv_nxv32i32( %va, @icmp_eq_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vx_nxv32i32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: srli a2, a3, 2 ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sub a4, a1, a3 ; CHECK-NEXT: sltu a5, a1, a4 @@ -2332,11 +2332,11 @@ define @icmp_eq_vx_nxv32i32( %va, i32 %b, define @icmp_eq_vx_swap_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vx_swap_nxv32i32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: srli a2, a3, 2 ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sub a4, a1, a3 ; CHECK-NEXT: sltu a5, a1, a4 diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll index 197ba085c0359..75a5eea1cb409 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -4865,10 +4865,10 @@ declare <4 x i1> @llvm.vp.icmp.v4i32(<4 x i32>, <4 x i32>, metadata, <4 x i1>, i define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_icmp: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v0 ; CHECK-NEXT: lui a3, 1 ; CHECK-NEXT: add a3, a0, a3 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: .LBB102_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4906,10 +4906,10 @@ declare <4 x i1> @llvm.vp.fcmp.v4f32(<4 x float>, <4 x float>, metadata, <4 x i1 define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fcmp: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; 
CHECK-NEXT: vmv1r.v v8, v0 ; CHECK-NEXT: lui a2, 1 ; CHECK-NEXT: add a2, a0, a2 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: .LBB103_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll index f8315de324e42..ecd098edb30ae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll @@ -663,6 +663,7 @@ declare @llvm.experimental.vp.strided.load.nxv3f64.p0.i32( define @strided_load_nxv16f64(ptr %ptr, i64 %stride, %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_load_nxv16f64: ; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv1r.v v9, v0 ; CHECK-RV32-NEXT: csrr a4, vlenb ; CHECK-RV32-NEXT: sub a2, a3, a4 @@ -688,6 +689,7 @@ define @strided_load_nxv16f64(ptr %ptr, i64 %stride, @llvm.experimental.vp.strided.load.nxv16f64.p0.i6 define @strided_load_nxv17f64(ptr %ptr, i64 %stride, %mask, i32 zeroext %evl, ptr %hi_ptr) { ; CHECK-RV32-LABEL: strided_load_nxv17f64: ; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-RV32-NEXT: vmv1r.v v8, v0 ; CHECK-RV32-NEXT: csrr a2, vlenb ; CHECK-RV32-NEXT: slli a7, a2, 1 @@ -812,6 +815,7 @@ define @strided_load_nxv17f64(ptr %ptr, i64 %stride, %v, ptr %ptr, i32 sig ; CHECK-NEXT: slli a4, a4, 3 ; CHECK-NEXT: sub sp, sp, a4 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: addi a4, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll index ab13c78da05e8..c9f9a79733003 100644 --- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll +++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll @@ -158,8 +158,8 @@ declare @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( %v, ptr noalias %q) { ; CHECK-LABEL: repeat_shuffle: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vslideup.vi v10, v8, 2 ; CHECK-NEXT: vse64.v v10, (a0) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll index ebd550013ec78..fee6799e992f3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll @@ -565,8 +565,8 @@ declare @llvm.vp.add.nxv128i8(, @vadd_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -1343,11 +1343,11 @@ declare @llvm.vp.add.nxv32i32(, @vadd_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv32i32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: sltu a3, a0, a2 @@ -1399,11 +1399,11 @@ declare i32 @llvm.vscale.i32() define @vadd_vi_nxv32i32_evl_nx8( %va, %m) { ; 
CHECK-LABEL: vadd_vi_nxv32i32_evl_nx8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a2, a0, 2 ; CHECK-NEXT: slli a1, a0, 1 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: sltu a3, a0, a2 diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll index e59a9174b03d9..6b35e4767b239 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll @@ -43,9 +43,9 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1( define iXLen @intrinsic_vcpop_mask_m_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -97,9 +97,9 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1( define iXLen @intrinsic_vcpop_mask_m_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -137,9 +137,9 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1( define iXLen @intrinsic_vcpop_mask_m_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -177,9 +177,9 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1( define iXLen @intrinsic_vcpop_mask_m_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -217,9 +217,9 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1( define iXLen @intrinsic_vcpop_mask_m_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -257,9 +257,9 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv32i1( define iXLen @intrinsic_vcpop_mask_m_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -297,9 +297,9 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv64i1( define iXLen @intrinsic_vcpop_mask_m_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv64i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, 
ta, ma ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll index 6de846b2582da..2fc5b40a89afa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll @@ -7,8 +7,8 @@ define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) { ; CHECK-LABEL: vector_deinterleave_v16i1_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v8, v0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: vmv1r.v v8, v0 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll index 99743066c79a8..2291475cef014 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll @@ -133,8 +133,8 @@ ret {, } %retval define {, } @vector_deinterleave_nxv64i8_nxv128i8( %vec) { ; CHECK-LABEL: vector_deinterleave_nxv64i8_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vnsrl.wi v8, v24, 0 ; CHECK-NEXT: vnsrl.wi v0, v24, 8 ; CHECK-NEXT: vnsrl.wi v12, v16, 0 @@ -148,8 +148,8 @@ ret {, } %retval define {, } @vector_deinterleave_nxv32i16_nxv64i16( %vec) { ; CHECK-LABEL: vector_deinterleave_nxv32i16_nxv64i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vnsrl.wi v8, v24, 0 ; CHECK-NEXT: vnsrl.wi v0, v24, 16 ; CHECK-NEXT: vnsrl.wi v12, v16, 0 @@ -163,9 +163,9 @@ ret {, } %retval define {, } @vector_deinterleave_nxv16i32_nxvv32i32( %vec) { ; CHECK-LABEL: vector_deinterleave_nxv16i32_nxvv32i32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv8r.v v24, v16 ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wx v20, v24, a0 ; CHECK-NEXT: vnsrl.wx v16, v8, a0 ; CHECK-NEXT: vnsrl.wi v0, v8, 0 @@ -374,8 +374,8 @@ declare {, } @llvm.vector.deinterleave define {, } @vector_deinterleave_nxv32bf16_nxv64bf16( %vec) { ; CHECK-LABEL: vector_deinterleave_nxv32bf16_nxv64bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vnsrl.wi v8, v24, 0 ; CHECK-NEXT: vnsrl.wi v0, v24, 16 ; CHECK-NEXT: vnsrl.wi v12, v16, 0 @@ -389,8 +389,8 @@ ret {, } %retval define {, } @vector_deinterleave_nxv32f16_nxv64f16( %vec) { ; CHECK-LABEL: vector_deinterleave_nxv32f16_nxv64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vnsrl.wi v8, v24, 0 ; CHECK-NEXT: vnsrl.wi v0, v24, 16 ; CHECK-NEXT: vnsrl.wi v12, v16, 0 @@ -404,9 +404,9 @@ ret {, } %retval define {, } @vector_deinterleave_nxv16f32_nxv32f32( %vec) { ; CHECK-LABEL: vector_deinterleave_nxv16f32_nxv32f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv8r.v v24, v16 ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wx v20, v24, a0 ; CHECK-NEXT: vnsrl.wx v16, v8, a0 ; CHECK-NEXT: vnsrl.wi v0, v8, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll 
b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll index 7b0ac01918b9b..08aa02c7e869a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll @@ -91,10 +91,10 @@ define <8 x i32> @vector_interleave_v8i32_v4i32(<4 x i32> %a, <4 x i32> %b) { define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: vector_interleave_v4i64_v2i64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: lui a0, 12304 ; CHECK-NEXT: addi a0, a0, 512 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 2 ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -106,10 +106,10 @@ define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) { ; ; ZVBB-LABEL: vector_interleave_v4i64_v2i64: ; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; ZVBB-NEXT: vmv1r.v v10, v9 ; ZVBB-NEXT: lui a0, 12304 ; ZVBB-NEXT: addi a0, a0, 512 -; ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; ZVBB-NEXT: vslideup.vi v8, v10, 2 ; ZVBB-NEXT: vmv.s.x v10, a0 ; ZVBB-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -239,10 +239,10 @@ define <8 x float> @vector_interleave_v8f32_v4f32(<4 x float> %a, <4 x float> %b define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: vector_interleave_v4f64_v2f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: lui a0, 12304 ; CHECK-NEXT: addi a0, a0, 512 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 2 ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -254,10 +254,10 @@ define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double> ; ; ZVBB-LABEL: vector_interleave_v4f64_v2f64: ; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; ZVBB-NEXT: vmv1r.v v10, v9 ; ZVBB-NEXT: lui a0, 12304 ; ZVBB-NEXT: addi a0, a0, 512 -; ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; ZVBB-NEXT: vslideup.vi v8, v10, 2 ; ZVBB-NEXT: vmv.s.x v10, a0 ; ZVBB-NEXT: vsetvli zero, zero, e16, mf2, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll index bc203e215d878..9b78f31d399d9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll @@ -9,9 +9,9 @@ define void @vector_interleave_store_nxv32i1_nxv16i1( %a, %b, ptr %p) { ; CHECK-LABEL: vector_interleave_store_nxv32i1_nxv16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: li a1, -1 ; CHECK-NEXT: csrr a2, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll index 26e9afcb1d109..864acb320d8fe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll @@ -11,9 +11,9 @@ define @vector_interleave_nxv32i1_nxv16i1( %a, %b) { ; CHECK-LABEL: vector_interleave_nxv32i1_nxv16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: li a0, -1 ; 
CHECK-NEXT: csrr a1, vlenb @@ -32,9 +32,9 @@ define @vector_interleave_nxv32i1_nxv16i1( ; ; ZVBB-LABEL: vector_interleave_nxv32i1_nxv16i1: ; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; ZVBB-NEXT: vmv1r.v v9, v0 ; ZVBB-NEXT: vmv1r.v v0, v8 -; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; ZVBB-NEXT: vmv.v.i v10, 0 ; ZVBB-NEXT: li a0, 1 ; ZVBB-NEXT: csrr a1, vlenb @@ -160,9 +160,9 @@ declare @llvm.vector.interleave2.nxv4i64(, define @vector_interleave_nxv128i1_nxv64i1( %a, %b) { ; CHECK-LABEL: vector_interleave_nxv128i1_nxv64i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v24, 0 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vmerge.vim v16, v24, 1, v0 @@ -203,8 +203,8 @@ define @vector_interleave_nxv128i1_nxv64i1( @vector_interleave_nxv128i8_nxv64i8( %a, %b) { ; CHECK-LABEL: vector_interleave_nxv128i8_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vwaddu.vv v8, v24, v16 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwaddu.vv v0, v28, v20 @@ -215,8 +215,8 @@ define @vector_interleave_nxv128i8_nxv64i8( @vector_interleave_nxv128i8_nxv64i8( @vector_interleave_nxv64i16_nxv32i16( %a, %b) { ; CHECK-LABEL: vector_interleave_nxv64i16_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vwaddu.vv v8, v24, v16 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwaddu.vv v0, v28, v20 @@ -242,8 +242,8 @@ define @vector_interleave_nxv64i16_nxv32i16( @vector_interleave_nxv64i16_nxv32i16( @vector_interleave_nxv32i32_nxv16i32( %a, %b) { ; CHECK-LABEL: vector_interleave_nxv32i32_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vwaddu.vv v8, v24, v16 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwaddu.vv v0, v28, v20 @@ -269,9 +269,9 @@ define @vector_interleave_nxv32i32_nxv16i32( @llvm.vector.interleave2.nxv4f64( @vector_interleave_nxv64bf16_nxv32bf16( %a, %b) { ; CHECK-LABEL: vector_interleave_nxv64bf16_nxv32bf16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vwaddu.vv v8, v24, v16 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwaddu.vv v0, v28, v20 @@ -587,8 +587,8 @@ define @vector_interleave_nxv64bf16_nxv32bf16( @vector_interleave_nxv64bf16_nxv32bf16( @vector_interleave_nxv64f16_nxv32f16( %a, %b) { ; CHECK-LABEL: vector_interleave_nxv64f16_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vwaddu.vv v8, v24, v16 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwaddu.vv v0, v28, v20 @@ -614,8 +614,8 @@ define @vector_interleave_nxv64f16_nxv32f16( @vector_interleave_nxv64f16_nxv32f16( @vector_interleave_nxv32f32_nxv16f32( %a, %b) { ; CHECK-LABEL: vector_interleave_nxv32f32_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: vwaddu.vv v8, v24, v16 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwaddu.vv v0, v28, v20 @@ -641,9 +641,9 @@ define @vector_interleave_nxv32f32_nxv16f32( @vadd_vv_passthru( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: vadd_vv_passthru: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vadd.vv v9, v8, v8 @@ -152,8 +152,8 @@ entry: define @vadd_vv_passthru_negative( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: vadd_vv_passthru_negative: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vadd.vv v10, v8, v9 ; CHECK-NEXT: vadd.vv v9, v8, v10 ; CHECK-NEXT: vadd.vv v8, v8, v9 @@ -183,8 +183,8 @@ entry: define @vadd_vv_mask( %0, %1, i32 %2, %m) nounwind { ; CHECK-LABEL: vadd_vv_mask: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vadd.vv v10, v8, v9, v0.t ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vadd.vv v9, v8, v8, v0.t @@ -218,8 +218,8 @@ entry: define @vadd_vv_mask_negative( %0, %1, i32 %2, %m, %m2) nounwind { ; CHECK-LABEL: vadd_vv_mask_negative: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vadd.vv v11, v8, v9, v0.t ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vadd.vv v9, v8, v11, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll index 6a72043ca7e8e..90d798b167cfc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll @@ -11,9 +11,9 @@ declare @llvm.vector.splice.nxv1i1(, @splice_nxv1i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i1_offset_negone: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: vmerge.vim v10, v8, 1, v0 @@ -33,9 +33,9 @@ define @splice_nxv1i1_offset_negone( %a, @splice_nxv1i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i1_offset_max: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: vmerge.vim v10, v8, 1, v0 @@ -59,9 +59,9 @@ declare @llvm.vector.splice.nxv2i1(, @splice_nxv2i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i1_offset_negone: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: vmerge.vim v10, v8, 1, v0 @@ -81,9 +81,9 @@ define @splice_nxv2i1_offset_negone( %a, @splice_nxv2i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i1_offset_max: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: vmerge.vim v10, v8, 1, v0 @@ -107,9 +107,9 @@ declare @llvm.vector.splice.nxv4i1(, @splice_nxv4i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i1_offset_negone: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: csrr a0, vlenb ; 
CHECK-NEXT: vmerge.vim v10, v8, 1, v0 @@ -129,9 +129,9 @@ define @splice_nxv4i1_offset_negone( %a, @splice_nxv4i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i1_offset_max: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: vmerge.vim v10, v8, 1, v0 @@ -155,9 +155,9 @@ declare @llvm.vector.splice.nxv8i1(, @splice_nxv8i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i1_offset_negone: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: vmerge.vim v10, v8, 1, v0 @@ -176,9 +176,9 @@ define @splice_nxv8i1_offset_negone( %a, @splice_nxv8i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i1_offset_max: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: vmerge.vim v10, v8, 1, v0 @@ -201,9 +201,9 @@ declare @llvm.vector.splice.nxv16i1(, @splice_nxv16i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16i1_offset_negone: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: vmerge.vim v12, v10, 1, v0 @@ -223,9 +223,9 @@ define @splice_nxv16i1_offset_negone( %a, < define @splice_nxv16i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16i1_offset_max: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: vmerge.vim v12, v10, 1, v0 @@ -249,9 +249,9 @@ declare @llvm.vector.splice.nxv32i1(, @splice_nxv32i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv32i1_offset_negone: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: vmerge.vim v16, v12, 1, v0 @@ -296,9 +296,9 @@ declare @llvm.vector.splice.nxv64i1(, @splice_nxv64i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv64i1_offset_negone: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v24, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: vmerge.vim v16, v24, 1, v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll index 2c92a5da8eecb..8f9f9c4256c8f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll @@ -462,11 +462,11 @@ declare @llvm.vp.fabs.nxv16f64(, @vfabs_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv16f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma 
; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sltu a2, a0, a3 ; CHECK-NEXT: addi a2, a2, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll index 1953cfd2a0169..87bc9f27d6dc9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll @@ -411,11 +411,11 @@ define @vfadd_vv_nxv32bf16( %va, @vfadd_vf_nxv32bf16( %va, bf ; CHECK-NEXT: add a1, a2, a1 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: fmv.x.h a1, fa0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: addi a3, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli a3, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v20 ; CHECK-NEXT: vsetvli a3, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.x v16, a1 @@ -604,10 +604,10 @@ define @vfadd_vf_nxv32bf16_unmasked( @vfadd_vv_nxv32f16( %va, @vfadd_vf_nxv32f16( %va, half %b ; ZVFHMIN-NEXT: add a1, a2, a1 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v16, v8 ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 ; ZVFHMIN-NEXT: csrr a2, vlenb ; ZVFHMIN-NEXT: addi a3, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill -; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 ; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v16, a1 @@ -1416,10 +1416,10 @@ define @vfadd_vf_nxv32f16_unmasked( %va ; ZVFHMIN-NEXT: slli a1, a1, 4 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v16, v8 ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma ; ZVFHMIN-NEXT: vmset.m v7 ; ZVFHMIN-NEXT: addi a3, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll index ccd286b7ee5fd..061af454aa8ba 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll @@ -373,11 +373,11 @@ define @vfdiv_vv_nxv32bf16( %va, @vfdiv_vf_nxv32bf16( %va, bf ; CHECK-NEXT: add a1, a2, a1 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: fmv.x.h a1, fa0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: addi a3, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli a3, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v20 ; CHECK-NEXT: vsetvli a3, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.x v16, a1 @@ -566,10 +566,10 @@ define @vfdiv_vf_nxv32bf16_unmasked( @vfdiv_vv_nxv32f16( %va, @vfdiv_vf_nxv32f16( %va, half %b ; ZVFHMIN-NEXT: add a1, a2, a1 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb +; ZVFHMIN-NEXT: vsetvli 
a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v16, v8 ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 ; ZVFHMIN-NEXT: csrr a2, vlenb ; ZVFHMIN-NEXT: addi a3, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill -; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 ; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v16, a1 @@ -1328,10 +1328,10 @@ define @vfdiv_vf_nxv32f16_unmasked( %va ; ZVFHMIN-NEXT: slli a1, a1, 4 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v16, v8 ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma ; ZVFHMIN-NEXT: vmset.m v7 ; ZVFHMIN-NEXT: addi a3, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll index eafd605c6110e..c510121ee3ebe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll @@ -43,9 +43,9 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv1i1( define iXLen @intrinsic_vfirst_mask_m_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv1i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -97,9 +97,9 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv2i1( define iXLen @intrinsic_vfirst_mask_m_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv2i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -137,9 +137,9 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv4i1( define iXLen @intrinsic_vfirst_mask_m_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv4i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -177,9 +177,9 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv8i1( define iXLen @intrinsic_vfirst_mask_m_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv8i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -217,9 +217,9 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv16i1( define iXLen @intrinsic_vfirst_mask_m_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv16i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -257,9 +257,9 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv32i1( define iXLen @intrinsic_vfirst_mask_m_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfirst_mask_m_nxv32i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret entry: @@ -297,9 +297,9 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv64i1( define iXLen @intrinsic_vfirst_mask_m_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv64i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll index fd518d9be786d..7ca1983e8b32c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll @@ -628,6 +628,7 @@ define @vfma_vv_nxv32bf16( %va, @vfma_vv_nxv32bf16( %va, @vfma_vv_nxv32f16( %va, @vfma_vv_nxv32f16( %va, @vfma_vv_nxv16f64( %va, @vfma_vv_nxv16f64( %va, @vfnmadd_vv_nxv16f16( %va, @vfnmadd_vf_nxv16f16_neg_splat_commute( @vfnmsub_vv_nxv16f16( %va, @vfnmsub_vf_nxv16f16_neg_splat( ; ; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_neg_splat: ; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vmv4r.v v4, v8 ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 -; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v16, a1 ; ZVFHMIN-NEXT: lui a1, 8 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12 @@ -8712,11 +8712,11 @@ define @vfmsub_vv_nxv32f16( %va, @vfmsub_vf_nxv32f16_unmasked( %v ; ZVFHMIN-NEXT: add a1, a1, a2 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v24, v16 ; ZVFHMIN-NEXT: fmv.x.h a2, fa0 ; ZVFHMIN-NEXT: lui a1, 8 -; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma ; ZVFHMIN-NEXT: vmset.m v7 ; ZVFHMIN-NEXT: csrr a3, vlenb ; ZVFHMIN-NEXT: csrr a4, vlenb @@ -10001,11 +10001,11 @@ define @vfnmadd_vv_nxv32f16_unmasked_commuted( @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute( @vfnmsub_vv_nxv32f16_unmasked_commuted( @vfnmsub_vf_nxv32f16( %va, half ; ZVFHMIN-NEXT: add a1, a1, a2 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v24, v16 ; ZVFHMIN-NEXT: fmv.x.h a2, fa0 ; ZVFHMIN-NEXT: lui a3, 8 ; ZVFHMIN-NEXT: csrr a1, vlenb -; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v16, a2 ; ZVFHMIN-NEXT: csrr a2, vlenb ; ZVFHMIN-NEXT: slli a2, a2, 5 @@ -12075,11 +12075,11 @@ define @vfnmsub_vf_nxv32f16_commute( %v ; ZVFHMIN-NEXT: slli a1, a1, 5 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v24, v16 ; ZVFHMIN-NEXT: fmv.x.h a2, fa0 ; ZVFHMIN-NEXT: lui a3, 8 ; ZVFHMIN-NEXT: csrr a1, vlenb -; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v16, a2 ; ZVFHMIN-NEXT: csrr a2, vlenb ; ZVFHMIN-NEXT: slli a2, a2, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll index 1d471ab2404b1..a4f3a7d3a09a8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll @@ -227,6 +227,7 @@ define @vfmadd_vv_nxv32bf16( %va, < ; CHECK-NEXT: slli a1, a1, 5 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv8r.v v0, v16 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill @@ -237,7 +238,6 @@ define @vfmadd_vv_nxv32bf16( %va, < ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 24 @@ -314,6 +314,7 @@ define @vfmadd_vf_nxv32bf16( %va, < ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv8r.v v24, v16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -321,7 +322,6 @@ define @vfmadd_vf_nxv32bf16( %va, < ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: fmv.x.h a0, fa0 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill @@ -664,6 +664,7 @@ define @vfmadd_vv_nxv32f16( %va, @vfmadd_vv_nxv32f16( %va, @vfmadd_vf_nxv32f16( %va, @vfmadd_vf_nxv32f16( %va, @vfmadd_vv_nxv32bf16( %va, < ; ZVFH-NEXT: slli a1, a1, 5 ; ZVFH-NEXT: sub sp, sp, a1 ; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFH-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFH-NEXT: vmv8r.v v0, v16 ; ZVFH-NEXT: addi a1, sp, 16 ; ZVFH-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill @@ -236,7 +237,6 @@ define @vfmadd_vv_nxv32bf16( %va, < ; ZVFH-NEXT: add a0, sp, a0 ; ZVFH-NEXT: addi a0, a0, 16 ; ZVFH-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v16 ; ZVFH-NEXT: csrr a0, vlenb ; ZVFH-NEXT: slli a0, a0, 3 @@ -316,6 +316,7 @@ define @vfmadd_vv_nxv32bf16( %va, < ; ZVFHMIN-NEXT: slli a1, a1, 5 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v0, v16 ; ZVFHMIN-NEXT: addi a1, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill @@ -326,7 +327,6 @@ define @vfmadd_vv_nxv32bf16( %va, < ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v16 ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: li a1, 24 @@ -402,12 +402,12 @@ define @vfmadd_vf_nxv32bf16( %va, < ; ZVFH-NEXT: slli a0, a0, 5 ; ZVFH-NEXT: sub sp, sp, a0 ; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; 
ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFH-NEXT: vmv8r.v v0, v16 ; ZVFH-NEXT: addi a0, sp, 16 ; ZVFH-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; ZVFH-NEXT: vmv8r.v v16, v8 ; ZVFH-NEXT: fmv.x.h a0, fa0 -; ZVFH-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v16 ; ZVFH-NEXT: csrr a1, vlenb ; ZVFH-NEXT: slli a1, a1, 4 @@ -498,12 +498,12 @@ define @vfmadd_vf_nxv32bf16( %va, < ; ZVFHMIN-NEXT: slli a0, a0, 5 ; ZVFHMIN-NEXT: sub sp, sp, a0 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v0, v16 ; ZVFHMIN-NEXT: addi a0, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; ZVFHMIN-NEXT: vmv8r.v v16, v8 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0 -; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v16 ; ZVFHMIN-NEXT: csrr a1, vlenb ; ZVFHMIN-NEXT: slli a1, a1, 4 @@ -875,6 +875,7 @@ define @vfmadd_vv_nxv32f16( %va, @vfmadd_vv_nxv32f16( %va, @vfmadd_vf_nxv32f16( %va, @vfmax_vv_nxv32bf16( %va, @vfmax_vv_nxv32f16( %va, @vfmin_vv_nxv32bf16( %va, @vfmin_vv_nxv32f16( %va, @vfmul_vv_nxv32f16( %va, @vfmul_vf_nxv32f16( %va, half %b ; ZVFHMIN-NEXT: add a1, a2, a1 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v16, v8 ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 ; ZVFHMIN-NEXT: csrr a2, vlenb ; ZVFHMIN-NEXT: addi a3, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill -; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 ; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v16, a1 @@ -706,10 +706,10 @@ define @vfmul_vf_nxv32f16_unmasked( %va ; ZVFHMIN-NEXT: slli a1, a1, 4 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v16, v8 ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma ; ZVFHMIN-NEXT: vmset.m v7 ; ZVFHMIN-NEXT: addi a3, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll index d1702268f829f..901f3cd63fa9e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll @@ -1112,6 +1112,7 @@ define @vfma_vv_nxv16f64( %va, @vfma_vv_nxv16f64( %va, @llvm.vp.fneg.nxv16f64(, @vfneg_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv16f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sltu a2, a0, a3 ; CHECK-NEXT: addi a2, a2, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll index 3705e73fda492..b8ec285b5c34e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll @@ -329,6 +329,7 @@ 
define @vfnmsub_vv_nxv32f16( %va, @vfnmsub_vv_nxv32f16( %va, @vfnmsub_vv_nxv32f16( %va, @vfnmsub_vv_nxv32f16( %va, @vfnmsub_vf_nxv32f16( %va, @vfnmsub_vf_nxv32f16( %va, @llvm.vp.fpext.nxv32f32.nxv32f16( @vfpext_nxv32f16_nxv32f32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv32f16_nxv32f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: sltu a3, a0, a2 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll index cf195c7c0935e..d990c74c67d5a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll @@ -508,11 +508,11 @@ declare @llvm.vp.fptosi.nxv32i16.nxv32f32( @vfptosi_nxv32i16_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv32i16_nxv32f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: sltu a3, a0, a2 @@ -538,11 +538,11 @@ declare @llvm.vp.fptosi.nxv32i32.nxv32f32( @vfptosi_nxv32i32_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv32i32_nxv32f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: sltu a3, a0, a2 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll index 952d28604b86c..3b24a648d97f5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll @@ -508,11 +508,11 @@ declare @llvm.vp.fptoui.nxv32i16.nxv32f32( @vfptoui_nxv32i16_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv32i16_nxv32f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: sltu a3, a0, a2 @@ -538,11 +538,11 @@ declare @llvm.vp.fptoui.nxv32i32.nxv32f32( @vfptoui_nxv32i32_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv32i32_nxv32f32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: sltu a3, a0, a2 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll index 874813f057595..63156e1399293 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll @@ -102,13 +102,13 @@ define @vfptrunc_nxv16f32_nxv16f64( ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 
# sp + 16 + 8 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sltu a2, a0, a3 ; CHECK-NEXT: addi a2, a2, -1 @@ -147,6 +147,7 @@ define @vfptrunc_nxv32f32_nxv32f64( ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill @@ -160,7 +161,6 @@ define @vfptrunc_nxv32f32_nxv32f64( ; CHECK-NEXT: srli a5, a1, 2 ; CHECK-NEXT: slli a6, a1, 3 ; CHECK-NEXT: slli a4, a1, 1 -; CHECK-NEXT: vsetvli a7, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v0, a5 ; CHECK-NEXT: add a6, a0, a6 ; CHECK-NEXT: sub a5, a2, a4 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll index 8edcf23988c7f..8e57be1e0697c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll @@ -167,12 +167,12 @@ declare @llvm.vp.sqrt.nxv32bf16(, < define @vfsqrt_vv_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv32bf16: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v16, v0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a1, a2, 1 ; CHECK-NEXT: srli a2, a2, 2 ; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sltu a2, a0, a3 ; CHECK-NEXT: addi a2, a2, -1 @@ -452,12 +452,12 @@ define @vfsqrt_vv_nxv32f16( %va, @llvm.vp.sqrt.nxv16f64(, @vfsqrt_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv16f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sltu a2, a0, a3 ; CHECK-NEXT: addi a2, a2, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll index 25a80e66c4a52..d034f65479a15 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll @@ -373,11 +373,11 @@ define @vfsub_vv_nxv32bf16( %va, @vfsub_vf_nxv32bf16( %va, bf ; CHECK-NEXT: add a1, a2, a1 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: fmv.x.h a1, fa0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: addi a3, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli a3, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v20 ; CHECK-NEXT: vsetvli a3, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.x v16, a1 @@ -566,10 +566,10 @@ define @vfsub_vf_nxv32bf16_unmasked( @vfsub_vv_nxv32f16( %va, @vfsub_vf_nxv32f16( %va, half %b ; ZVFHMIN-NEXT: add a1, a2, a1 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 
0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v16, v8 ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 ; ZVFHMIN-NEXT: csrr a2, vlenb ; ZVFHMIN-NEXT: addi a3, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill -; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20 ; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v16, a1 @@ -1328,10 +1328,10 @@ define @vfsub_vf_nxv32f16_unmasked( %va ; ZVFHMIN-NEXT: slli a1, a1, 4 ; ZVFHMIN-NEXT: sub sp, sp, a1 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; ZVFHMIN-NEXT: vmv8r.v v16, v8 ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma ; ZVFHMIN-NEXT: vmset.m v7 ; ZVFHMIN-NEXT: addi a3, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll index 1a1472fcfc66f..ff4f3e24ec17b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll @@ -111,8 +111,8 @@ define @different_vl_with_ta( %a, @different_vl_with_tu( %passthru, %a, %b, iXLen %vl1, iXLen %vl2) { ; CHECK-LABEL: different_vl_with_tu: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv2r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vmv2r.v v14, v10 ; CHECK-NEXT: vadd.vv v14, v10, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vadd.vv v8, v14, v10 @@ -126,8 +126,8 @@ define @different_vl_with_tu( %passthru, @different_imm_vl_with_tu( %passthru, %a, %b, iXLen %vl1, iXLen %vl2) { ; CHECK-LABEL: different_imm_vl_with_tu: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv2r.v v14, v10 ; CHECK-NEXT: vsetivli zero, 5, e32, m2, tu, ma +; CHECK-NEXT: vmv2r.v v14, v10 ; CHECK-NEXT: vadd.vv v14, v10, v12 ; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; CHECK-NEXT: vadd.vv v8, v14, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll index 1516d656663b6..d8bff08ea5513 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll @@ -51,9 +51,9 @@ entry: define @test_vlseg2ff_mask_dead_vl(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask) { ; CHECK-LABEL: test_vlseg2ff_mask_dead_vl: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll index b89097b8ff974..03aee1881503a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll @@ -25,9 +25,9 @@ entry: define @test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -64,9 +64,9 
@@ entry: define @test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -103,9 +103,9 @@ entry: define @test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -142,9 +142,9 @@ entry: define @test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -181,9 +181,9 @@ entry: define @test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -220,9 +220,9 @@ entry: define @test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -259,10 +259,10 @@ entry: define @test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -299,10 +299,10 @@ entry: define @test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v 
v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -339,10 +339,10 @@ entry: define @test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -379,10 +379,10 @@ entry: define @test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -419,10 +419,10 @@ entry: define @test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -459,11 +459,11 @@ entry: define @test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -500,11 +500,11 @@ entry: define @test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -541,11 +541,11 @@ entry: define @test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; 
CHECK-NEXT: sw a0, 0(a2) @@ -582,11 +582,11 @@ entry: define @test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -623,11 +623,11 @@ entry: define @test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -664,12 +664,12 @@ entry: define @test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -706,12 +706,12 @@ entry: define @test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -748,12 +748,12 @@ entry: define @test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -790,12 +790,12 @@ entry: define @test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, 
m1, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -832,13 +832,13 @@ entry: define @test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -875,13 +875,13 @@ entry: define @test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -918,13 +918,13 @@ entry: define @test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -961,13 +961,13 @@ entry: define @test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1004,6 +1004,7 @@ entry: define @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1011,7 +1012,6 @@ define @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1048,6 +1048,7 @@ entry: define @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) 
%val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1055,7 +1056,6 @@ define @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1092,6 +1092,7 @@ entry: define @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1099,7 +1100,6 @@ define @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1136,6 +1136,7 @@ entry: define @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1143,7 +1144,6 @@ define @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1180,6 +1180,7 @@ entry: define @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1188,7 +1189,6 @@ define @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1225,6 +1225,7 @@ entry: define @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1233,7 +1234,6 @@ define @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1270,6 +1270,7 @@ 
entry: define @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1278,7 +1279,6 @@ define @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1315,6 +1315,7 @@ entry: define @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1323,7 +1324,6 @@ define @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1359,9 +1359,9 @@ entry: define @test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1397,9 +1397,9 @@ entry: define @test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1435,9 +1435,9 @@ entry: define @test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1473,9 +1473,9 @@ entry: define @test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) 
@@ -1511,9 +1511,9 @@ entry: define @test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1549,10 +1549,10 @@ entry: define @test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1588,10 +1588,10 @@ entry: define @test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1627,10 +1627,10 @@ entry: define @test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1666,10 +1666,10 @@ entry: define @test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1705,11 +1705,11 @@ entry: define @test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1745,11 +1745,11 @@ entry: define @test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1785,11 +1785,11 @@ entry: define @test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1825,11 +1825,11 @@ entry: define @test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1865,12 +1865,12 @@ entry: define @test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1906,12 +1906,12 @@ entry: define @test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1947,12 +1947,12 @@ entry: define @test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1988,13 +1988,13 @@ entry: define 
@test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2030,13 +2030,13 @@ entry: define @test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2072,13 +2072,13 @@ entry: define @test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2114,6 +2114,7 @@ entry: define @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2121,7 +2122,6 @@ define @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2157,6 +2157,7 @@ entry: define @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2164,7 +2165,6 @@ define @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2200,6 +2200,7 @@ entry: define @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, 
ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2207,7 +2208,6 @@ define @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2243,6 +2243,7 @@ entry: define @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2251,7 +2252,6 @@ define @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2287,6 +2287,7 @@ entry: define @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2295,7 +2296,6 @@ define @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2331,6 +2331,7 @@ entry: define @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2339,7 +2340,6 @@ define @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2375,9 +2375,9 @@ entry: define @test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2413,9 +2413,9 @@ entry: define @test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2451,9 +2451,9 @@ entry: define @test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2489,9 +2489,9 @@ entry: define @test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2527,10 +2527,10 @@ entry: define @test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2566,10 +2566,10 @@ entry: define @test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2605,10 +2605,10 @@ entry: define @test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2644,11 +2644,11 @@ entry: define @test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, 
a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2684,11 +2684,11 @@ entry: define @test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2724,11 +2724,11 @@ entry: define @test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2764,12 +2764,12 @@ entry: define @test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2805,12 +2805,12 @@ entry: define @test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2846,13 +2846,13 @@ entry: define @test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2888,13 +2888,13 @@ entry: define @test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 
; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2930,6 +2930,7 @@ entry: define @test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2937,7 +2938,6 @@ define @test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2973,6 +2973,7 @@ entry: define @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2980,7 +2981,6 @@ define @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3016,6 +3016,7 @@ entry: define @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -3024,7 +3025,6 @@ define @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3060,6 +3060,7 @@ entry: define @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -3068,7 +3069,6 @@ define @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3104,9 +3104,9 @@ entry: define @test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, 
ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3142,9 +3142,9 @@ entry: define @test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3180,9 +3180,9 @@ entry: define @test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3218,10 +3218,10 @@ entry: define @test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3257,10 +3257,10 @@ entry: define @test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3296,11 +3296,11 @@ entry: define @test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3336,11 +3336,11 @@ entry: define @test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; 
CHECK-NEXT: sw a0, 0(a2) @@ -3376,12 +3376,12 @@ entry: define @test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3417,13 +3417,13 @@ entry: define @test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3459,6 +3459,7 @@ entry: define @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -3466,7 +3467,6 @@ define @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3502,6 +3502,7 @@ entry: define @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -3510,7 +3511,6 @@ define @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3545,9 +3545,9 @@ entry: define @test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3582,9 +3582,9 @@ entry: define @test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3619,9 +3619,9 @@ entry: define @test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3656,9 +3656,9 @@ entry: define @test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3693,9 +3693,9 @@ entry: define @test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3730,10 +3730,10 @@ entry: define @test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3768,10 +3768,10 @@ entry: define @test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3806,10 +3806,10 @@ entry: define @test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3844,10 
+3844,10 @@ entry: define @test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3882,11 +3882,11 @@ entry: define @test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3921,11 +3921,11 @@ entry: define @test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3960,11 +3960,11 @@ entry: define @test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3999,11 +3999,11 @@ entry: define @test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4038,12 +4038,12 @@ entry: define @test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4078,12 
+4078,12 @@ entry: define @test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4118,12 +4118,12 @@ entry: define @test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4158,13 +4158,13 @@ entry: define @test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4199,13 +4199,13 @@ entry: define @test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4240,13 +4240,13 @@ entry: define @test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4281,6 +4281,7 @@ entry: define @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; 
CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -4288,7 +4289,6 @@ define @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4323,6 +4323,7 @@ entry: define @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -4330,7 +4331,6 @@ define @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4365,6 +4365,7 @@ entry: define @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -4372,7 +4373,6 @@ define @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4407,6 +4407,7 @@ entry: define @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -4415,7 +4416,6 @@ define @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4450,6 +4450,7 @@ entry: define @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -4458,7 +4459,6 @@ define @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4493,6 +4493,7 @@ entry: define @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -4501,7 +4502,6 @@ define @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4536,9 +4536,9 @@ entry: define @test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4573,9 +4573,9 @@ entry: define @test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4610,9 +4610,9 @@ entry: define @test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4647,9 +4647,9 @@ entry: define @test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4684,10 +4684,10 @@ entry: define @test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4722,10 +4722,10 @@ entry: define @test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; 
CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4760,10 +4760,10 @@ entry: define @test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4798,11 +4798,11 @@ entry: define @test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4837,11 +4837,11 @@ entry: define @test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4876,11 +4876,11 @@ entry: define @test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4915,12 +4915,12 @@ entry: define @test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4955,12 +4955,12 @@ entry: define @test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; 
CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4995,13 +4995,13 @@ entry: define @test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5036,13 +5036,13 @@ entry: define @test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5077,6 +5077,7 @@ entry: define @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5084,7 +5085,6 @@ define @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5119,6 +5119,7 @@ entry: define @test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5126,7 +5127,6 @@ define @test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5161,6 +5161,7 @@ entry: define @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5169,7 +5170,6 @@ define @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4 ; CHECK-NEXT: vmv1r.v 
v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5204,6 +5204,7 @@ entry: define @test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5212,7 +5213,6 @@ define @test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5247,9 +5247,9 @@ entry: define @test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5284,9 +5284,9 @@ entry: define @test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5321,9 +5321,9 @@ entry: define @test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5358,10 +5358,10 @@ entry: define @test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5396,10 +5396,10 @@ entry: define @test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, 
ta, mu ; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5434,11 +5434,11 @@ entry: define @test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5473,11 +5473,11 @@ entry: define @test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5512,12 +5512,12 @@ entry: define @test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5552,13 +5552,13 @@ entry: define @test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5593,6 +5593,7 @@ entry: define @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5600,7 +5601,6 @@ define @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5635,6 +5635,7 @@ entry: define @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5643,7 +5644,6 @@ define @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5678,9 +5678,9 @@ entry: define @test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5715,9 +5715,9 @@ entry: define @test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5752,9 +5752,9 @@ entry: define @test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5789,9 +5789,9 @@ entry: define @test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5826,9 +5826,9 @@ entry: define @test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5863,10 +5863,10 @@ entry: define @test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5901,10 +5901,10 @@ entry: define @test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5939,10 +5939,10 @@ entry: define @test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5977,10 +5977,10 @@ entry: define @test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6015,11 +6015,11 @@ entry: define @test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6054,11 +6054,11 @@ entry: define @test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6093,11 +6093,11 @@ entry: define @test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), 
v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6132,11 +6132,11 @@ entry: define @test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6171,12 +6171,12 @@ entry: define @test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6211,12 +6211,12 @@ entry: define @test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6251,12 +6251,12 @@ entry: define @test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6291,13 +6291,13 @@ entry: define @test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6332,13 +6332,13 @@ entry: define @test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: 
vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6373,13 +6373,13 @@ entry: define @test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6414,6 +6414,7 @@ entry: define @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -6421,7 +6422,6 @@ define @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nx ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6456,6 +6456,7 @@ entry: define @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -6463,7 +6464,6 @@ define @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nx ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6498,6 +6498,7 @@ entry: define @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -6505,7 +6506,6 @@ define @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nx ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6540,6 +6540,7 @@ entry: define @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, 
v10 @@ -6548,7 +6549,6 @@ define @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nx ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6583,6 +6583,7 @@ entry: define @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -6591,7 +6592,6 @@ define @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nx ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -6626,6 +6626,7 @@ entry: define @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -6634,7 +6635,6 @@ define @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nx ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll index 3dc0db90b6d85..05a5be295cc71 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll @@ -51,9 +51,9 @@ entry: define @test_vlseg2ff_mask_dead_vl(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg2ff_mask_dead_vl: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll index 68acb3beb0686..85008ff97143f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll @@ -25,9 +25,9 @@ entry: define @test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -64,9 +64,9 @@ entry: define @test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, 
mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -103,9 +103,9 @@ entry: define @test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -142,9 +142,9 @@ entry: define @test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -181,9 +181,9 @@ entry: define @test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -220,9 +220,9 @@ entry: define @test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -259,10 +259,10 @@ entry: define @test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -299,10 +299,10 @@ entry: define @test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -339,10 +339,10 @@ entry: define @test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr 
%base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -379,10 +379,10 @@ entry: define @test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -419,10 +419,10 @@ entry: define @test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -459,11 +459,11 @@ entry: define @test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -500,11 +500,11 @@ entry: define @test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -541,11 +541,11 @@ entry: define @test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -582,11 +582,11 @@ entry: define @test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -623,11 +623,11 @@ entry: define @test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -664,12 +664,12 @@ entry: define @test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -706,12 +706,12 @@ entry: define @test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -748,12 +748,12 @@ entry: define @test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -790,12 +790,12 @@ entry: define @test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -832,13 +832,13 @@ entry: define @test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -875,13 +875,13 @@ entry: define @test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -918,13 +918,13 @@ entry: define @test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -961,13 +961,13 @@ entry: define @test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1004,6 +1004,7 @@ entry: define @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1011,7 +1012,6 @@ define @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1048,6 +1048,7 @@ entry: define @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1055,7 
+1056,6 @@ define @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1092,6 +1092,7 @@ entry: define @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1099,7 +1100,6 @@ define @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1136,6 +1136,7 @@ entry: define @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1143,7 +1144,6 @@ define @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1180,6 +1180,7 @@ entry: define @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1188,7 +1189,6 @@ define @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1225,6 +1225,7 @@ entry: define @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1233,7 +1234,6 @@ define @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1270,6 +1270,7 @@ entry: define @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, 
ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1278,7 +1279,6 @@ define @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1315,6 +1315,7 @@ entry: define @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -1323,7 +1324,6 @@ define @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1359,9 +1359,9 @@ entry: define @test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1397,9 +1397,9 @@ entry: define @test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1435,9 +1435,9 @@ entry: define @test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1473,9 +1473,9 @@ entry: define @test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1511,9 +1511,9 @@ entry: define @test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1549,10 +1549,10 @@ entry: define @test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1588,10 +1588,10 @@ entry: define @test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1627,10 +1627,10 @@ entry: define @test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1666,10 +1666,10 @@ entry: define @test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1705,11 +1705,11 @@ entry: define @test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1745,11 +1745,11 @@ entry: define @test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli 
zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1785,11 +1785,11 @@ entry: define @test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1825,11 +1825,11 @@ entry: define @test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1865,12 +1865,12 @@ entry: define @test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1906,12 +1906,12 @@ entry: define @test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1947,12 +1947,12 @@ entry: define @test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1988,13 +1988,13 @@ entry: define @test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v 
v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2030,13 +2030,13 @@ entry: define @test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2072,13 +2072,13 @@ entry: define @test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2114,6 +2114,7 @@ entry: define @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2121,7 +2122,6 @@ define @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2157,6 +2157,7 @@ entry: define @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2164,7 +2165,6 @@ define @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2200,6 +2200,7 @@ entry: define @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2207,7 +2208,6 @@ define @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8 ; 
CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2243,6 +2243,7 @@ entry: define @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2251,7 +2252,6 @@ define @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2287,6 +2287,7 @@ entry: define @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2295,7 +2296,6 @@ define @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2331,6 +2331,7 @@ entry: define @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2339,7 +2340,6 @@ define @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2375,9 +2375,9 @@ entry: define @test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2413,9 +2413,9 @@ entry: define @test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2451,9 +2451,9 @@ entry: define 
@test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2489,9 +2489,9 @@ entry: define @test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2527,10 +2527,10 @@ entry: define @test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2566,10 +2566,10 @@ entry: define @test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2605,10 +2605,10 @@ entry: define @test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2644,11 +2644,11 @@ entry: define @test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2684,11 +2684,11 @@ entry: define @test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2724,11 +2724,11 @@ entry: define @test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2764,12 +2764,12 @@ entry: define @test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2805,12 +2805,12 @@ entry: define @test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2846,13 +2846,13 @@ entry: define @test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2888,13 +2888,13 @@ entry: define @test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2930,6 +2930,7 @@ entry: define 
@test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2937,7 +2938,6 @@ define @test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2973,6 +2973,7 @@ entry: define @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -2980,7 +2981,6 @@ define @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3016,6 +3016,7 @@ entry: define @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -3024,7 +3025,6 @@ define @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3060,6 +3060,7 @@ entry: define @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -3068,7 +3069,6 @@ define @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3104,9 +3104,9 @@ entry: define @test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3142,9 +3142,9 @@ entry: define @test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", 
, 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3180,9 +3180,9 @@ entry: define @test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3218,10 +3218,10 @@ entry: define @test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3257,10 +3257,10 @@ entry: define @test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3296,11 +3296,11 @@ entry: define @test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3336,11 +3336,11 @@ entry: define @test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3376,12 +3376,12 @@ entry: define @test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, 
e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3417,13 +3417,13 @@ entry: define @test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3459,6 +3459,7 @@ entry: define @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -3466,7 +3467,6 @@ define @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3502,6 +3502,7 @@ entry: define @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -3510,7 +3511,6 @@ define @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3545,9 +3545,9 @@ entry: define @test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3582,9 +3582,9 @@ entry: define @test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3619,9 +3619,9 @@ entry: define 
@test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3656,9 +3656,9 @@ entry: define @test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3693,9 +3693,9 @@ entry: define @test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3730,10 +3730,10 @@ entry: define @test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3768,10 +3768,10 @@ entry: define @test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3806,10 +3806,10 @@ entry: define @test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3844,10 +3844,10 @@ entry: define @test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, 
ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3882,11 +3882,11 @@ entry: define @test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3921,11 +3921,11 @@ entry: define @test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3960,11 +3960,11 @@ entry: define @test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3999,11 +3999,11 @@ entry: define @test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4038,12 +4038,12 @@ entry: define @test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4078,12 +4078,12 @@ entry: define @test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, 
ta, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
; CHECK-NEXT: vmv1r.v v10, v11
; CHECK-NEXT: vmv1r.v v11, v12
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
@@ -4118,12 +4118,12 @@ entry:
define @test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
; CHECK-NEXT: vmv1r.v v10, v11
; CHECK-NEXT: vmv1r.v v11, v12
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
@@ -4158,13 +4158,13 @@ entry:
define @test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
; CHECK-NEXT: vmv1r.v v10, v11
; CHECK-NEXT: vmv1r.v v11, v12
; CHECK-NEXT: vmv1r.v v12, v13
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
@@ -4199,13 +4199,13 @@ entry:
define @test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
; CHECK-NEXT: vmv1r.v v10, v11
; CHECK-NEXT: vmv1r.v v11, v12
; CHECK-NEXT: vmv1r.v v12, v13
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
@@ -4240,13 +4240,13 @@ entry:
define @test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
; CHECK-NEXT: vmv1r.v v10, v11
; CHECK-NEXT: vmv1r.v v11, v12
; CHECK-NEXT: vmv1r.v v12, v13
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
@@ -4281,6 +4281,7 @@ entry:
define @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4288,7 +4289,6 @@ define @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i
; CHECK-NEXT: vmv1r.v v11, v12
; CHECK-NEXT: vmv1r.v v12, v13
; CHECK-NEXT: vmv1r.v v13, v14
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
@@ -4323,6 +4323,7 @@ entry:
define @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4330,7 +4331,6 @@ define @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i
; CHECK-NEXT: vmv1r.v v11, v12
; CHECK-NEXT: vmv1r.v v12, v13
; CHECK-NEXT: vmv1r.v v13, v14
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
@@ -4365,6 +4365,7 @@ entry:
define @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4372,7 +4373,6 @@ define @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i
; CHECK-NEXT: vmv1r.v v11, v12
; CHECK-NEXT: vmv1r.v v12, v13
; CHECK-NEXT: vmv1r.v v13, v14
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
@@ -4407,6 +4407,7 @@ entry:
define @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4415,7 +4416,6 @@ define @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i
; CHECK-NEXT: vmv1r.v v12, v13
; CHECK-NEXT: vmv1r.v v13, v14
; CHECK-NEXT: vmv1r.v v14, v15
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
@@ -4450,6 +4450,7 @@ entry:
define @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4458,7 +4459,6 @@ define @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i
; CHECK-NEXT: vmv1r.v v12, v13
; CHECK-NEXT: vmv1r.v v13, v14
; CHECK-NEXT: vmv1r.v v14, v15
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
@@ -4493,6 +4493,7 @@ entry:
define @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4501,7 +4502,6 @@ define @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i
; CHECK-NEXT: vmv1r.v v12, v13
; CHECK-NEXT: vmv1r.v v13,
v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4536,9 +4536,9 @@ entry: define @test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4573,9 +4573,9 @@ entry: define @test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4610,9 +4610,9 @@ entry: define @test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4647,9 +4647,9 @@ entry: define @test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4684,10 +4684,10 @@ entry: define @test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4722,10 +4722,10 @@ entry: define @test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4760,10 +4760,10 @@ entry: define @test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, 
ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4798,11 +4798,11 @@ entry: define @test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4837,11 +4837,11 @@ entry: define @test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4876,11 +4876,11 @@ entry: define @test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4915,12 +4915,12 @@ entry: define @test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4955,12 +4955,12 @@ entry: define @test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4995,13 +4995,13 @@ entry: define 
@test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5036,13 +5036,13 @@ entry: define @test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5077,6 +5077,7 @@ entry: define @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5084,7 +5085,6 @@ define @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5119,6 +5119,7 @@ entry: define @test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5126,7 +5127,6 @@ define @test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5161,6 +5161,7 @@ entry: define @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5169,7 +5170,6 @@ define @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5204,6 +5204,7 @@ entry: define 
@test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5212,7 +5213,6 @@ define @test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5247,9 +5247,9 @@ entry: define @test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5284,9 +5284,9 @@ entry: define @test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5321,9 +5321,9 @@ entry: define @test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5358,10 +5358,10 @@ entry: define @test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5396,10 +5396,10 @@ entry: define @test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5434,11 +5434,11 @@ entry: define @test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, 
i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5473,11 +5473,11 @@ entry: define @test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5512,12 +5512,12 @@ entry: define @test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5552,13 +5552,13 @@ entry: define @test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5593,6 +5593,7 @@ entry: define @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5600,7 +5601,6 @@ define @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5635,6 +5635,7 @@ entry: define @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -5643,7 +5644,6 @@ define @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv ; CHECK-NEXT: 
vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5678,9 +5678,9 @@ entry: define @test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5715,9 +5715,9 @@ entry: define @test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5752,9 +5752,9 @@ entry: define @test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5789,9 +5789,9 @@ entry: define @test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5826,9 +5826,9 @@ entry: define @test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vmv4r.v v8, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5863,10 +5863,10 @@ entry: define @test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5901,10 +5901,10 @@ entry: define 
@test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5939,10 +5939,10 @@ entry: define @test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5977,10 +5977,10 @@ entry: define @test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6015,11 +6015,11 @@ entry: define @test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6054,11 +6054,11 @@ entry: define @test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6093,11 +6093,11 @@ entry: define @test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6132,11 +6132,11 @@ entry: define 
@test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: vmv2r.v v12, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6171,12 +6171,12 @@ entry: define @test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6211,12 +6211,12 @@ entry: define @test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6251,12 +6251,12 @@ entry: define @test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6291,13 +6291,13 @@ entry: define @test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6332,13 +6332,13 @@ entry: define @test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: 
vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6373,13 +6373,13 @@ entry: define @test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 ; CHECK-NEXT: vmv1r.v v10, v11 ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6414,6 +6414,7 @@ entry: define @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -6421,7 +6422,6 @@ define @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nx ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6456,6 +6456,7 @@ entry: define @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -6463,7 +6464,6 @@ define @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nx ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6498,6 +6498,7 @@ entry: define @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -6505,7 +6506,6 @@ define @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nx ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6540,6 +6540,7 @@ entry: define @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -6548,7 +6549,6 @@ define @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nx ; 
CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6583,6 +6583,7 @@ entry: define @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -6591,7 +6592,6 @@ define @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nx ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -6626,6 +6626,7 @@ entry: define @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: vmv1r.v v9, v10 @@ -6634,7 +6635,6 @@ define @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nx ; CHECK-NEXT: vmv1r.v v12, v13 ; CHECK-NEXT: vmv1r.v v13, v14 ; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll index 0b553d3cd6fdf..b839cd595f3be 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll @@ -412,8 +412,8 @@ declare @llvm.vp.smax.nxv128i8(, @vmax_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -974,11 +974,11 @@ declare @llvm.vp.smax.nxv32i32(, @vmax_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_nxv32i32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a3, a2, 2 ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: sub a3, a1, a2 ; CHECK-NEXT: sltu a4, a1, a3 @@ -1034,11 +1034,11 @@ declare i32 @llvm.vscale.i32() define @vmax_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; CHECK-LABEL: vmax_vx_nxv32i32_evl_nx8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a3, a1, 2 ; CHECK-NEXT: slli a2, a1, 1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: sub a3, a1, a2 ; CHECK-NEXT: sltu a4, a1, a3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll index f6be882f74206..99e0dfaf90a2d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll @@ -410,8 +410,8 @@ declare @llvm.vp.umax.nxv128i8(, @vmaxu_vx_nxv128i8( %va, i8 %b, 
%m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -972,11 +972,11 @@ declare @llvm.vp.umax.nxv32i32(, @vmaxu_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_nxv32i32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a3, a2, 2 ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: sub a3, a1, a2 ; CHECK-NEXT: sltu a4, a1, a3 @@ -1032,11 +1032,11 @@ declare i32 @llvm.vscale.i32() define @vmaxu_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; CHECK-LABEL: vmaxu_vx_nxv32i32_evl_nx8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a3, a1, 2 ; CHECK-NEXT: slli a2, a1, 1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: sub a3, a1, a2 ; CHECK-NEXT: sltu a4, a1, a3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll index 3ebfc68ddee4b..babf8de57b7ea 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmfeq.mask.nxv1f16( define @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmfeq.mask.nxv2f16( define @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmfeq.mask.nxv4f16( define @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmfeq.mask.nxv8f16( define @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v10 ; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmfeq.mask.nxv16f16( define @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; 
CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v12 ; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmfeq.mask.nxv1f32( define @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmfeq.mask.nxv2f32( define @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmfeq.mask.nxv4f32( define @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v10 ; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmfeq.mask.nxv8f32( define @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v12 ; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmfeq.mask.nxv1f64( define @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmfeq.mask.nxv2f64( define @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v10 ; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmfeq.mask.nxv4f64( define @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfeq.vv v0, v8, v12 ; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -646,9 +646,9 @@ declare @llvm.riscv.vmfeq.mask.nxv1f16.f16( define @intrinsic_vmfeq_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: 
vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -693,9 +693,9 @@ declare @llvm.riscv.vmfeq.mask.nxv2f16.f16( define @intrinsic_vmfeq_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -740,9 +740,9 @@ declare @llvm.riscv.vmfeq.mask.nxv4f16.f16( define @intrinsic_vmfeq_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -787,9 +787,9 @@ declare @llvm.riscv.vmfeq.mask.nxv8f16.f16( define @intrinsic_vmfeq_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -834,9 +834,9 @@ declare @llvm.riscv.vmfeq.mask.nxv16f16.f16( define @intrinsic_vmfeq_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -881,9 +881,9 @@ declare @llvm.riscv.vmfeq.mask.nxv1f32.f32( define @intrinsic_vmfeq_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -928,9 +928,9 @@ declare @llvm.riscv.vmfeq.mask.nxv2f32.f32( define @intrinsic_vmfeq_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -975,9 +975,9 @@ declare @llvm.riscv.vmfeq.mask.nxv4f32.f32( define @intrinsic_vmfeq_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1022,9 
+1022,9 @@ declare @llvm.riscv.vmfeq.mask.nxv8f32.f32( define @intrinsic_vmfeq_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1069,9 +1069,9 @@ declare @llvm.riscv.vmfeq.mask.nxv1f64.f64( define @intrinsic_vmfeq_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1116,9 +1116,9 @@ declare @llvm.riscv.vmfeq.mask.nxv2f64.f64( define @intrinsic_vmfeq_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1163,9 +1163,9 @@ declare @llvm.riscv.vmfeq.mask.nxv4f64.f64( define @intrinsic_vmfeq_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll index e041e5874a8dc..4a9dd2f7d769d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmfge.mask.nxv1f16( define @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmfge.mask.nxv2f16( define @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmfge.mask.nxv4f16( define @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmfge.mask.nxv8f16( define 
@intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfle.vv v0, v10, v8 ; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmfge.mask.nxv16f16( define @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfle.vv v0, v12, v8 ; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmfge.mask.nxv1f32( define @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmfge.mask.nxv2f32( define @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmfge.mask.nxv4f32( define @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfle.vv v0, v10, v8 ; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmfge.mask.nxv8f32( define @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfle.vv v0, v12, v8 ; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmfge.mask.nxv1f64( define @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmfge.mask.nxv2f64( define @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfle.vv v0, v10, v8 ; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -595,8 +595,8 @@ declare 
@llvm.riscv.vmfge.mask.nxv4f64( define @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfle.vv v0, v12, v8 ; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -646,9 +646,9 @@ declare @llvm.riscv.vmfge.mask.nxv1f16.f16( define @intrinsic_vmfge_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -693,9 +693,9 @@ declare @llvm.riscv.vmfge.mask.nxv2f16.f16( define @intrinsic_vmfge_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -740,9 +740,9 @@ declare @llvm.riscv.vmfge.mask.nxv4f16.f16( define @intrinsic_vmfge_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -787,9 +787,9 @@ declare @llvm.riscv.vmfge.mask.nxv8f16.f16( define @intrinsic_vmfge_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -834,9 +834,9 @@ declare @llvm.riscv.vmfge.mask.nxv16f16.f16( define @intrinsic_vmfge_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -881,9 +881,9 @@ declare @llvm.riscv.vmfge.mask.nxv1f32.f32( define @intrinsic_vmfge_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -928,9 +928,9 @@ declare @llvm.riscv.vmfge.mask.nxv2f32.f32( define @intrinsic_vmfge_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -975,9 +975,9 @@ declare @llvm.riscv.vmfge.mask.nxv4f32.f32( define @intrinsic_vmfge_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1022,9 +1022,9 @@ declare @llvm.riscv.vmfge.mask.nxv8f32.f32( define @intrinsic_vmfge_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1069,9 +1069,9 @@ declare @llvm.riscv.vmfge.mask.nxv1f64.f64( define @intrinsic_vmfge_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1116,9 +1116,9 @@ declare @llvm.riscv.vmfge.mask.nxv2f64.f64( define @intrinsic_vmfge_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1163,9 +1163,9 @@ declare @llvm.riscv.vmfge.mask.nxv4f64.f64( define @intrinsic_vmfge_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll index 0faaf4ebf255d..c9c5e84937cec 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmfgt.mask.nxv1f16( define @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmfgt.mask.nxv2f16( define @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmfgt.mask.nxv4f16( define @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmfgt.mask.nxv8f16( define @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmflt.vv v0, v10, v8 ; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmfgt.mask.nxv16f16( define @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmflt.vv v0, v12, v8 ; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmfgt.mask.nxv1f32( define @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmfgt.mask.nxv2f32( define @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmfgt.mask.nxv4f32( define @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmflt.vv v0, v10, v8 ; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmfgt.mask.nxv8f32( define @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmflt.vv v0, v12, v8 ; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmfgt.mask.nxv1f64( define @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmfgt.mask.nxv2f64( define @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmflt.vv v0, v10, v8 ; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmfgt.mask.nxv4f64( define @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmflt.vv v0, v12, v8 ; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -646,9 +646,9 @@ declare @llvm.riscv.vmfgt.mask.nxv1f16.f16( define @intrinsic_vmfgt_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -693,9 +693,9 @@ declare @llvm.riscv.vmfgt.mask.nxv2f16.f16( define @intrinsic_vmfgt_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -740,9 +740,9 @@ declare @llvm.riscv.vmfgt.mask.nxv4f16.f16( define @intrinsic_vmfgt_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -787,9 +787,9 @@ declare @llvm.riscv.vmfgt.mask.nxv8f16.f16( define @intrinsic_vmfgt_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -834,9 +834,9 @@ declare @llvm.riscv.vmfgt.mask.nxv16f16.f16( define @intrinsic_vmfgt_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -881,9 +881,9 @@ declare 
@llvm.riscv.vmfgt.mask.nxv1f32.f32( define @intrinsic_vmfgt_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -928,9 +928,9 @@ declare @llvm.riscv.vmfgt.mask.nxv2f32.f32( define @intrinsic_vmfgt_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -975,9 +975,9 @@ declare @llvm.riscv.vmfgt.mask.nxv4f32.f32( define @intrinsic_vmfgt_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1022,9 +1022,9 @@ declare @llvm.riscv.vmfgt.mask.nxv8f32.f32( define @intrinsic_vmfgt_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1069,9 +1069,9 @@ declare @llvm.riscv.vmfgt.mask.nxv1f64.f64( define @intrinsic_vmfgt_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1116,9 +1116,9 @@ declare @llvm.riscv.vmfgt.mask.nxv2f64.f64( define @intrinsic_vmfgt_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1163,9 +1163,9 @@ declare @llvm.riscv.vmfgt.mask.nxv4f64.f64( define @intrinsic_vmfgt_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll index ef5de6bc3481f..77d8dda258961 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll @@ 
-34,8 +34,8 @@ declare @llvm.riscv.vmfle.mask.nxv1f16( define @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmfle.mask.nxv2f16( define @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmfle.mask.nxv4f16( define @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmfle.mask.nxv8f16( define @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v10 ; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmfle.mask.nxv16f16( define @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v12 ; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmfle.mask.nxv1f32( define @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmfle.mask.nxv2f32( define @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmfle.mask.nxv4f32( define @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v10 ; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t ; CHECK-NEXT: 
vmv1r.v v0, v14 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmfle.mask.nxv8f32( define @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v12 ; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmfle.mask.nxv1f64( define @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmfle.mask.nxv2f64( define @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v10 ; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmfle.mask.nxv4f64( define @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfle.vv v0, v8, v12 ; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -646,9 +646,9 @@ declare @llvm.riscv.vmfle.mask.nxv1f16.f16( define @intrinsic_vmfle_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -693,9 +693,9 @@ declare @llvm.riscv.vmfle.mask.nxv2f16.f16( define @intrinsic_vmfle_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -740,9 +740,9 @@ declare @llvm.riscv.vmfle.mask.nxv4f16.f16( define @intrinsic_vmfle_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -787,9 +787,9 @@ declare @llvm.riscv.vmfle.mask.nxv8f16.f16( define @intrinsic_vmfle_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; 
CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -834,9 +834,9 @@ declare @llvm.riscv.vmfle.mask.nxv16f16.f16( define @intrinsic_vmfle_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -881,9 +881,9 @@ declare @llvm.riscv.vmfle.mask.nxv1f32.f32( define @intrinsic_vmfle_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -928,9 +928,9 @@ declare @llvm.riscv.vmfle.mask.nxv2f32.f32( define @intrinsic_vmfle_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -975,9 +975,9 @@ declare @llvm.riscv.vmfle.mask.nxv4f32.f32( define @intrinsic_vmfle_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1022,9 +1022,9 @@ declare @llvm.riscv.vmfle.mask.nxv8f32.f32( define @intrinsic_vmfle_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1069,9 +1069,9 @@ declare @llvm.riscv.vmfle.mask.nxv1f64.f64( define @intrinsic_vmfle_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1116,9 +1116,9 @@ declare @llvm.riscv.vmfle.mask.nxv2f64.f64( define @intrinsic_vmfle_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1163,9 +1163,9 @@ 
declare @llvm.riscv.vmfle.mask.nxv4f64.f64( define @intrinsic_vmfle_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll index 0b7740d5e0045..0fdae8abe8f6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmflt.mask.nxv1f16( define @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmflt.mask.nxv2f16( define @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmflt.mask.nxv4f16( define @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmflt.mask.nxv8f16( define @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v10 ; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmflt.mask.nxv16f16( define @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v12 ; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmflt.mask.nxv1f32( define @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmflt.mask.nxv2f32( define @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmflt.mask.nxv4f32( define @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v10 ; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmflt.mask.nxv8f32( define @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v12 ; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmflt.mask.nxv1f64( define @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmflt.mask.nxv2f64( define @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v10 ; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmflt.mask.nxv4f64( define @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmflt.vv v0, v8, v12 ; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -646,9 +646,9 @@ declare @llvm.riscv.vmflt.mask.nxv1f16.f16( define @intrinsic_vmflt_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -693,9 +693,9 @@ declare @llvm.riscv.vmflt.mask.nxv2f16.f16( define @intrinsic_vmflt_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -740,9 +740,9 @@ declare @llvm.riscv.vmflt.mask.nxv4f16.f16( define 
@intrinsic_vmflt_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -787,9 +787,9 @@ declare @llvm.riscv.vmflt.mask.nxv8f16.f16( define @intrinsic_vmflt_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -834,9 +834,9 @@ declare @llvm.riscv.vmflt.mask.nxv16f16.f16( define @intrinsic_vmflt_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -881,9 +881,9 @@ declare @llvm.riscv.vmflt.mask.nxv1f32.f32( define @intrinsic_vmflt_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -928,9 +928,9 @@ declare @llvm.riscv.vmflt.mask.nxv2f32.f32( define @intrinsic_vmflt_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -975,9 +975,9 @@ declare @llvm.riscv.vmflt.mask.nxv4f32.f32( define @intrinsic_vmflt_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1022,9 +1022,9 @@ declare @llvm.riscv.vmflt.mask.nxv8f32.f32( define @intrinsic_vmflt_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1069,9 +1069,9 @@ declare @llvm.riscv.vmflt.mask.nxv1f64.f64( define @intrinsic_vmflt_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, 
m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1116,9 +1116,9 @@ declare @llvm.riscv.vmflt.mask.nxv2f64.f64( define @intrinsic_vmflt_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1163,9 +1163,9 @@ declare @llvm.riscv.vmflt.mask.nxv4f64.f64( define @intrinsic_vmflt_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll index 65a04e504a973..1d0227f793728 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmfne.mask.nxv1f16( define @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmfne.mask.nxv2f16( define @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmfne.mask.nxv4f16( define @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmfne.mask.nxv8f16( define @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v10 ; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmfne.mask.nxv16f16( define @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v12 ; 
CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmfne.mask.nxv1f32( define @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmfne.mask.nxv2f32( define @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmfne.mask.nxv4f32( define @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v10 ; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmfne.mask.nxv8f32( define @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v12 ; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmfne.mask.nxv1f64( define @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmfne.mask.nxv2f64( define @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v10 ; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmfne.mask.nxv4f64( define @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmfne.vv v0, v8, v12 ; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -646,9 +646,9 @@ declare @llvm.riscv.vmfne.mask.nxv1f16.f16( define @intrinsic_vmfne_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: 
vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -693,9 +693,9 @@ declare @llvm.riscv.vmfne.mask.nxv2f16.f16( define @intrinsic_vmfne_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -740,9 +740,9 @@ declare @llvm.riscv.vmfne.mask.nxv4f16.f16( define @intrinsic_vmfne_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -787,9 +787,9 @@ declare @llvm.riscv.vmfne.mask.nxv8f16.f16( define @intrinsic_vmfne_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -834,9 +834,9 @@ declare @llvm.riscv.vmfne.mask.nxv16f16.f16( define @intrinsic_vmfne_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -881,9 +881,9 @@ declare @llvm.riscv.vmfne.mask.nxv1f32.f32( define @intrinsic_vmfne_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -928,9 +928,9 @@ declare @llvm.riscv.vmfne.mask.nxv2f32.f32( define @intrinsic_vmfne_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -975,9 +975,9 @@ declare @llvm.riscv.vmfne.mask.nxv4f32.f32( define @intrinsic_vmfne_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1022,9 +1022,9 @@ declare @llvm.riscv.vmfne.mask.nxv8f32.f32( define 
@intrinsic_vmfne_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1069,9 +1069,9 @@ declare @llvm.riscv.vmfne.mask.nxv1f64.f64( define @intrinsic_vmfne_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1116,9 +1116,9 @@ declare @llvm.riscv.vmfne.mask.nxv2f64.f64( define @intrinsic_vmfne_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1163,9 +1163,9 @@ declare @llvm.riscv.vmfne.mask.nxv4f64.f64( define @intrinsic_vmfne_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll index 8690014cc2c9d..3441934fb1550 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll @@ -412,8 +412,8 @@ declare @llvm.vp.smin.nxv128i8(, @vmin_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -974,11 +974,11 @@ declare @llvm.vp.smin.nxv32i32(, @vmin_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_nxv32i32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a3, a2, 2 ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: sub a3, a1, a2 ; CHECK-NEXT: sltu a4, a1, a3 @@ -1034,11 +1034,11 @@ declare i32 @llvm.vscale.i32() define @vmin_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; CHECK-LABEL: vmin_vx_nxv32i32_evl_nx8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a3, a1, 2 ; CHECK-NEXT: slli a2, a1, 1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: sub a3, a1, a2 ; CHECK-NEXT: sltu a4, a1, a3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll index 414807829d563..bbf4c886bfe9f 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll @@ -410,8 +410,8 @@ declare @llvm.vp.umin.nxv128i8(, @vminu_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -972,11 +972,11 @@ declare @llvm.vp.umin.nxv32i32(, @vminu_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_nxv32i32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a3, a2, 2 ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: sub a3, a1, a2 ; CHECK-NEXT: sltu a4, a1, a3 @@ -1032,11 +1032,11 @@ declare i32 @llvm.vscale.i32() define @vminu_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; CHECK-LABEL: vminu_vx_nxv32i32_evl_nx8: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a3, a1, 2 ; CHECK-NEXT: slli a2, a1, 1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a3 ; CHECK-NEXT: sub a3, a1, a2 ; CHECK-NEXT: sltu a4, a1, a3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll index d1f344d52763d..80e74faa8cd91 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll @@ -31,9 +31,9 @@ declare @llvm.riscv.vmsbf.mask.nxv1i1( define @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsbf.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -73,9 +73,9 @@ declare @llvm.riscv.vmsbf.mask.nxv2i1( define @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsbf.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -115,9 +115,9 @@ declare @llvm.riscv.vmsbf.mask.nxv4i1( define @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsbf.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -157,9 +157,9 @@ declare @llvm.riscv.vmsbf.mask.nxv8i1( define @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsbf.m v10, v8, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -199,9 +199,9 @@ declare @llvm.riscv.vmsbf.mask.nxv16i1( define 
@intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsbf.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -241,9 +241,9 @@ declare @llvm.riscv.vmsbf.mask.nxv32i1( define @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmsbf.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -283,9 +283,9 @@ declare @llvm.riscv.vmsbf.mask.nxv64i1( define @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vmsbf.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll index 1fd2383c40d18..6407f39a65e8b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmseq.mask.nxv1i8( define @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmseq.mask.nxv2i8( define @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmseq.mask.nxv4i8( define @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmseq.mask.nxv8i8( define @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmseq.mask.nxv16i8( define @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; 
CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmseq.mask.nxv32i8( define @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmseq.mask.nxv1i16( define @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmseq.mask.nxv2i16( define @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmseq.mask.nxv4i16( define @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmseq.mask.nxv8i16( define @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmseq.mask.nxv16i16( define @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmseq.mask.nxv1i32( define @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -646,8 +646,8 @@ declare @llvm.riscv.vmseq.mask.nxv2i32( define @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -697,8 +697,8 @@ declare @llvm.riscv.vmseq.mask.nxv4i32( define @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -748,8 +748,8 @@ declare @llvm.riscv.vmseq.mask.nxv8i32( define @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -799,8 +799,8 @@ declare @llvm.riscv.vmseq.mask.nxv1i64( define @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -850,8 +850,8 @@ declare @llvm.riscv.vmseq.mask.nxv2i64( define @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -901,8 +901,8 @@ declare @llvm.riscv.vmseq.mask.nxv4i64( define @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -952,9 +952,9 @@ declare @llvm.riscv.vmseq.mask.nxv1i8.i8( define @intrinsic_vmseq_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -999,9 +999,9 @@ declare @llvm.riscv.vmseq.mask.nxv2i8.i8( define @intrinsic_vmseq_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1046,9 +1046,9 @@ declare @llvm.riscv.vmseq.mask.nxv4i8.i8( define @intrinsic_vmseq_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmseq_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1093,9 +1093,9 @@ declare @llvm.riscv.vmseq.mask.nxv8i8.i8( define @intrinsic_vmseq_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1140,9 +1140,9 @@ declare @llvm.riscv.vmseq.mask.nxv16i8.i8( define @intrinsic_vmseq_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1187,9 +1187,9 @@ declare @llvm.riscv.vmseq.mask.nxv32i8.i8( define @intrinsic_vmseq_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1234,9 +1234,9 @@ declare @llvm.riscv.vmseq.mask.nxv1i16.i16( define @intrinsic_vmseq_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1281,9 +1281,9 @@ declare @llvm.riscv.vmseq.mask.nxv2i16.i16( define @intrinsic_vmseq_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1328,9 +1328,9 @@ declare @llvm.riscv.vmseq.mask.nxv4i16.i16( define @intrinsic_vmseq_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1375,9 +1375,9 @@ declare @llvm.riscv.vmseq.mask.nxv8i16.i16( define @intrinsic_vmseq_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; 
CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1422,9 +1422,9 @@ declare @llvm.riscv.vmseq.mask.nxv16i16.i16( define @intrinsic_vmseq_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1469,9 +1469,9 @@ declare @llvm.riscv.vmseq.mask.nxv1i32.i32( define @intrinsic_vmseq_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1516,9 +1516,9 @@ declare @llvm.riscv.vmseq.mask.nxv2i32.i32( define @intrinsic_vmseq_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1563,9 +1563,9 @@ declare @llvm.riscv.vmseq.mask.nxv4i32.i32( define @intrinsic_vmseq_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1610,9 +1610,9 @@ declare @llvm.riscv.vmseq.mask.nxv8i32.i32( define @intrinsic_vmseq_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1684,9 +1684,9 @@ define @intrinsic_vmseq_mask_vx_nxv1i64_i64( ; ; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: vmv1r.v v0, v9 -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmseq.vx v10, v8, a0, v0.t ; RV64-NEXT: vmv.v.v v0, v10 ; RV64-NEXT: ret @@ -1758,9 +1758,9 @@ define @intrinsic_vmseq_mask_vx_nxv2i64_i64( ; ; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmv1r.v v11, v0 ; RV64-NEXT: vmv1r.v v0, v10 -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmseq.vx v11, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v11 ; RV64-NEXT: ret @@ -1832,9 +1832,9 @@ define @intrinsic_vmseq_mask_vx_nxv4i64_i64( ; ; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmv1r.v v13, v0 ; RV64-NEXT: vmv1r.v v0, v12 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; 
RV64-NEXT: vmseq.vx v13, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v13 ; RV64-NEXT: ret @@ -1867,9 +1867,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1902,9 +1902,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1937,9 +1937,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1972,9 +1972,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2007,9 +2007,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2042,9 +2042,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2077,9 +2077,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2112,9 +2112,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2147,9 +2147,9 @@ entry: define 
@intrinsic_vmseq_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2182,9 +2182,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2217,9 +2217,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2252,9 +2252,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2287,9 +2287,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2322,9 +2322,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2357,9 +2357,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2392,9 +2392,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2427,9 +2427,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmseq_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2462,9 +2462,9 @@ entry: define @intrinsic_vmseq_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll index 2dc133d169f0a..45e3840f7e673 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmsge.mask.nxv1i8( define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmsge.mask.nxv2i8( define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmsge.mask.nxv4i8( define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmsge.mask.nxv8i8( define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmsge.mask.nxv16i8( define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmsge.mask.nxv32i8( define @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: 
vmsle.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmsge.mask.nxv1i16( define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmsge.mask.nxv2i16( define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmsge.mask.nxv4i16( define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmsge.mask.nxv8i16( define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmsge.mask.nxv16i16( define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmsge.mask.nxv1i32( define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -646,8 +646,8 @@ declare @llvm.riscv.vmsge.mask.nxv2i32( define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -697,8 +697,8 @@ declare @llvm.riscv.vmsge.mask.nxv4i32( define @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsle.vv v0, 
v10, v8 ; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -748,8 +748,8 @@ declare @llvm.riscv.vmsge.mask.nxv8i32( define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -799,8 +799,8 @@ declare @llvm.riscv.vmsge.mask.nxv1i64( define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -850,8 +850,8 @@ declare @llvm.riscv.vmsge.mask.nxv2i64( define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -901,8 +901,8 @@ declare @llvm.riscv.vmsge.mask.nxv4i64( define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -953,9 +953,9 @@ declare @llvm.riscv.vmsge.mask.nxv1i8.i8( define @intrinsic_vmsge_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1001,9 +1001,9 @@ declare @llvm.riscv.vmsge.mask.nxv2i8.i8( define @intrinsic_vmsge_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1049,9 +1049,9 @@ declare @llvm.riscv.vmsge.mask.nxv4i8.i8( define @intrinsic_vmsge_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1069,11 +1069,11 @@ entry: define @intrinsic_vmsge_mask_vx_nxv4i8_i8_1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8_1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, 
v0 -; CHECK-NEXT: li a1, 99 +; CHECK-NEXT: li a0, 99 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmsgt.vx v10, v8, a1, v0.t +; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret entry: @@ -1136,9 +1136,9 @@ declare @llvm.riscv.vmsge.mask.nxv8i8.i8( define @intrinsic_vmsge_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1184,9 +1184,9 @@ declare @llvm.riscv.vmsge.mask.nxv16i8.i8( define @intrinsic_vmsge_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v11, v10 ; CHECK-NEXT: ret @@ -1232,9 +1232,9 @@ declare @llvm.riscv.vmsge.mask.nxv32i8.i8( define @intrinsic_vmsge_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v13, v12 ; CHECK-NEXT: ret @@ -1280,9 +1280,9 @@ declare @llvm.riscv.vmsge.mask.nxv1i16.i16( define @intrinsic_vmsge_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1328,9 +1328,9 @@ declare @llvm.riscv.vmsge.mask.nxv2i16.i16( define @intrinsic_vmsge_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1376,9 +1376,9 @@ declare @llvm.riscv.vmsge.mask.nxv4i16.i16( define @intrinsic_vmsge_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1424,9 +1424,9 @@ declare @llvm.riscv.vmsge.mask.nxv8i16.i16( define @intrinsic_vmsge_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: 
vmslt.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v11, v10 ; CHECK-NEXT: ret @@ -1472,9 +1472,9 @@ declare @llvm.riscv.vmsge.mask.nxv16i16.i16( define @intrinsic_vmsge_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v13, v12 ; CHECK-NEXT: ret @@ -1520,9 +1520,9 @@ declare @llvm.riscv.vmsge.mask.nxv1i32.i32( define @intrinsic_vmsge_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1568,9 +1568,9 @@ declare @llvm.riscv.vmsge.mask.nxv2i32.i32( define @intrinsic_vmsge_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1616,9 +1616,9 @@ declare @llvm.riscv.vmsge.mask.nxv4i32.i32( define @intrinsic_vmsge_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v11, v10 ; CHECK-NEXT: ret @@ -1664,9 +1664,9 @@ declare @llvm.riscv.vmsge.mask.nxv8i32.i32( define @intrinsic_vmsge_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v13, v12 ; CHECK-NEXT: ret @@ -1739,9 +1739,9 @@ define @intrinsic_vmsge_mask_vx_nxv1i64_i64( ; ; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: vmv1r.v v0, v9 -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t ; RV64-NEXT: vmxor.mm v0, v10, v9 ; RV64-NEXT: ret @@ -1814,9 +1814,9 @@ define @intrinsic_vmsge_mask_vx_nxv2i64_i64( ; ; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmv1r.v v11, v0 ; RV64-NEXT: vmv1r.v v0, v10 -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t ; RV64-NEXT: vmxor.mm v0, v11, v10 ; RV64-NEXT: ret @@ -1889,9 +1889,9 @@ define @intrinsic_vmsge_mask_vx_nxv4i64_i64( ; ; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmv1r.v v13, v0 ; RV64-NEXT: vmv1r.v v0, v12 -; RV64-NEXT: 
vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t ; RV64-NEXT: vmxor.mm v0, v13, v12 ; RV64-NEXT: ret @@ -1924,9 +1924,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, -15, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1959,9 +1959,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, -13, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2025,9 +2025,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, -11, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2060,9 +2060,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, -9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2095,9 +2095,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v11, v8, -7, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2130,9 +2130,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v13, v8, -5, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2165,9 +2165,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, -3, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2200,9 +2200,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, -1, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; 
CHECK-NEXT: ret @@ -2235,9 +2235,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2270,9 +2270,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v11, v8, 2, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2305,9 +2305,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v13, v8, 4, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2340,9 +2340,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 6, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2375,9 +2375,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2410,9 +2410,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v11, v8, 10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2445,9 +2445,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v13, v8, 12, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2480,9 +2480,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2515,9 +2515,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv2i64_i64( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v11, v8, 8, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2550,9 +2550,9 @@ entry: define @intrinsic_vmsge_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v13, v8, 8, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll index 69a3835cd4d67..e42be4faafefc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i8( define @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i8( define @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i8( define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i8( define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv16i8( define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv32i8( define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; 
CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i16( define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i16( define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i16( define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i16( define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv16i16( define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i32( define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -646,8 +646,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i32( define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -697,8 +697,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i32( define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -748,8 +748,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i32( define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -799,8 +799,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i64( define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -850,8 +850,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i64( define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -901,8 +901,8 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i64( define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -953,9 +953,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i8.i8( define @intrinsic_vmsgeu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1001,9 +1001,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i8.i8( define @intrinsic_vmsgeu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1049,9 +1049,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i8.i8( define @intrinsic_vmsgeu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1097,9 +1097,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i8.i8( define @intrinsic_vmsgeu_mask_vx_nxv8i8_i8( %0, %1, 
i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1145,9 +1145,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv16i8.i8( define @intrinsic_vmsgeu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v11, v10 ; CHECK-NEXT: ret @@ -1193,9 +1193,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv32i8.i8( define @intrinsic_vmsgeu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v13, v12 ; CHECK-NEXT: ret @@ -1241,9 +1241,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i16.i16( define @intrinsic_vmsgeu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1289,9 +1289,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i16.i16( define @intrinsic_vmsgeu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1337,9 +1337,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i16.i16( define @intrinsic_vmsgeu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1385,9 +1385,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i16.i16( define @intrinsic_vmsgeu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v11, v10 ; CHECK-NEXT: ret @@ -1433,9 +1433,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv16i16.i16( define @intrinsic_vmsgeu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, 
ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v13, v12 ; CHECK-NEXT: ret @@ -1481,9 +1481,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv1i32.i32( define @intrinsic_vmsgeu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1529,9 +1529,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv2i32.i32( define @intrinsic_vmsgeu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v10, v9 ; CHECK-NEXT: ret @@ -1577,9 +1577,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv4i32.i32( define @intrinsic_vmsgeu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v11, v10 ; CHECK-NEXT: ret @@ -1625,9 +1625,9 @@ declare @llvm.riscv.vmsgeu.mask.nxv8i32.i32( define @intrinsic_vmsgeu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmxor.mm v0, v13, v12 ; CHECK-NEXT: ret @@ -1700,9 +1700,9 @@ define @intrinsic_vmsgeu_mask_vx_nxv1i64_i64( ; ; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: vmv1r.v v0, v9 -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmsltu.vx v10, v8, a0, v0.t ; RV64-NEXT: vmxor.mm v0, v10, v9 ; RV64-NEXT: ret @@ -1775,9 +1775,9 @@ define @intrinsic_vmsgeu_mask_vx_nxv2i64_i64( ; ; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmv1r.v v11, v0 ; RV64-NEXT: vmv1r.v v0, v10 -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmsltu.vx v11, v8, a0, v0.t ; RV64-NEXT: vmxor.mm v0, v11, v10 ; RV64-NEXT: ret @@ -1850,9 +1850,9 @@ define @intrinsic_vmsgeu_mask_vx_nxv4i64_i64( ; ; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmv1r.v v13, v0 ; RV64-NEXT: vmv1r.v v0, v12 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmsltu.vx v13, v8, a0, v0.t ; RV64-NEXT: vmxor.mm v0, v13, v12 ; RV64-NEXT: ret @@ -1885,9 +1885,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, -15, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1920,9 +1920,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, -13, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1955,9 +1955,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, -11, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1975,11 +1975,11 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv4i8_i8_1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8_1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: li a1, 99 +; CHECK-NEXT: li a0, 99 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmsgtu.vx v10, v8, a1, v0.t +; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret entry: @@ -2011,9 +2011,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, -9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2046,9 +2046,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v11, v8, -7, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2081,9 +2081,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v13, v8, -5, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2116,9 +2116,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, -3, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2214,9 +2214,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2249,9 +2249,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v11, v8, 2, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2284,9 +2284,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v13, v8, 4, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2319,9 +2319,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 6, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2354,9 +2354,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 8, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2389,9 +2389,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v11, v8, 10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2424,9 +2424,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v13, v8, 12, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2459,9 +2459,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 14, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2494,9 +2494,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v 
v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v11, v8, -16, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2529,9 +2529,9 @@ entry: define @intrinsic_vmsgeu_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v13, v8, -14, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll index d7dee2e1bc580..62ac44bfdf38c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmsgt.mask.nxv1i8( define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmsgt.mask.nxv2i8( define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmsgt.mask.nxv4i8( define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmsgt.mask.nxv8i8( define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmsgt.mask.nxv16i8( define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmsgt.mask.nxv32i8( define @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmsgt.mask.nxv1i16( define 
@intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmsgt.mask.nxv2i16( define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmsgt.mask.nxv4i16( define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmsgt.mask.nxv8i16( define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmsgt.mask.nxv16i16( define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmsgt.mask.nxv1i32( define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -646,8 +646,8 @@ declare @llvm.riscv.vmsgt.mask.nxv2i32( define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -697,8 +697,8 @@ declare @llvm.riscv.vmsgt.mask.nxv4i32( define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -748,8 +748,8 @@ declare 
@llvm.riscv.vmsgt.mask.nxv8i32( define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -799,8 +799,8 @@ declare @llvm.riscv.vmsgt.mask.nxv1i64( define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -850,8 +850,8 @@ declare @llvm.riscv.vmsgt.mask.nxv2i64( define @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -901,8 +901,8 @@ declare @llvm.riscv.vmsgt.mask.nxv4i64( define @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -952,9 +952,9 @@ declare @llvm.riscv.vmsgt.mask.nxv1i8.i8( define @intrinsic_vmsgt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -999,9 +999,9 @@ declare @llvm.riscv.vmsgt.mask.nxv2i8.i8( define @intrinsic_vmsgt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1046,9 +1046,9 @@ declare @llvm.riscv.vmsgt.mask.nxv4i8.i8( define @intrinsic_vmsgt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1093,9 +1093,9 @@ declare @llvm.riscv.vmsgt.mask.nxv8i8.i8( define @intrinsic_vmsgt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; 
CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1140,9 +1140,9 @@ declare @llvm.riscv.vmsgt.mask.nxv16i8.i8( define @intrinsic_vmsgt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1187,9 +1187,9 @@ declare @llvm.riscv.vmsgt.mask.nxv32i8.i8( define @intrinsic_vmsgt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1234,9 +1234,9 @@ declare @llvm.riscv.vmsgt.mask.nxv1i16.i16( define @intrinsic_vmsgt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1281,9 +1281,9 @@ declare @llvm.riscv.vmsgt.mask.nxv2i16.i16( define @intrinsic_vmsgt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1328,9 +1328,9 @@ declare @llvm.riscv.vmsgt.mask.nxv4i16.i16( define @intrinsic_vmsgt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1375,9 +1375,9 @@ declare @llvm.riscv.vmsgt.mask.nxv8i16.i16( define @intrinsic_vmsgt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1422,9 +1422,9 @@ declare @llvm.riscv.vmsgt.mask.nxv16i16.i16( define @intrinsic_vmsgt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1469,9 +1469,9 @@ declare @llvm.riscv.vmsgt.mask.nxv1i32.i32( define @intrinsic_vmsgt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1516,9 +1516,9 @@ declare @llvm.riscv.vmsgt.mask.nxv2i32.i32( define @intrinsic_vmsgt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1563,9 +1563,9 @@ declare @llvm.riscv.vmsgt.mask.nxv4i32.i32( define @intrinsic_vmsgt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1610,9 +1610,9 @@ declare @llvm.riscv.vmsgt.mask.nxv8i32.i32( define @intrinsic_vmsgt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1684,9 +1684,9 @@ define @intrinsic_vmsgt_mask_vx_nxv1i64_i64( ; ; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: vmv1r.v v0, v9 -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmsgt.vx v10, v8, a0, v0.t ; RV64-NEXT: vmv.v.v v0, v10 ; RV64-NEXT: ret @@ -1758,9 +1758,9 @@ define @intrinsic_vmsgt_mask_vx_nxv2i64_i64( ; ; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmv1r.v v11, v0 ; RV64-NEXT: vmv1r.v v0, v10 -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmsgt.vx v11, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v11 ; RV64-NEXT: ret @@ -1832,9 +1832,9 @@ define @intrinsic_vmsgt_mask_vx_nxv4i64_i64( ; ; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmv1r.v v13, v0 ; RV64-NEXT: vmv1r.v v0, v12 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmsgt.vx v13, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v13 ; RV64-NEXT: ret @@ -1867,9 +1867,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1902,9 +1902,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1937,9 +1937,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1972,9 +1972,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2007,9 +2007,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2042,9 +2042,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2077,9 +2077,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2112,9 +2112,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2147,9 +2147,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2182,9 +2182,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2217,9 +2217,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2252,9 +2252,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2287,9 +2287,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2322,9 +2322,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2357,9 +2357,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2392,9 +2392,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2427,9 +2427,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2462,9 +2462,9 @@ entry: define @intrinsic_vmsgt_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t ; 
CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll index fe9d522f6b401..d57b9cd5bae53 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i8( define @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i8( define @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i8( define @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv8i8( define @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv16i8( define @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv32i8( define @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i16( define @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i16( define @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i16( define @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv8i16( define @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv16i16( define @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i32( define @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -646,8 +646,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i32( define @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -697,8 +697,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i32( define @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -748,8 +748,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv8i32( define @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -799,8 +799,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i64( define 
@intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -850,8 +850,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i64( define @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -901,8 +901,8 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i64( define @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -952,9 +952,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i8.i8( define @intrinsic_vmsgtu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -999,9 +999,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i8.i8( define @intrinsic_vmsgtu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1046,9 +1046,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i8.i8( define @intrinsic_vmsgtu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1093,9 +1093,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv8i8.i8( define @intrinsic_vmsgtu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1140,9 +1140,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv16i8.i8( define @intrinsic_vmsgtu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, 
ta, mu ; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1187,9 +1187,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv32i8.i8( define @intrinsic_vmsgtu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1234,9 +1234,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i16.i16( define @intrinsic_vmsgtu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1281,9 +1281,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i16.i16( define @intrinsic_vmsgtu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1328,9 +1328,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i16.i16( define @intrinsic_vmsgtu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1375,9 +1375,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv8i16.i16( define @intrinsic_vmsgtu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1422,9 +1422,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv16i16.i16( define @intrinsic_vmsgtu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1469,9 +1469,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv1i32.i32( define @intrinsic_vmsgtu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1516,9 +1516,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv2i32.i32( define 
@intrinsic_vmsgtu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1563,9 +1563,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv4i32.i32( define @intrinsic_vmsgtu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1610,9 +1610,9 @@ declare @llvm.riscv.vmsgtu.mask.nxv8i32.i32( define @intrinsic_vmsgtu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1684,9 +1684,9 @@ define @intrinsic_vmsgtu_mask_vx_nxv1i64_i64( ; ; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: vmv1r.v v0, v9 -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmsgtu.vx v10, v8, a0, v0.t ; RV64-NEXT: vmv.v.v v0, v10 ; RV64-NEXT: ret @@ -1758,9 +1758,9 @@ define @intrinsic_vmsgtu_mask_vx_nxv2i64_i64( ; ; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmv1r.v v11, v0 ; RV64-NEXT: vmv1r.v v0, v10 -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmsgtu.vx v11, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v11 ; RV64-NEXT: ret @@ -1832,9 +1832,9 @@ define @intrinsic_vmsgtu_mask_vx_nxv4i64_i64( ; ; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmv1r.v v13, v0 ; RV64-NEXT: vmv1r.v v0, v12 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmsgtu.vx v13, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v13 ; RV64-NEXT: ret @@ -1867,9 +1867,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1902,9 +1902,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1937,9 +1937,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8: 
; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1972,9 +1972,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2007,9 +2007,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2042,9 +2042,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2077,9 +2077,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2112,9 +2112,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2147,9 +2147,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2182,9 +2182,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2217,9 +2217,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; 
CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2252,9 +2252,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2287,9 +2287,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2322,9 +2322,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2357,9 +2357,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2392,9 +2392,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2427,9 +2427,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2462,9 +2462,9 @@ entry: define @intrinsic_vmsgtu_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll index 1dc52eb55455b..9c70dcab1efde 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll @@ -31,9 +31,9 @@ declare @llvm.riscv.vmsif.mask.nxv1i1( define 
@intrinsic_vmsif_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsif.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -73,9 +73,9 @@ declare @llvm.riscv.vmsif.mask.nxv2i1( define @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsif.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -115,9 +115,9 @@ declare @llvm.riscv.vmsif.mask.nxv4i1( define @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsif.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -157,9 +157,9 @@ declare @llvm.riscv.vmsif.mask.nxv8i1( define @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsif.m v10, v8, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -199,9 +199,9 @@ declare @llvm.riscv.vmsif.mask.nxv16i1( define @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsif.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -241,9 +241,9 @@ declare @llvm.riscv.vmsif.mask.nxv32i1( define @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmsif.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -283,9 +283,9 @@ declare @llvm.riscv.vmsif.mask.nxv64i1( define @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vmsif.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll index bc98b31957b25..9653dfd2518d8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmsle.mask.nxv1i8( define @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmsle.mask.nxv2i8( define @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmsle.mask.nxv4i8( define @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmsle.mask.nxv8i8( define @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmsle.mask.nxv16i8( define @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmsle.mask.nxv32i8( define @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmsle.mask.nxv1i16( define @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmsle.mask.nxv2i16( define @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmsle.mask.nxv4i16( define @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmsle.mask.nxv8i16( define @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmsle.mask.nxv16i16( define @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmsle.mask.nxv1i32( define @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -646,8 +646,8 @@ declare @llvm.riscv.vmsle.mask.nxv2i32( define @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -697,8 +697,8 @@ declare @llvm.riscv.vmsle.mask.nxv4i32( define @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -748,8 +748,8 @@ declare @llvm.riscv.vmsle.mask.nxv8i32( define @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -799,8 +799,8 @@ declare @llvm.riscv.vmsle.mask.nxv1i64( define @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -850,8 +850,8 @@ declare @llvm.riscv.vmsle.mask.nxv2i64( define @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -901,8 +901,8 @@ declare @llvm.riscv.vmsle.mask.nxv4i64( define @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -952,9 +952,9 @@ declare @llvm.riscv.vmsle.mask.nxv1i8.i8( define @intrinsic_vmsle_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -999,9 +999,9 @@ declare @llvm.riscv.vmsle.mask.nxv2i8.i8( define @intrinsic_vmsle_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1046,9 +1046,9 @@ declare @llvm.riscv.vmsle.mask.nxv4i8.i8( define @intrinsic_vmsle_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1093,9 +1093,9 @@ declare @llvm.riscv.vmsle.mask.nxv8i8.i8( define @intrinsic_vmsle_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1140,9 +1140,9 @@ declare @llvm.riscv.vmsle.mask.nxv16i8.i8( define @intrinsic_vmsle_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1187,9 +1187,9 @@ declare @llvm.riscv.vmsle.mask.nxv32i8.i8( define @intrinsic_vmsle_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, 
v13 ; CHECK-NEXT: ret @@ -1234,9 +1234,9 @@ declare @llvm.riscv.vmsle.mask.nxv1i16.i16( define @intrinsic_vmsle_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1281,9 +1281,9 @@ declare @llvm.riscv.vmsle.mask.nxv2i16.i16( define @intrinsic_vmsle_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1328,9 +1328,9 @@ declare @llvm.riscv.vmsle.mask.nxv4i16.i16( define @intrinsic_vmsle_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1375,9 +1375,9 @@ declare @llvm.riscv.vmsle.mask.nxv8i16.i16( define @intrinsic_vmsle_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1422,9 +1422,9 @@ declare @llvm.riscv.vmsle.mask.nxv16i16.i16( define @intrinsic_vmsle_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1469,9 +1469,9 @@ declare @llvm.riscv.vmsle.mask.nxv1i32.i32( define @intrinsic_vmsle_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1516,9 +1516,9 @@ declare @llvm.riscv.vmsle.mask.nxv2i32.i32( define @intrinsic_vmsle_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1563,9 +1563,9 @@ declare @llvm.riscv.vmsle.mask.nxv4i32.i32( define @intrinsic_vmsle_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1610,9 +1610,9 @@ declare @llvm.riscv.vmsle.mask.nxv8i32.i32( define @intrinsic_vmsle_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1684,9 +1684,9 @@ define @intrinsic_vmsle_mask_vx_nxv1i64_i64( ; ; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: vmv1r.v v0, v9 -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmsle.vx v10, v8, a0, v0.t ; RV64-NEXT: vmv.v.v v0, v10 ; RV64-NEXT: ret @@ -1758,9 +1758,9 @@ define @intrinsic_vmsle_mask_vx_nxv2i64_i64( ; ; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmv1r.v v11, v0 ; RV64-NEXT: vmv1r.v v0, v10 -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmsle.vx v11, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v11 ; RV64-NEXT: ret @@ -1832,9 +1832,9 @@ define @intrinsic_vmsle_mask_vx_nxv4i64_i64( ; ; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmv1r.v v13, v0 ; RV64-NEXT: vmv1r.v v0, v12 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmsle.vx v13, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v13 ; RV64-NEXT: ret @@ -1867,9 +1867,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1902,9 +1902,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1937,9 +1937,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1972,9 +1972,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, 
m1, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2007,9 +2007,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2042,9 +2042,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2077,9 +2077,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2112,9 +2112,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2147,9 +2147,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2182,9 +2182,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2217,9 +2217,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2252,9 +2252,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ 
-2287,9 +2287,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2322,9 +2322,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2357,9 +2357,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2392,9 +2392,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2427,9 +2427,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2462,9 +2462,9 @@ entry: define @intrinsic_vmsle_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll index 731989cfe15d9..25ecfa65c7c48 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmsleu.mask.nxv1i8( define @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmsleu.mask.nxv2i8( define @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu 
+; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmsleu.mask.nxv4i8( define @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmsleu.mask.nxv8i8( define @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmsleu.mask.nxv16i8( define @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmsleu.mask.nxv32i8( define @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmsleu.mask.nxv1i16( define @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmsleu.mask.nxv2i16( define @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmsleu.mask.nxv4i16( define @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmsleu.mask.nxv8i16( define @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmsleu.mask.nxv16i16( define @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmsleu.mask.nxv1i32( define @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -646,8 +646,8 @@ declare @llvm.riscv.vmsleu.mask.nxv2i32( define @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -697,8 +697,8 @@ declare @llvm.riscv.vmsleu.mask.nxv4i32( define @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -748,8 +748,8 @@ declare @llvm.riscv.vmsleu.mask.nxv8i32( define @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -799,8 +799,8 @@ declare @llvm.riscv.vmsleu.mask.nxv1i64( define @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -850,8 +850,8 @@ declare @llvm.riscv.vmsleu.mask.nxv2i64( define @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -901,8 +901,8 @@ declare @llvm.riscv.vmsleu.mask.nxv4i64( define @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -952,9 +952,9 @@ declare @llvm.riscv.vmsleu.mask.nxv1i8.i8( define @intrinsic_vmsleu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -999,9 +999,9 @@ declare @llvm.riscv.vmsleu.mask.nxv2i8.i8( define @intrinsic_vmsleu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1046,9 +1046,9 @@ declare @llvm.riscv.vmsleu.mask.nxv4i8.i8( define @intrinsic_vmsleu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1093,9 +1093,9 @@ declare @llvm.riscv.vmsleu.mask.nxv8i8.i8( define @intrinsic_vmsleu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1140,9 +1140,9 @@ declare @llvm.riscv.vmsleu.mask.nxv16i8.i8( define @intrinsic_vmsleu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1187,9 +1187,9 @@ declare @llvm.riscv.vmsleu.mask.nxv32i8.i8( define @intrinsic_vmsleu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1234,9 +1234,9 @@ declare @llvm.riscv.vmsleu.mask.nxv1i16.i16( define @intrinsic_vmsleu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmsleu.vx v10, 
v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1281,9 +1281,9 @@ declare @llvm.riscv.vmsleu.mask.nxv2i16.i16( define @intrinsic_vmsleu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1328,9 +1328,9 @@ declare @llvm.riscv.vmsleu.mask.nxv4i16.i16( define @intrinsic_vmsleu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1375,9 +1375,9 @@ declare @llvm.riscv.vmsleu.mask.nxv8i16.i16( define @intrinsic_vmsleu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1422,9 +1422,9 @@ declare @llvm.riscv.vmsleu.mask.nxv16i16.i16( define @intrinsic_vmsleu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1469,9 +1469,9 @@ declare @llvm.riscv.vmsleu.mask.nxv1i32.i32( define @intrinsic_vmsleu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1516,9 +1516,9 @@ declare @llvm.riscv.vmsleu.mask.nxv2i32.i32( define @intrinsic_vmsleu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1563,9 +1563,9 @@ declare @llvm.riscv.vmsleu.mask.nxv4i32.i32( define @intrinsic_vmsleu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1610,9 +1610,9 @@ declare @llvm.riscv.vmsleu.mask.nxv8i32.i32( define @intrinsic_vmsleu_mask_vx_nxv8i32_i32( %0, 
%1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1684,9 +1684,9 @@ define @intrinsic_vmsleu_mask_vx_nxv1i64_i64( ; ; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: vmv1r.v v0, v9 -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmsleu.vx v10, v8, a0, v0.t ; RV64-NEXT: vmv.v.v v0, v10 ; RV64-NEXT: ret @@ -1758,9 +1758,9 @@ define @intrinsic_vmsleu_mask_vx_nxv2i64_i64( ; ; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmv1r.v v11, v0 ; RV64-NEXT: vmv1r.v v0, v10 -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmsleu.vx v11, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v11 ; RV64-NEXT: ret @@ -1832,9 +1832,9 @@ define @intrinsic_vmsleu_mask_vx_nxv4i64_i64( ; ; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmv1r.v v13, v0 ; RV64-NEXT: vmv1r.v v0, v12 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmsleu.vx v13, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v13 ; RV64-NEXT: ret @@ -1867,9 +1867,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1902,9 +1902,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1937,9 +1937,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1972,9 +1972,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2007,9 +2007,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; 
CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2042,9 +2042,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2077,9 +2077,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2112,9 +2112,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2147,9 +2147,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2182,9 +2182,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2217,9 +2217,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2252,9 +2252,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2287,9 +2287,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v10, 
v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2322,9 +2322,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2357,9 +2357,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2392,9 +2392,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2427,9 +2427,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2462,9 +2462,9 @@ entry: define @intrinsic_vmsleu_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll index 407f85b4f5996..c17495e3b2119 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmslt.mask.nxv1i8( define @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmslt.mask.nxv2i8( define @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmslt.mask.nxv4i8( define @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmslt.mask.nxv8i8( define @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmslt.mask.nxv16i8( define @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmslt.mask.nxv32i8( define @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmslt.mask.nxv1i16( define @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmslt.mask.nxv2i16( define @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmslt.mask.nxv4i16( define @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmslt.mask.nxv8i16( define @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmslt.mask.nxv16i16( define @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmslt.mask.nxv1i32( define @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -646,8 +646,8 @@ declare @llvm.riscv.vmslt.mask.nxv2i32( define @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -697,8 +697,8 @@ declare @llvm.riscv.vmslt.mask.nxv4i32( define @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -748,8 +748,8 @@ declare @llvm.riscv.vmslt.mask.nxv8i32( define @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -799,8 +799,8 @@ declare @llvm.riscv.vmslt.mask.nxv1i64( define @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -850,8 +850,8 @@ declare @llvm.riscv.vmslt.mask.nxv2i64( define @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -901,8 +901,8 @@ declare @llvm.riscv.vmslt.mask.nxv4i64( define @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -952,9 +952,9 @@ declare @llvm.riscv.vmslt.mask.nxv1i8.i8( define @intrinsic_vmslt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8: ; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -999,9 +999,9 @@ declare @llvm.riscv.vmslt.mask.nxv2i8.i8( define @intrinsic_vmslt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1046,9 +1046,9 @@ declare @llvm.riscv.vmslt.mask.nxv4i8.i8( define @intrinsic_vmslt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1093,9 +1093,9 @@ declare @llvm.riscv.vmslt.mask.nxv8i8.i8( define @intrinsic_vmslt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1140,9 +1140,9 @@ declare @llvm.riscv.vmslt.mask.nxv16i8.i8( define @intrinsic_vmslt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1187,9 +1187,9 @@ declare @llvm.riscv.vmslt.mask.nxv32i8.i8( define @intrinsic_vmslt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1234,9 +1234,9 @@ declare @llvm.riscv.vmslt.mask.nxv1i16.i16( define @intrinsic_vmslt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1281,9 +1281,9 @@ declare @llvm.riscv.vmslt.mask.nxv2i16.i16( define @intrinsic_vmslt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; 
CHECK-NEXT: ret @@ -1328,9 +1328,9 @@ declare @llvm.riscv.vmslt.mask.nxv4i16.i16( define @intrinsic_vmslt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1375,9 +1375,9 @@ declare @llvm.riscv.vmslt.mask.nxv8i16.i16( define @intrinsic_vmslt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1422,9 +1422,9 @@ declare @llvm.riscv.vmslt.mask.nxv16i16.i16( define @intrinsic_vmslt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1469,9 +1469,9 @@ declare @llvm.riscv.vmslt.mask.nxv1i32.i32( define @intrinsic_vmslt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1516,9 +1516,9 @@ declare @llvm.riscv.vmslt.mask.nxv2i32.i32( define @intrinsic_vmslt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1563,9 +1563,9 @@ declare @llvm.riscv.vmslt.mask.nxv4i32.i32( define @intrinsic_vmslt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1610,9 +1610,9 @@ declare @llvm.riscv.vmslt.mask.nxv8i32.i32( define @intrinsic_vmslt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1684,9 +1684,9 @@ define @intrinsic_vmslt_mask_vx_nxv1i64_i64( ; ; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, 
mu ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: vmv1r.v v0, v9 -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t ; RV64-NEXT: vmv.v.v v0, v10 ; RV64-NEXT: ret @@ -1758,9 +1758,9 @@ define @intrinsic_vmslt_mask_vx_nxv2i64_i64( ; ; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmv1r.v v11, v0 ; RV64-NEXT: vmv1r.v v0, v10 -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v11 ; RV64-NEXT: ret @@ -1832,9 +1832,9 @@ define @intrinsic_vmslt_mask_vx_nxv4i64_i64( ; ; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmv1r.v v13, v0 ; RV64-NEXT: vmv1r.v v0, v12 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v13 ; RV64-NEXT: ret @@ -1867,9 +1867,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, -15, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1902,9 +1902,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, -13, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1937,9 +1937,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, -11, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1972,9 +1972,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, -9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2007,9 +2007,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsle.vi v11, v8, -7, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2042,9 +2042,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmsle.vi v13, v8, -5, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2077,9 
+2077,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, -3, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2112,9 +2112,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmslt.vx v10, v8, zero, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2147,9 +2147,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2182,9 +2182,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v11, v8, 2, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2217,9 +2217,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmsle.vi v13, v8, 4, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2252,9 +2252,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 6, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2287,9 +2287,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2322,9 +2322,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmsle.vi v11, v8, 10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2357,9 +2357,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v13, v8, 12, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2392,9 +2392,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2427,9 +2427,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmsle.vi v11, v8, 8, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2462,9 +2462,9 @@ entry: define @intrinsic_vmslt_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmsle.vi v13, v8, 8, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll index e051b332018fd..a37a02848365d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmsltu.mask.nxv1i8( define @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmsltu.mask.nxv2i8( define @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmsltu.mask.nxv4i8( define @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmsltu.mask.nxv8i8( define @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmsltu.mask.nxv16i8( define @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmsltu.mask.nxv32i8( define @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmsltu.mask.nxv1i16( define @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmsltu.mask.nxv2i16( define @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmsltu.mask.nxv4i16( define @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmsltu.mask.nxv8i16( define @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmsltu.mask.nxv16i16( define @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmsltu.mask.nxv1i32( define @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; 
CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -646,8 +646,8 @@ declare @llvm.riscv.vmsltu.mask.nxv2i32( define @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -697,8 +697,8 @@ declare @llvm.riscv.vmsltu.mask.nxv4i32( define @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -748,8 +748,8 @@ declare @llvm.riscv.vmsltu.mask.nxv8i32( define @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -799,8 +799,8 @@ declare @llvm.riscv.vmsltu.mask.nxv1i64( define @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -850,8 +850,8 @@ declare @llvm.riscv.vmsltu.mask.nxv2i64( define @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -901,8 +901,8 @@ declare @llvm.riscv.vmsltu.mask.nxv4i64( define @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -952,9 +952,9 @@ declare @llvm.riscv.vmsltu.mask.nxv1i8.i8( define @intrinsic_vmsltu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -999,9 +999,9 @@ declare @llvm.riscv.vmsltu.mask.nxv2i8.i8( define @intrinsic_vmsltu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1046,9 +1046,9 @@ declare @llvm.riscv.vmsltu.mask.nxv4i8.i8( define @intrinsic_vmsltu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1093,9 +1093,9 @@ declare @llvm.riscv.vmsltu.mask.nxv8i8.i8( define @intrinsic_vmsltu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1140,9 +1140,9 @@ declare @llvm.riscv.vmsltu.mask.nxv16i8.i8( define @intrinsic_vmsltu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1187,9 +1187,9 @@ declare @llvm.riscv.vmsltu.mask.nxv32i8.i8( define @intrinsic_vmsltu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1234,9 +1234,9 @@ declare @llvm.riscv.vmsltu.mask.nxv1i16.i16( define @intrinsic_vmsltu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1281,9 +1281,9 @@ declare @llvm.riscv.vmsltu.mask.nxv2i16.i16( define @intrinsic_vmsltu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1328,9 +1328,9 @@ declare @llvm.riscv.vmsltu.mask.nxv4i16.i16( define @intrinsic_vmsltu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: 
vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1375,9 +1375,9 @@ declare @llvm.riscv.vmsltu.mask.nxv8i16.i16( define @intrinsic_vmsltu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1422,9 +1422,9 @@ declare @llvm.riscv.vmsltu.mask.nxv16i16.i16( define @intrinsic_vmsltu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1469,9 +1469,9 @@ declare @llvm.riscv.vmsltu.mask.nxv1i32.i32( define @intrinsic_vmsltu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1516,9 +1516,9 @@ declare @llvm.riscv.vmsltu.mask.nxv2i32.i32( define @intrinsic_vmsltu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1563,9 +1563,9 @@ declare @llvm.riscv.vmsltu.mask.nxv4i32.i32( define @intrinsic_vmsltu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1610,9 +1610,9 @@ declare @llvm.riscv.vmsltu.mask.nxv8i32.i32( define @intrinsic_vmsltu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1684,9 +1684,9 @@ define @intrinsic_vmsltu_mask_vx_nxv1i64_i64( ; ; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: vmv1r.v v0, v9 -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmsltu.vx v10, v8, a0, v0.t ; RV64-NEXT: vmv.v.v v0, v10 ; RV64-NEXT: ret @@ -1758,9 +1758,9 @@ define @intrinsic_vmsltu_mask_vx_nxv2i64_i64( ; ; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmv1r.v v11, v0 ; RV64-NEXT: 
vmv1r.v v0, v10 -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmsltu.vx v11, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v11 ; RV64-NEXT: ret @@ -1832,9 +1832,9 @@ define @intrinsic_vmsltu_mask_vx_nxv4i64_i64( ; ; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmv1r.v v13, v0 ; RV64-NEXT: vmv1r.v v0, v12 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmsltu.vx v13, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v13 ; RV64-NEXT: ret @@ -1867,9 +1867,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, -15, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1902,9 +1902,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, -13, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1937,9 +1937,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, -11, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1972,9 +1972,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, -9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2007,9 +2007,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v11, v8, -7, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2042,9 +2042,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v13, v8, -5, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2077,9 +2077,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, -3, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; 
CHECK-NEXT: ret @@ -2112,9 +2112,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmsltu.vx v10, v8, zero, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2147,9 +2147,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2182,9 +2182,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v11, v8, 2, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2217,9 +2217,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v13, v8, 4, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2252,9 +2252,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 6, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2287,9 +2287,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 8, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2322,9 +2322,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v11, v8, 10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2357,9 +2357,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v13, v8, 12, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2392,9 +2392,9 @@ entry: define 
@intrinsic_vmsltu_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v10, v8, 14, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2427,9 +2427,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v11, v8, -16, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2462,9 +2462,9 @@ entry: define @intrinsic_vmsltu_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v13, v8, -14, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll index 1e21b847ed20d..ed41a18dcc8d3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll @@ -34,8 +34,8 @@ declare @llvm.riscv.vmsne.mask.nxv1i8( define @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -85,8 +85,8 @@ declare @llvm.riscv.vmsne.mask.nxv2i8( define @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -136,8 +136,8 @@ declare @llvm.riscv.vmsne.mask.nxv4i8( define @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -187,8 +187,8 @@ declare @llvm.riscv.vmsne.mask.nxv8i8( define @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -238,8 +238,8 @@ declare @llvm.riscv.vmsne.mask.nxv16i8( define @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; 
CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -289,8 +289,8 @@ declare @llvm.riscv.vmsne.mask.nxv32i8( define @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -340,8 +340,8 @@ declare @llvm.riscv.vmsne.mask.nxv1i16( define @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -391,8 +391,8 @@ declare @llvm.riscv.vmsne.mask.nxv2i16( define @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -442,8 +442,8 @@ declare @llvm.riscv.vmsne.mask.nxv4i16( define @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -493,8 +493,8 @@ declare @llvm.riscv.vmsne.mask.nxv8i16( define @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -544,8 +544,8 @@ declare @llvm.riscv.vmsne.mask.nxv16i16( define @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -595,8 +595,8 @@ declare @llvm.riscv.vmsne.mask.nxv1i32( define @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 @@ -646,8 +646,8 @@ declare @llvm.riscv.vmsne.mask.nxv2i32( define @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, 
a0, e32, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -697,8 +697,8 @@ declare @llvm.riscv.vmsne.mask.nxv4i32( define @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -748,8 +748,8 @@ declare @llvm.riscv.vmsne.mask.nxv8i32( define @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -799,8 +799,8 @@ declare @llvm.riscv.vmsne.mask.nxv1i64( define @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t ; CHECK-NEXT: vmv.v.v v0, v11 @@ -850,8 +850,8 @@ declare @llvm.riscv.vmsne.mask.nxv2i64( define @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t ; CHECK-NEXT: vmv1r.v v0, v14 @@ -901,8 +901,8 @@ declare @llvm.riscv.vmsne.mask.nxv4i64( define @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t ; CHECK-NEXT: vmv1r.v v0, v20 @@ -952,9 +952,9 @@ declare @llvm.riscv.vmsne.mask.nxv1i8.i8( define @intrinsic_vmsne_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -999,9 +999,9 @@ declare @llvm.riscv.vmsne.mask.nxv2i8.i8( define @intrinsic_vmsne_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1046,9 +1046,9 @@ declare @llvm.riscv.vmsne.mask.nxv4i8.i8( define @intrinsic_vmsne_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1093,9 +1093,9 @@ declare @llvm.riscv.vmsne.mask.nxv8i8.i8( define @intrinsic_vmsne_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1140,9 +1140,9 @@ declare @llvm.riscv.vmsne.mask.nxv16i8.i8( define @intrinsic_vmsne_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1187,9 +1187,9 @@ declare @llvm.riscv.vmsne.mask.nxv32i8.i8( define @intrinsic_vmsne_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1234,9 +1234,9 @@ declare @llvm.riscv.vmsne.mask.nxv1i16.i16( define @intrinsic_vmsne_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1281,9 +1281,9 @@ declare @llvm.riscv.vmsne.mask.nxv2i16.i16( define @intrinsic_vmsne_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1328,9 +1328,9 @@ declare @llvm.riscv.vmsne.mask.nxv4i16.i16( define @intrinsic_vmsne_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1375,9 +1375,9 @@ declare @llvm.riscv.vmsne.mask.nxv8i16.i16( define @intrinsic_vmsne_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; 
CHECK-NEXT: ret @@ -1422,9 +1422,9 @@ declare @llvm.riscv.vmsne.mask.nxv16i16.i16( define @intrinsic_vmsne_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1469,9 +1469,9 @@ declare @llvm.riscv.vmsne.mask.nxv1i32.i32( define @intrinsic_vmsne_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1516,9 +1516,9 @@ declare @llvm.riscv.vmsne.mask.nxv2i32.i32( define @intrinsic_vmsne_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -1563,9 +1563,9 @@ declare @llvm.riscv.vmsne.mask.nxv4i32.i32( define @intrinsic_vmsne_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -1610,9 +1610,9 @@ declare @llvm.riscv.vmsne.mask.nxv8i32.i32( define @intrinsic_vmsne_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -1684,9 +1684,9 @@ define @intrinsic_vmsne_mask_vx_nxv1i64_i64( ; ; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: vmv1r.v v0, v9 -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmsne.vx v10, v8, a0, v0.t ; RV64-NEXT: vmv.v.v v0, v10 ; RV64-NEXT: ret @@ -1758,9 +1758,9 @@ define @intrinsic_vmsne_mask_vx_nxv2i64_i64( ; ; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmv1r.v v11, v0 ; RV64-NEXT: vmv1r.v v0, v10 -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmsne.vx v11, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v11 ; RV64-NEXT: ret @@ -1832,9 +1832,9 @@ define @intrinsic_vmsne_mask_vx_nxv4i64_i64( ; ; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64: ; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmv1r.v v13, v0 ; RV64-NEXT: vmv1r.v v0, v12 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmsne.vx v13, v8, a0, v0.t ; RV64-NEXT: vmv1r.v v0, v13 ; 
RV64-NEXT: ret @@ -1867,9 +1867,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1902,9 +1902,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1937,9 +1937,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1972,9 +1972,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2007,9 +2007,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2042,9 +2042,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2077,9 +2077,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2112,9 +2112,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2147,9 +2147,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2182,9 +2182,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2217,9 +2217,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2252,9 +2252,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -2287,9 +2287,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2322,9 +2322,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2357,9 +2357,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret @@ -2392,9 +2392,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -2427,9 +2427,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: ret @@ -2462,9 +2462,9 @@ entry: define @intrinsic_vmsne_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll index b0a28e6e455b0..4b818a2b1e58f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll @@ -31,9 +31,9 @@ declare @llvm.riscv.vmsof.mask.nxv1i1( define @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmsof.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -73,9 +73,9 @@ declare @llvm.riscv.vmsof.mask.nxv2i1( define @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmsof.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -115,9 +115,9 @@ declare @llvm.riscv.vmsof.mask.nxv4i1( define @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmsof.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -157,9 +157,9 @@ declare @llvm.riscv.vmsof.mask.nxv8i1( define @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsof.m v10, v8, v0.t ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -199,9 +199,9 @@ declare @llvm.riscv.vmsof.mask.nxv16i1( define @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmsof.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -241,9 +241,9 @@ declare @llvm.riscv.vmsof.mask.nxv32i1( define @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; 
CHECK-NEXT: vmsof.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -283,9 +283,9 @@ declare @llvm.riscv.vmsof.mask.nxv64i1( define @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vmsof.m v10, v8, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll index 7f248a39b54fa..6345b90db23b8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll @@ -49,8 +49,8 @@ define @vadd_same_passthru( %passthru, @unfoldable_diff_avl_unknown( %passthru, %a, %b, iXLen %vl1, iXLen %vl2) { ; CHECK-LABEL: unfoldable_diff_avl_unknown: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vadd.vv v14, v10, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vmv.v.v v8, v14 diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll index f7ca65801dc87..b316f5f878816 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll @@ -5,9 +5,9 @@ define iXLen @bool_vec( %src, %m, i32 %evl) { ; RV32-LABEL: bool_vec: ; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vmv1r.v v9, v0 ; RV32-NEXT: vmv1r.v v0, v8 -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vfirst.m a1, v9, v0.t ; RV32-NEXT: bltz a1, .LBB0_2 ; RV32-NEXT: # %bb.1: @@ -17,6 +17,7 @@ define iXLen @bool_vec( %src, %m, i32 %evl) { ; ; RV64-LABEL: bool_vec: ; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-NEXT: vmv1r.v v9, v0 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 @@ -35,14 +36,15 @@ define iXLen @bool_vec( %src, %m, i32 %evl) { define iXLen @bool_vec_zero_poison( %src, %m, i32 %evl) { ; RV32-LABEL: bool_vec_zero_poison: ; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vmv1r.v v9, v0 ; RV32-NEXT: vmv1r.v v0, v8 -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vfirst.m a0, v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: bool_vec_zero_poison: ; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64-NEXT: vmv1r.v v9, v0 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-select.ll b/llvm/test/CodeGen/RISCV/rvv/vp-select.ll index c8a048971a803..652baf6692341 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-select.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-select.ll @@ -12,6 +12,7 @@ define @all_ones( %true, define @all_zeroes( %true, %false, i32 %evl) { ; CHECK-LABEL: all_zeroes: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv1i64( splat (i1 false), %true, %false, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll index 2a137099bcb0f..745cec4e7c4f6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll @@ -10,9 +10,9 @@ declare <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1>, <16 
x i1>, i32, define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -34,9 +34,9 @@ define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %ev define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v2i1_negative_offset: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -58,9 +58,9 @@ define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb, define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v2i1_masked: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -83,9 +83,9 @@ define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1> define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -107,9 +107,9 @@ define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %ev define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v4i1_negative_offset: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -131,9 +131,9 @@ define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb, define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v4i1_masked: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -156,9 +156,9 @@ define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1> define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 
; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -180,9 +180,9 @@ define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %ev define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v8i1_negative_offset: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -204,9 +204,9 @@ define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb, define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v8i1_masked: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -229,9 +229,9 @@ define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1> define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -253,9 +253,9 @@ define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v16i1_negative_offset: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -277,9 +277,9 @@ define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1> define <16 x i1> @test_vp_splice_v16i1_masked(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_v16i1_masked: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll index fc446d0a3a88a..3b0b183537468 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll @@ -13,9 +13,9 @@ declare @llvm.experimental.vp.splice.nxv64i1( @test_vp_splice_nxv1i1( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv1i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: 
vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -37,9 +37,9 @@ define @test_vp_splice_nxv1i1( %va, @test_vp_splice_nxv1i1_negative_offset( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv1i1_negative_offset: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -61,9 +61,9 @@ define @test_vp_splice_nxv1i1_negative_offset( @test_vp_splice_nxv1i1_masked( %va, %vb, %mask, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv1i1_masked: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma @@ -86,9 +86,9 @@ define @test_vp_splice_nxv1i1_masked( %va, @test_vp_splice_nxv2i1( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -110,9 +110,9 @@ define @test_vp_splice_nxv2i1( %va, @test_vp_splice_nxv2i1_negative_offset( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv2i1_negative_offset: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -134,9 +134,9 @@ define @test_vp_splice_nxv2i1_negative_offset( @test_vp_splice_nxv2i1_masked( %va, %vb, %mask, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv2i1_masked: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma @@ -159,9 +159,9 @@ define @test_vp_splice_nxv2i1_masked( %va, @test_vp_splice_nxv4i1( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -183,9 +183,9 @@ define @test_vp_splice_nxv4i1( %va, @test_vp_splice_nxv4i1_negative_offset( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv4i1_negative_offset: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ 
-207,9 +207,9 @@ define @test_vp_splice_nxv4i1_negative_offset( @test_vp_splice_nxv4i1_masked( %va, %vb, %mask, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv4i1_masked: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -232,9 +232,9 @@ define @test_vp_splice_nxv4i1_masked( %va, @test_vp_splice_nxv8i1( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -256,9 +256,9 @@ define @test_vp_splice_nxv8i1( %va, @test_vp_splice_nxv8i1_negative_offset( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv8i1_negative_offset: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -280,9 +280,9 @@ define @test_vp_splice_nxv8i1_negative_offset( @test_vp_splice_nxv8i1_masked( %va, %vb, %mask, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv8i1_masked: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -305,9 +305,9 @@ define @test_vp_splice_nxv8i1_masked( %va, @test_vp_splice_nxv16i1( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -329,9 +329,9 @@ define @test_vp_splice_nxv16i1( %va, @test_vp_splice_nxv16i1_negative_offset( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv16i1_negative_offset: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -353,9 +353,9 @@ define @test_vp_splice_nxv16i1_negative_offset( @test_vp_splice_nxv16i1_masked( %va, %vb, %mask, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv16i1_masked: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma @@ -379,9 +379,9 @@ define @test_vp_splice_nxv16i1_masked( %va, define @test_vp_splice_nxv32i1( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: 
test_vp_splice_nxv32i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -403,9 +403,9 @@ define @test_vp_splice_nxv32i1( %va, @test_vp_splice_nxv32i1_negative_offset( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv32i1_negative_offset: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -427,9 +427,9 @@ define @test_vp_splice_nxv32i1_negative_offset( @test_vp_splice_nxv32i1_masked( %va, %vb, %mask, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv32i1_masked: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma @@ -453,9 +453,9 @@ define @test_vp_splice_nxv32i1_masked( %va, define @test_vp_splice_nxv64i1( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv64i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -477,9 +477,9 @@ define @test_vp_splice_nxv64i1( %va, @test_vp_splice_nxv64i1_negative_offset( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv64i1_negative_offset: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma @@ -501,9 +501,9 @@ define @test_vp_splice_nxv64i1_negative_offset( @test_vp_splice_nxv64i1_masked( %va, %vb, %mask, i32 zeroext %evla, i32 zeroext %evlb) { ; CHECK-LABEL: test_vp_splice_nxv64i1_masked: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll index 3e423c8ec9903..ca52ce6e2c4a1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll @@ -258,12 +258,12 @@ declare @llvm.vp.gather.nxv32i8.nxv32p0(, define @vpgather_baseidx_nxv32i8(ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv32i8: ; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; RV32-NEXT: vmv1r.v v12, v0 ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a2, a3, 1 ; RV32-NEXT: srli a3, a3, 2 ; RV32-NEXT: sub a4, a1, a2 -; RV32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma ; RV32-NEXT: vslidedown.vx v0, v0, a3 ; RV32-NEXT: sltu a3, a1, a4 ; RV32-NEXT: addi a3, a3, -1 @@ -285,12 
+285,12 @@ define @vpgather_baseidx_nxv32i8(ptr %base, @llvm.vp.gather.nxv16f64.nxv16p0( @vpgather_nxv16f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv16f64: ; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV32-NEXT: vmv1r.v v24, v0 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: sub a2, a0, a1 ; RV32-NEXT: srli a3, a1, 3 -; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vx v0, v0, a3 ; RV32-NEXT: sltu a3, a0, a2 ; RV32-NEXT: addi a3, a3, -1 @@ -2480,11 +2480,11 @@ define @vpgather_nxv16f64( %ptrs, @vpgather_nxv16f64( %ptrs, @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv16i16_nxv16f64: ; RV32: # %bb.0: -; RV32-NEXT: vmv1r.v v12, v0 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma +; RV32-NEXT: vmv1r.v v12, v0 ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vsll.vi v24, v16, 3 @@ -2531,8 +2531,8 @@ define @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64: ; RV32: # %bb.0: -; RV32-NEXT: vmv1r.v v12, v0 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma +; RV32-NEXT: vmv1r.v v12, v0 ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vsll.vi v24, v16, 3 @@ -2589,8 +2589,8 @@ define @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base ; ; RV64-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64: ; RV64: # %bb.0: -; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: vsext.vf4 v16, v10 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vsll.vi v16, v16, 3 @@ -2623,8 +2623,8 @@ define @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base define @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64: ; RV32: # %bb.0: -; RV32-NEXT: vmv1r.v v12, v0 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma +; RV32-NEXT: vmv1r.v v12, v0 ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vsll.vi v24, v16, 3 @@ -2648,8 +2648,8 @@ define @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base ; ; RV64-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64: ; RV64: # %bb.0: -; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: vsetvli a2, zero, e32, m8, ta, ma +; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: vzext.vf2 v16, v8 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vsll.vi v24, v16, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll index bd7ea6c19d0b3..0844180e49612 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll @@ -522,12 +522,12 @@ declare @llvm.vp.load.nxv16f64.p0(ptr, define @vpload_nxv16f64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv16f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v8, v0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: sub a3, a1, a2 ; CHECK-NEXT: slli a4, a2, 3 ; CHECK-NEXT: srli a5, a2, 3 -; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a5 ; CHECK-NEXT: sltu a5, a1, a3 ; CHECK-NEXT: addi a5, a5, -1 @@ -561,6 +561,7 @@ declare @llvm.vector.extract.nxv16f64( @vpload_nxv17f64(ptr %ptr, ptr %out, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv17f64: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v8, v0 ; CHECK-NEXT: csrr a3, 
vlenb ; CHECK-NEXT: slli a5, a3, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll index f029d0b1b01bc..88a8ebcc90054 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll @@ -361,12 +361,12 @@ define @vpmerge_vv_nxv128i8( %va, @vpmerge_vv_nxv128i8( %va, @vpmerge_vx_nxv128i8(i8 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -429,8 +429,8 @@ define @vpmerge_vx_nxv128i8(i8 %a, %vb, define @vpmerge_vi_nxv128i8( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll index 8978dc268d4e5..7e7da529bf3d7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll @@ -468,6 +468,7 @@ define void @vpstore_nxv17f64( %val, ptr %ptr, , , i define zeroext i1 @vpreduce_or_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -39,9 +39,9 @@ declare i1 @llvm.vp.reduce.xor.nxv1i1(i1, , , define zeroext i1 @vpreduce_xor_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -71,9 +71,9 @@ declare i1 @llvm.vp.reduce.or.nxv2i1(i1, , , i define zeroext i1 @vpreduce_or_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -87,9 +87,9 @@ declare i1 @llvm.vp.reduce.xor.nxv2i1(i1, , , define zeroext i1 @vpreduce_xor_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -119,9 +119,9 @@ declare i1 @llvm.vp.reduce.or.nxv4i1(i1, , , i define zeroext i1 @vpreduce_or_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -135,9 +135,9 @@ declare i1 
@llvm.vp.reduce.xor.nxv4i1(i1, , , define zeroext i1 @vpreduce_xor_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -167,9 +167,9 @@ declare i1 @llvm.vp.reduce.or.nxv8i1(i1, , , i define zeroext i1 @vpreduce_or_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -183,9 +183,9 @@ declare i1 @llvm.vp.reduce.xor.nxv8i1(i1, , , define zeroext i1 @vpreduce_xor_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -215,9 +215,9 @@ declare i1 @llvm.vp.reduce.or.nxv16i1(i1, , define zeroext i1 @vpreduce_or_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -231,9 +231,9 @@ declare i1 @llvm.vp.reduce.xor.nxv16i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -263,9 +263,9 @@ declare i1 @llvm.vp.reduce.or.nxv32i1(i1, , define zeroext i1 @vpreduce_or_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv32i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -279,9 +279,9 @@ declare i1 @llvm.vp.reduce.xor.nxv32i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv32i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -295,9 +295,9 @@ declare i1 @llvm.vp.reduce.or.nxv40i1(i1, , define zeroext i1 @vpreduce_or_nxv40i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv40i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -327,9 +327,9 @@ declare i1 @llvm.vp.reduce.or.nxv64i1(i1, , define zeroext i1 @vpreduce_or_nxv64i1(i1 zeroext %s, %v, 
%m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv64i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -343,9 +343,9 @@ declare i1 @llvm.vp.reduce.xor.nxv64i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv64i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -359,6 +359,7 @@ declare i1 @llvm.vp.reduce.or.nxv128i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv128i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 @@ -390,9 +391,9 @@ declare i1 @llvm.vp.reduce.add.nxv1i1(i1, , , define zeroext i1 @vpreduce_add_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -406,9 +407,9 @@ declare i1 @llvm.vp.reduce.add.nxv2i1(i1, , , define zeroext i1 @vpreduce_add_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -422,9 +423,9 @@ declare i1 @llvm.vp.reduce.add.nxv4i1(i1, , , define zeroext i1 @vpreduce_add_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -438,9 +439,9 @@ declare i1 @llvm.vp.reduce.add.nxv8i1(i1, , , define zeroext i1 @vpreduce_add_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -454,9 +455,9 @@ declare i1 @llvm.vp.reduce.add.nxv16i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -470,9 +471,9 @@ declare i1 @llvm.vp.reduce.add.nxv32i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv32i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi 
a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -486,9 +487,9 @@ declare i1 @llvm.vp.reduce.add.nxv64i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv64i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: andi a1, a1, 1 ; CHECK-NEXT: xor a0, a1, a0 @@ -615,9 +616,9 @@ declare i1 @llvm.vp.reduce.smin.nxv1i1(i1, , , define zeroext i1 @vpreduce_smin_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -631,9 +632,9 @@ declare i1 @llvm.vp.reduce.smin.nxv2i1(i1, , , define zeroext i1 @vpreduce_smin_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -647,9 +648,9 @@ declare i1 @llvm.vp.reduce.smin.nxv4i1(i1, , , define zeroext i1 @vpreduce_smin_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -663,9 +664,9 @@ declare i1 @llvm.vp.reduce.smin.nxv8i1(i1, , , define zeroext i1 @vpreduce_smin_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -679,9 +680,9 @@ declare i1 @llvm.vp.reduce.smin.nxv16i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -695,9 +696,9 @@ declare i1 @llvm.vp.reduce.smin.nxv32i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv32i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -711,9 +712,9 @@ declare i1 @llvm.vp.reduce.smin.nxv64i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv64i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -727,9 +728,9 @@ declare i1 @llvm.vp.reduce.umax.nxv1i1(i1, , , define zeroext i1 @vpreduce_umax_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vpreduce_umax_nxv1i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -743,9 +744,9 @@ declare i1 @llvm.vp.reduce.umax.nxv2i1(i1, , , define zeroext i1 @vpreduce_umax_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv2i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -759,9 +760,9 @@ declare i1 @llvm.vp.reduce.umax.nxv4i1(i1, , , define zeroext i1 @vpreduce_umax_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv4i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -775,9 +776,9 @@ declare i1 @llvm.vp.reduce.umax.nxv8i1(i1, , , define zeroext i1 @vpreduce_umax_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv8i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -791,9 +792,9 @@ declare i1 @llvm.vp.reduce.umax.nxv16i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv16i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -807,9 +808,9 @@ declare i1 @llvm.vp.reduce.umax.nxv32i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv32i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 @@ -823,9 +824,9 @@ declare i1 @llvm.vp.reduce.umax.nxv64i1(i1, , %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv64i1: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll index 1779fc12095e8..7b460f2c058f8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll @@ -22,8 +22,8 @@ define internal void @foo( %v15, %0, This Inner Loop Header: Depth=1 ; NOSUBREG-NEXT: vl1r.v v9, (zero) -; NOSUBREG-NEXT: vmv1r.v v13, v12 ; NOSUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma +; NOSUBREG-NEXT: vmv1r.v v13, v12 ; NOSUBREG-NEXT: vrgatherei16.vv v13, v9, v10 ; NOSUBREG-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; NOSUBREG-NEXT: vand.vv v9, v8, v13 @@ -42,8 +42,8 @@ define internal 
void @foo( %v15, %0, This Inner Loop Header: Depth=1 ; SUBREG-NEXT: vl1r.v v9, (zero) -; SUBREG-NEXT: vmv1r.v v13, v12 ; SUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma +; SUBREG-NEXT: vmv1r.v v13, v12 ; SUBREG-NEXT: vrgatherei16.vv v13, v9, v10 ; SUBREG-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; SUBREG-NEXT: vand.vv v9, v8, v13 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll index 12c439346e356..3421c6af334bc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll @@ -572,8 +572,8 @@ declare @llvm.vp.sadd.sat.nxv128i8(, @vsadd_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vi_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -1350,11 +1350,11 @@ declare @llvm.vp.sadd.sat.nxv32i32(, @vsadd_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vi_nxv32i32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: sltu a3, a0, a2 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll index d962f703abfd2..180e0799044e8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll @@ -571,8 +571,8 @@ declare @llvm.vp.uadd.sat.nxv128i8(, @vsaddu_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vi_nxv128i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma +; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -1349,11 +1349,11 @@ declare @llvm.vp.uadd.sat.nxv32i32(, @vsaddu_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vi_nxv32i32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-NEXT: sub a2, a0, a1 ; CHECK-NEXT: sltu a3, a0, a2 diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll index a63d14e8b6c04..b793531e34069 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll @@ -126,6 +126,7 @@ define @vmerge_truelhs_nxv8bf16_0( %v define @vmerge_falselhs_nxv8bf16_0( %va, %vb) { ; CHECK-LABEL: vmerge_falselhs_nxv8bf16_0: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %vc = select zeroinitializer, %va, %vb diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll index 1fc33dc73a27d..be2fc6955294d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll @@ -175,6 +175,7 @@ define @vmerge_truelhs_nxv8f16_0( %va, @vmerge_falselhs_nxv8f16_0( %va, %vb) { ; CHECK-LABEL: vmerge_falselhs_nxv8f16_0: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %vc = select zeroinitializer, %va, %vb diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll index 9cafa28eb429f..4ec9e344e6278 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll @@ -803,6 +803,7 @@ define @vmerge_truelhs_nxv8i64_0( %va, @vmerge_falselhs_nxv8i64_0( %va, %vb) { ; CHECK-LABEL: vmerge_falselhs_nxv8i64_0: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret %vc = select zeroinitializer, %va, %vb diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll index bb51f0592dc17..d4ebe27420d7b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll @@ -362,6 +362,7 @@ define @select_nxv32i32( %a, @select_nxv32i32( %a, @select_evl_nxv32i32( %a, @select_evl_nxv32i32( %a, @select_nxv16f64( %a, @select_nxv16f64( %a, @select_cond_x_cond( %x, @select_undef_T_F( %x, %y, i32 zeroext %evl) { ; CHECK-LABEL: select_undef_T_F: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret %a = call @llvm.vp.select.nxv2i1( poison, %x, %y, i32 %evl) @@ -852,6 +853,7 @@ define @select_undef_undef_F( %x, i32 zeroext define @select_unknown_undef_F( %x, %y, i32 zeroext %evl) { ; CHECK-LABEL: select_unknown_undef_F: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret %a = call @llvm.vp.select.nxv2i1( %x, undef, %y, i32 %evl) @@ -861,6 +863,7 @@ define @select_unknown_undef_F( %x, @select_unknown_T_undef( %x, %y, i32 zeroext %evl) { ; CHECK-LABEL: select_unknown_T_undef: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret %a = call @llvm.vp.select.nxv2i1( %x, %y, poison, i32 %evl) @@ -870,6 +873,7 @@ define @select_unknown_T_undef( %x, @select_false_T_F( %x, %y, %z, i32 zeroext %evl) { ; CHECK-LABEL: select_false_T_F: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret %a = call @llvm.vp.select.nxv2i1( zeroinitializer, %y, %z, i32 %evl) @@ -879,6 +883,7 @@ define @select_false_T_F( %x, @select_unknown_T_T( %x, %y, i32 zeroext %evl) { ; CHECK-LABEL: select_unknown_T_T: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret %a = call @llvm.vp.select.nxv2i1( %x, %y, %y, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll index 33acfb7dceb94..d8620d0658261 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll @@ -18,11 +18,11 @@ declare @llvm.riscv.vle.mask.nxv1i64( define <2 x double> @fixed_length(<2 x double> %a, <2 x double> %b) nounwind { ; CHECK-LABEL: fixed_length: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: # kill: def $v11 killed $v10 ; CHECK-NEXT: # kill: def $v9 killed $v8 ; CHECK-NEXT: # implicit-def: $v9 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v9, v8, v10 ; CHECK-NEXT: # implicit-def: $v8 ; CHECK-NEXT: vfadd.vv v8, v9, v10 @@ -36,9 +36,9 @@ entry: define @scalable( %a, %b) nounwind { ; CHECK-LABEL: scalable: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: # implicit-def: $v9 -; CHECK-NEXT: vsetvli a0, 
zero, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v9, v8, v10 ; CHECK-NEXT: # implicit-def: $v8 ; CHECK-NEXT: vfadd.vv v8, v9, v10 @@ -53,8 +53,8 @@ entry: define @intrinsic_same_vlmax( %a, %b) nounwind { ; CHECK-LABEL: intrinsic_same_vlmax: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: # implicit-def: $v9 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfadd.vv v9, v8, v10 @@ -81,8 +81,8 @@ entry: define @intrinsic_same_avl_imm( %a, %b) nounwind { ; CHECK-LABEL: intrinsic_same_avl_imm: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vsetivli a0, 2, e32, mf2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: # implicit-def: $v9 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfadd.vv v9, v8, v10 @@ -108,6 +108,7 @@ entry: define @intrinsic_same_avl_reg(i64 %avl, %a, %b) nounwind { ; CHECK-LABEL: intrinsic_same_avl_reg: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma ; CHECK-NEXT: # implicit-def: $v9 @@ -135,6 +136,7 @@ entry: define @intrinsic_diff_avl_reg(i64 %avl, i64 %avl2, %a, %b) nounwind { ; CHECK-LABEL: intrinsic_diff_avl_reg: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma ; CHECK-NEXT: # implicit-def: $v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir index bf93f5cc1f6f2..55cefbbea81b2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir @@ -835,8 +835,8 @@ body: | ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3: + ; CHECK-NEXT: dead [[PseudoVSETVLIX0_1:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v0 = COPY %mask - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: PseudoVSOXEI64_V_M1_MF8_MASK [[COPY]], %b, %idxs, $v0, -1, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoRET bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll index b0cb6bc6125dd..8b48dc43eca29 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll @@ -377,8 +377,8 @@ entry: define @test19( %a, double %b) nounwind { ; CHECK-LABEL: test19: ; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e64, m8, tu, ma ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v9, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll index d3b905ef897b1..3c91131fe4d12 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll @@ -151,11 +151,11 @@ declare @llvm.vp.sext.nxv32i32.nxv32i8(, < define @vsext_nxv32i8_nxv32i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv32i8_nxv32i32: ; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 2 ; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a3, 
zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index 581cc666b6cbd..44d3ee96f5e61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -508,11 +508,11 @@ declare @llvm.vp.sitofp.nxv32f16.nxv32i32( @vsitofp_nxv32f16_nxv32i32( %va, %m, i32 zeroext %evl) {
; ZVFH-LABEL: vsitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFH-NEXT: vmv1r.v v24, v0
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
; ZVFH-NEXT: slli a1, a1, 1
-; ZVFH-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; ZVFH-NEXT: vslidedown.vx v0, v0, a2
; ZVFH-NEXT: sub a2, a0, a1
; ZVFH-NEXT: sltu a3, a0, a2
@@ -532,11 +532,11 @@ define @vsitofp_nxv32f16_nxv32i32( %va,
;
; ZVFHMIN-LABEL: vsitofp_nxv32f16_nxv32i32:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: srli a2, a1, 2
; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: sub a2, a0, a1
; ZVFHMIN-NEXT: sltu a3, a0, a2
@@ -566,11 +566,11 @@ declare @llvm.vp.sitofp.nxv32f32.nxv32i32( @vsitofp_nxv32f32_nxv32i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv32f32_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index f9c24eeec31c5..7ee6ea9e19df0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -590,8 +590,8 @@ declare @llvm.vp.ssub.sat.nxv128i8(, @vssub_vi_nxv128i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -1392,11 +1392,11 @@ declare @llvm.vp.ssub.sat.nxv32i32(, @vssub_vi_nxv32i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index 04a1b522a8a33..7674a457ca961 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -588,8 +588,8 @@ declare @llvm.vp.usub.sat.nxv128i8(, @vssubu_vi_nxv128i8( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -1390,11 +1390,11 @@ declare @llvm.vp.usub.sat.nxv32i32(, @vssubu_vi_nxv32i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index e62b7a0039638..fd5bf4ebcede8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -157,11 +157,11 @@ declare @llvm.vp.trunc.nxv15i16.nxv15i64(
define @vtrunc_nxv15i16_nxv15i64( %a, %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
; CHECK-NEXT: sltu a2, a0, a3
; CHECK-NEXT: addi a2, a2, -1
@@ -214,11 +214,11 @@ declare @llvm.vp.trunc.nxv32i7.nxv32i32(,
define @vtrunc_nxv32i7_nxv32i32( %a, %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i7_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
@@ -248,11 +248,11 @@ declare @llvm.vp.trunc.nxv32i8.nxv32i32(,
define @vtrunc_nxv32i8_nxv32i32( %a, %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i8_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
@@ -288,6 +288,7 @@ define @vtrunc_nxv32i64_nxv32i32( %a, @vtrunc_nxv32i64_nxv32i32( %a, @llvm.vp.uitofp.nxv32f16.nxv32i32( @vuitofp_nxv32f16_nxv32i32( %va, %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFH-NEXT: vmv1r.v v24, v0
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
; ZVFH-NEXT: slli a1, a1, 1
-; ZVFH-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; ZVFH-NEXT: vslidedown.vx v0, v0, a2
; ZVFH-NEXT: sub a2, a0, a1
; ZVFH-NEXT: sltu a3, a0, a2
@@ -524,11 +524,11 @@ define @vuitofp_nxv32f16_nxv32i32( %va,
;
; ZVFHMIN-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: srli a2, a1, 2
; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: sub a2, a0, a1
; ZVFHMIN-NEXT: sltu a3, a0, a2
@@ -558,11 +558,11 @@ declare @llvm.vp.uitofp.nxv32f32.nxv32i32( @vuitofp_nxv32f32_nxv32i32( %va, %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
index 10e655c844540..934d7eb43ac2a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
@@ -151,11 +151,11 @@ declare @llvm.vp.zext.nxv32i32.nxv32i8(, <
define @vzext_nxv32i8_nxv32i32( %a, %m, i32 zeroext %vl) {
; CHECK-LABEL: vzext_nxv32i8_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2