diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 8402a41252bd3..1dd12ab396fd8 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -1045,7 +1045,8 @@ static bool isSupportedInstr(const MachineInstr &MI) {
   case RISCV::VSLIDEUP_VI:
   case RISCV::VSLIDEDOWN_VX:
   case RISCV::VSLIDEDOWN_VI:
-  // TODO: Handle v[f]slide1up, but not v[f]slide1down.
+  case RISCV::VSLIDE1UP_VX:
+  case RISCV::VFSLIDE1UP_VF:
   // Vector Single-Width Floating-Point Add/Subtract Instructions
   case RISCV::VFADD_VF:
   case RISCV::VFADD_VV:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index f84eebe1c6d6a..e6407b322c447 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -3514,6 +3514,92 @@ define <vscale x 4 x i32> @vslidedown_vi(<vscale x 4 x i32> %a, iXLen %vl) {
   ret <vscale x 4 x i32> %2
 }
 
+define <vscale x 4 x i32> @vslide1up_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) {
+; NOVLOPT-LABEL: vslide1up_vx:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vslide1up.vx v10, v8, a0
+; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v10, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vslide1up_vx:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT:    vslide1up.vx v10, v8, a0
+; VLOPT-NEXT:    vadd.vv v8, v10, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vslide1up(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x float> @vfslide1up_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) {
+; NOVLOPT-LABEL: vfslide1up_vf:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vfslide1up.vf v10, v8, fa0
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vfadd.vv v8, v10, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vfslide1up_vf:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT:    vfslide1up.vf v10, v8, fa0
+; VLOPT-NEXT:    vfadd.vv v8, v10, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x float> @llvm.riscv.vfslide1up(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1)
+  %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %1, iXLen 7, iXLen %vl)
+  ret <vscale x 4 x float> %2
+}
+
+; Negative test - not safe to reduce vl
+
+define <vscale x 4 x i32> @vslide1down_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) {
+; NOVLOPT-LABEL: vslide1down_vx:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vslide1down.vx v8, v8, a0
+; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v8, v8
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vslide1down_vx:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT:    vslide1down.vx v8, v8, a0
+; VLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT:    vadd.vv v8, v8, v8
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vslide1down(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+; Negative test - not safe to reduce vl
+
+define <vscale x 4 x float> @vfslide1down_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) {
+; NOVLOPT-LABEL: vfslide1down_vf:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vfslide1down.vf v8, v8, fa0
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vfadd.vv v8, v8, v8
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vfslide1down_vf:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT:    vfslide1down.vf v8, v8, fa0
+; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT:    vfadd.vv v8, v8, v8
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x float> @llvm.riscv.vfslide1down(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1)
+  %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %1, iXLen 7, iXLen %vl)
+  ret <vscale x 4 x float> %2
+}
+
 define <vscale x 4 x float> @vfadd_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) {
 ; NOVLOPT-LABEL: vfadd_vv:
 ; NOVLOPT:       # %bb.0: