From c201c782d66df975d38a90e6a56db1af9cecc774 Mon Sep 17 00:00:00 2001
From: Luke Lau
Date: Wed, 2 Jul 2025 15:57:48 +0100
Subject: [PATCH 1/2] Precommit tests

---
 llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 88 ++++++++++++++++++++
 1 file changed, 88 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 1cbb980aebffc..9b0a48ebfb585 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -3434,6 +3434,94 @@ define <vscale x 4 x i32> @vid.v(<vscale x 4 x i32> %c, iXLen %vl) {
   ret <vscale x 4 x i32> %2
 }
 
+define <vscale x 4 x i32> @vslide1up_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) {
+; NOVLOPT-LABEL: vslide1up_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vslide1up.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT: vadd.vv v8, v10, v10
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vslide1up_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vslide1up.vx v10, v8, a0
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vadd.vv v8, v10, v10
+; VLOPT-NEXT: ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vslide1up(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x float> @vfslide1up_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) {
+; NOVLOPT-LABEL: vfslide1up_vf:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vfslide1up.vf v10, v8, fa0
+; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v10, v10
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfslide1up_vf:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vfslide1up.vf v10, v8, fa0
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v10, v10
+; VLOPT-NEXT: ret
+  %1 = call <vscale x 4 x float> @llvm.riscv.vfslide1up(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1)
+  %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %1, iXLen 7, iXLen %vl)
+  ret <vscale x 4 x float> %2
+}
+
+; Negative test – not safe to reduce vl
+
+define <vscale x 4 x i32> @vslide1down_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) {
+; NOVLOPT-LABEL: vslide1down_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vslide1down.vx v8, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT: vadd.vv v8, v8, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vslide1down_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vslide1down.vx v8, v8, a0
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vadd.vv v8, v8, v8
+; VLOPT-NEXT: ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vslide1down(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+; Negative test – not safe to reduce vl
+
+define <vscale x 4 x float> @vfslide1down_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) {
+; NOVLOPT-LABEL: vfslide1down_vf:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vfslide1down.vf v8, v8, fa0
+; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v8, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfslide1down_vf:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vfslide1down.vf v8, v8, fa0
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v8, v8
+; VLOPT-NEXT: ret
+  %1 = call <vscale x 4 x float> @llvm.riscv.vfslide1down(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1)
+  %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %1, iXLen 7, iXLen %vl)
+  ret <vscale x 4 x float> %2
+}
+
 define <vscale x 4 x float> @vfadd_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) {
 ; NOVLOPT-LABEL: vfadd_vv:
 ; NOVLOPT: # %bb.0:
From 573fa2f3f8495ba52d48d82e67f0e105d10c29a0 Mon Sep 17 00:00:00 2001
From: Luke Lau
Date: Wed, 2 Jul 2025 16:00:14 +0100
Subject: [PATCH 2/2] [RISCV][VLOPT] Support v[f]slide1up.v{x,f}

Similarly to #146710, for vslide1up the vl only determines which
destination elements are written to, so we can safely reduce its AVL.

We cannot do this for vslide1down, as its vl determines which lane the
new element is inserted into, so some negative tests have been added.
---
 llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp   | 3 +++
 llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 6 ++----
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index d7aa85048eeda..d030dd1f97fb4 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -1040,6 +1040,9 @@ static bool isSupportedInstr(const MachineInstr &MI) {
   case RISCV::VMSOF_M:
   case RISCV::VIOTA_M:
   case RISCV::VID_V:
+  // Vector Slide Instructions
+  case RISCV::VSLIDE1UP_VX:
+  case RISCV::VFSLIDE1UP_VF:
   // Vector Single-Width Floating-Point Add/Subtract Instructions
   case RISCV::VFADD_VF:
   case RISCV::VFADD_VV:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 9b0a48ebfb585..8d8f78989945c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -3445,9 +3445,8 @@ define <vscale x 4 x i32> @vslide1up_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %
 ;
 ; VLOPT-LABEL: vslide1up_vx:
 ; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; VLOPT-NEXT: vslide1up.vx v10, v8, a0
 ; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vslide1up.vx v10, v8, a0
 ; VLOPT-NEXT: vadd.vv v8, v10, v10
 ; VLOPT-NEXT: ret
   %1 = call <vscale x 4 x i32> @llvm.riscv.vslide1up(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1)
@@ -3466,9 +3465,8 @@ define <vscale x 4 x float> @vfslide1up_vf(<vscale x 4 x float> %a, float %b, iX
 ;
 ; VLOPT-LABEL: vfslide1up_vf:
 ; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; VLOPT-NEXT: vfslide1up.vf v10, v8, fa0
 ; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vfslide1up.vf v10, v8, fa0
 ; VLOPT-NEXT: vfadd.vv v8, v10, v10
 ; VLOPT-NEXT: ret
   %1 = call <vscale x 4 x float> @llvm.riscv.vfslide1up(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1)
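
As an aside, not part of either patch: a minimal RISC-V vector sketch of the reasoning in the second commit message, with made-up element values and arbitrary register choices. The first vl elements produced by vslide1up are a prefix of its full-VL result, so shrinking the producer's AVL to what the consumer reads cannot change anything the consumer sees; vslide1down places the scalar at lane vl-1, so its visible result depends on its own vl.

    # Assume SEW=32, LMUL=2, v8 = {10, 11, 12, 13, ...}, a0 = 99.
    li             t0, 4
    vsetvli        zero, t0, e32, m2, ta, ma   # vl = 4
    vslide1up.vx   v10, v8, a0                 # first 4 elements of v10: {99, 10, 11, 12}
    vslide1down.vx v12, v8, a0                 # first 4 elements of v12: {11, 12, 13, 99}

    li             t0, 2
    vsetvli        zero, t0, e32, m2, ta, ma   # vl = 2
    vslide1up.vx   v14, v8, a0                 # first 2 elements of v14: {99, 10}, a prefix
                                               # of the vl=4 result, so reducing AVL is safe
    vslide1down.vx v16, v8, a0                 # first 2 elements of v16: {11, 99}; element 1
                                               # changed from 12 to 99 because the scalar is
                                               # always inserted at lane vl-1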