3 changes: 3 additions & 0 deletions llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -1040,6 +1040,9 @@ static bool isSupportedInstr(const MachineInstr &MI) {
case RISCV::VMSOF_M:
case RISCV::VIOTA_M:
case RISCV::VID_V:
// Vector Slide Instructions
case RISCV::VSLIDE1UP_VX:
case RISCV::VFSLIDE1UP_VF:
// Vector Single-Width Floating-Point Add/Subtract Instructions
case RISCV::VFADD_VF:
case RISCV::VFADD_VV:
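Note (not part of the patch): a rough scalar sketch of the slide1up/slide1down semantics, to spell out why the VL optimizer may shrink the VL of vslide1up/vfslide1up but must leave the vslide1down variants alone (see the negative tests in the test diff below). Element i of a slide1up result depends only on the scalar and on source element i-1, while slide1down places the scalar at element vl-1, so its result depends on the VL value itself. The element type and function names here are illustrative only.

#include <stddef.h>

// vslide1up: element 0 receives the scalar; element i reads source
// element i-1. No element of the result depends on vl, so reducing vl
// only drops tail elements that the (shorter-VL) user never reads.
void slide1up(int *vd, const int *vs2, int rs1, size_t vl) {
  if (vl == 0)
    return;
  vd[0] = rs1;
  for (size_t i = 1; i < vl; ++i)
    vd[i] = vs2[i - 1];
}

// vslide1down: the scalar lands in element vl-1, so the value of an
// individual element changes when vl changes; the optimizer therefore
// cannot reduce vl across this instruction.
void slide1down(int *vd, const int *vs2, int rs1, size_t vl) {
  if (vl == 0)
    return;
  for (size_t i = 0; i + 1 < vl; ++i)
    vd[i] = vs2[i + 1];
  vd[vl - 1] = rs1;
}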
86 changes: 86 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -3434,6 +3434,92 @@ define <vscale x 4 x i32> @vid.v(<vscale x 4 x i32> %c, iXLen %vl) {
ret <vscale x 4 x i32> %2
}

define <vscale x 4 x i32> @vslide1up_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) {
; NOVLOPT-LABEL: vslide1up_vx:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vslide1up.vx v10, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; NOVLOPT-NEXT: vadd.vv v8, v10, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vslide1up_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vslide1up.vx v10, v8, a0
; VLOPT-NEXT: vadd.vv v8, v10, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vslide1up(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
ret <vscale x 4 x i32> %2
}

define <vscale x 4 x float> @vfslide1up_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) {
; NOVLOPT-LABEL: vfslide1up_vf:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vfslide1up.vf v10, v8, fa0
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; NOVLOPT-NEXT: vfadd.vv v8, v10, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vfslide1up_vf:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vfslide1up.vf v10, v8, fa0
; VLOPT-NEXT: vfadd.vv v8, v10, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x float> @llvm.riscv.vfslide1up(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1)
%2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %1, iXLen 7, iXLen %vl)
ret <vscale x 4 x float> %2
}

; Negative test – not safe to reduce vl: vslide1down writes the scalar into element vl-1, so the result depends on the VL value itself.

define <vscale x 4 x i32> @vslide1down_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) {
; NOVLOPT-LABEL: vslide1down_vx:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vslide1down.vx v8, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; NOVLOPT-NEXT: vadd.vv v8, v8, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vslide1down_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; VLOPT-NEXT: vslide1down.vx v8, v8, a0
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vadd.vv v8, v8, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vslide1down(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
ret <vscale x 4 x i32> %2
}

; Negative test – not safe to reduce vl: vfslide1down writes the scalar into element vl-1, so the result depends on the VL value itself.

define <vscale x 4 x float> @vfslide1down_vf(<vscale x 4 x float> %a, float %b, iXLen %vl) {
; NOVLOPT-LABEL: vfslide1down_vf:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vfslide1down.vf v8, v8, fa0
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; NOVLOPT-NEXT: vfadd.vv v8, v8, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vfslide1down_vf:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; VLOPT-NEXT: vfslide1down.vf v8, v8, fa0
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x float> @llvm.riscv.vfslide1down(<vscale x 4 x float> poison, <vscale x 4 x float> %a, float %b, iXLen -1)
%2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %1, iXLen 7, iXLen %vl)
ret <vscale x 4 x float> %2
}

define <vscale x 4 x float> @vfadd_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) {
; NOVLOPT-LABEL: vfadd_vv:
; NOVLOPT: # %bb.0: