diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 52893728853e3..0df4c451894be 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -563,8 +563,12 @@ static bool isSupportedInstr(const MachineInstr &MI) {
   // Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
   // FIXME: Add support
   // Vector Narrowing Integer Right Shift Instructions
-  // FIXME: Add support
+  case RISCV::VNSRL_WX:
   case RISCV::VNSRL_WI:
+  case RISCV::VNSRL_WV:
+  case RISCV::VNSRA_WI:
+  case RISCV::VNSRA_WV:
+  case RISCV::VNSRA_WX:
   // Vector Integer Compare Instructions
   // FIXME: Add support
   // Vector Integer Min/Max Instructions
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 0215e6a80d09a..e13482d23a26f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -982,6 +982,107 @@ define <vscale x 4 x i16> @vnsrl_wi(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b
   ret <vscale x 4 x i16> %2
 }
 
+define <vscale x 4 x i16> @vnsrl_wx(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, iXLen %c, iXLen %vl) {
+; NOVLOPT-LABEL: vnsrl_wx:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
+; NOVLOPT-NEXT:    vnsrl.wx v11, v8, a0
+; NOVLOPT-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v11, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vnsrl_wx:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; VLOPT-NEXT:    vnsrl.wx v11, v8, a0
+; VLOPT-NEXT:    vadd.vv v8, v11, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(<vscale x 4 x i16> poison, <vscale x 4 x i32> %a, iXLen %c, iXLen -1)
+  %2 = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %1, <vscale x 4 x i16> %b, iXLen %vl)
+  ret <vscale x 4 x i16> %2
+}
+
+define <vscale x 4 x i16> @vnsrl_wv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vnsrl_wv:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; NOVLOPT-NEXT:    vnsrl.wv v12, v8, v11
+; NOVLOPT-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v12, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vnsrl_wv:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; VLOPT-NEXT:    vnsrl.wv v12, v8, v11
+; VLOPT-NEXT:    vadd.vv v8, v12, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i32> %a, <vscale x 4 x i16> %c, iXLen -1)
+  %2 = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %1, <vscale x 4 x i16> %b, iXLen %vl)
+  ret <vscale x 4 x i16> %2
+}
+
+define <vscale x 4 x i16> @vnsra_wi(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vnsra_wi:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; NOVLOPT-NEXT:    vnsra.wi v11, v8, 5
+; NOVLOPT-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v11, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vnsra_wi:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; VLOPT-NEXT:    vnsra.wi v11, v8, 5
+; VLOPT-NEXT:    vadd.vv v8, v11, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(<vscale x 4 x i16> poison, <vscale x 4 x i32> %a, iXLen 5, iXLen -1)
+  %2 = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %1, <vscale x 4 x i16> %b, iXLen %vl)
+  ret <vscale x 4 x i16> %2
+}
+
+define <vscale x 4 x i16> @vnsra_wx(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, iXLen %c, iXLen %vl) {
+; NOVLOPT-LABEL: vnsra_wx:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
+; NOVLOPT-NEXT:    vnsra.wx v11, v8, a0
+; NOVLOPT-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v11, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vnsra_wx:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; VLOPT-NEXT:    vnsra.wx v11, v8, a0
+; VLOPT-NEXT:    vadd.vv v8, v11, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(<vscale x 4 x i16> poison, <vscale x 4 x i32> %a, iXLen %c, iXLen -1)
+  %2 = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %1, <vscale x 4 x i16> %b, iXLen %vl)
+  ret <vscale x 4 x i16> %2
+}
+
+define <vscale x 4 x i16> @vnsra_wv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vnsra_wv:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; NOVLOPT-NEXT:    vnsra.wv v12, v8, v11
+; NOVLOPT-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v12, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vnsra_wv:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; VLOPT-NEXT:    vnsra.wv v12, v8, v11
+; VLOPT-NEXT:    vadd.vv v8, v12, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i32> %a, <vscale x 4 x i16> %c, iXLen -1)
+  %2 = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %1, <vscale x 4 x i16> %b, iXLen %vl)
+  ret <vscale x 4 x i16> %2
+}
+
+
 define <vscale x 4 x i32> @vminu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
 ; NOVLOPT-LABEL: vminu_vv:
 ; NOVLOPT:       # %bb.0: