From 0399987aa4df632b43e27f9a74d663d8b897034b Mon Sep 17 00:00:00 2001
From: Michael Maitland
Date: Thu, 9 Jan 2025 11:16:40 -0800
Subject: [PATCH] [RISCV][VLOPT] Add vadc to isSupportedInstr

---
 llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp   |  4 ++
 llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 60 ++++++++++++++++++++
 2 files changed, 64 insertions(+)

diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 9870279ad17c7..9338e0a1c8741 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -948,6 +948,10 @@ static bool isSupportedInstr(const MachineInstr &MI) {
   case RISCV::VMERGE_VIM:
   case RISCV::VMERGE_VVM:
   case RISCV::VMERGE_VXM:
+  // Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
+  case RISCV::VADC_VIM:
+  case RISCV::VADC_VVM:
+  case RISCV::VADC_VXM:
   // Vector Widening Integer Multiply-Add Instructions
   case RISCV::VWMACCU_VV:
   case RISCV::VWMACCU_VX:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index ce79bd5d5ddcf..ce94e1c193645 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -3596,6 +3596,66 @@ define <vscale x 4 x i32> @vmerge_vim(<vscale x 4 x i32> %a, <vscale x 4 x i1> %
   ret <vscale x 4 x i32> %2
 }
 
+define <vscale x 4 x i32> @vadc_vvm(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vadc_vvm:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadc.vvm v8, v8, v10, v0
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v8, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vadc_vvm:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT:    vadc.vvm v8, v8, v10, v0
+; VLOPT-NEXT:    vadd.vv v8, v8, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %c, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vadc_vxm(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i1> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vadc_vxm:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadc.vxm v8, v8, a0, v0
+; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v8, v8
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vadc_vxm:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT:    vadc.vxm v8, v8, a0, v0
+; VLOPT-NEXT:    vadd.vv v8, v8, v8
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i1> %c, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vadc_vim(<vscale x 4 x i32> %a, <vscale x 4 x i1> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vadc_vim:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadc.vim v8, v8, 9, v0
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v8, v8
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vadc_vim:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT:    vadc.vim v8, v8, 9, v0
+; VLOPT-NEXT:    vadd.vv v8, v8, v8
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 9, <vscale x 4 x i1> %c, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
 define <vscale x 4 x i32> @vaadd_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
 ; NOVLOPT-LABEL: vaadd_vv:
 ; NOVLOPT:       # %bb.0: