22 changes: 22 additions & 0 deletions llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -497,6 +497,10 @@ getOperandLog2EEW(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
case RISCV::VANDN_VX:
// Vector Reverse Bits in Elements
case RISCV::VBREV_V:
+// Vector Reverse Bits in Bytes
+case RISCV::VBREV8_V:
+// Vector Reverse Bytes
+case RISCV::VREV8_V:
// Vector Count Leading Zeros
case RISCV::VCLZ_V:
// Vector Count Trailing Zeros
@@ -510,6 +514,13 @@ getOperandLog2EEW(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
case RISCV::VROR_VI:
case RISCV::VROR_VV:
case RISCV::VROR_VX:
+// Vector Carry-less Multiplication Instructions (Zvbc)
+// Vector Carry-less Multiply
+case RISCV::VCLMUL_VV:
+case RISCV::VCLMUL_VX:
+// Vector Carry-less Multiply Return High Half
+case RISCV::VCLMULH_VV:
+case RISCV::VCLMULH_VX:
return MILog2SEW;

// Vector Widening Shift Left Logical (Zvbb)
@@ -1046,6 +1057,10 @@ static bool isSupportedInstr(const MachineInstr &MI) {
case RISCV::VANDN_VX:
// Vector Reverse Bits in Elements
case RISCV::VBREV_V:
+// Vector Reverse Bits in Bytes
+case RISCV::VBREV8_V:
+// Vector Reverse Bytes
+case RISCV::VREV8_V:
// Vector Count Leading Zeros
case RISCV::VCLZ_V:
// Vector Count Trailing Zeros
@@ -1063,6 +1078,13 @@ static bool isSupportedInstr(const MachineInstr &MI) {
case RISCV::VWSLL_VI:
case RISCV::VWSLL_VX:
case RISCV::VWSLL_VV:
+// Vector Carry-less Multiplication Instructions (Zvbc)
+// Vector Carry-less Multiply
+case RISCV::VCLMUL_VV:
+case RISCV::VCLMUL_VX:
+// Vector Carry-less Multiply Return High Half
+case RISCV::VCLMULH_VV:
+case RISCV::VCLMULH_VX:
// Vector Mask Instructions
// Vector Mask-Register Logical Instructions
// vmsbf.m set-before-first mask bit
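Note on the change above: returning MILog2SEW for these opcodes records that every operand is read and written at EEW = SEW, and listing them in isSupportedInstr lets RISCVVLOptimizer shrink their VL to what their users actually demand instead of leaving it at VLMAX. A minimal LLVM IR sketch of the pattern the pass rewrites, modeled on the tests below (the function name is made up for illustration; the intrinsic signatures are copied from vl-opt-instrs.ll):

define <vscale x 2 x i64> @clmul_then_add(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen %vl) {
  ; The carry-less multiply is created with VL = -1, i.e. VLMAX...
  %1 = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen -1)
  ; ...but its only user runs at VL = %vl, so the optimizer can shrink the
  ; vclmul to %vl as well, saving a vsetvli toggle (see the CHECK diffs below).
  %2 = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a, iXLen %vl)
  ret <vscale x 2 x i64> %2
}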
18 changes: 6 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -3438,9 +3438,8 @@ define <vscale x 4 x i32> @vbrev_v(<vscale x 4 x i32> %a, iXLen %vl) {
define <vscale x 4 x i32> @vbrev8_v(<vscale x 4 x i32> %a, iXLen %vl) {
; CHECK-LABEL: vbrev8_v:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT: vbrev8.v v10, v8
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vbrev8.v v10, v8
; CHECK-NEXT: vadd.vv v8, v10, v8
; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> %a, iXLen -1)
@@ -3451,9 +3450,8 @@ define <vscale x 4 x i32> @vbrev8_v(<vscale x 4 x i32> %a, iXLen %vl) {
define <vscale x 4 x i32> @vrev8_v(<vscale x 4 x i32> %a, iXLen %vl) {
; CHECK-LABEL: vrev8_v:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT: vrev8.v v10, v8
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vrev8.v v10, v8
; CHECK-NEXT: vadd.vv v8, v10, v8
; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> %a, iXLen -1)
@@ -3560,9 +3558,8 @@ define <vscale x 4 x i32> @vrol_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) {
define <vscale x 2 x i64> @vclmul_vv(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen %vl) {
; CHECK-LABEL: vclmul_vv:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT: vclmul.vv v10, v8, v10
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vclmul.vv v10, v8, v10
; CHECK-NEXT: vadd.vv v8, v10, v8
; CHECK-NEXT: ret
%1 = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen -1)
@@ -3573,9 +3570,8 @@ define <vscale x 2 x i64> @vclmul_vv(<vscale x 2 x i64> %a, <vscale x 2 x i64> %
define <vscale x 2 x i64> @vclmul_vx(<vscale x 2 x i64> %a, i32 %b, iXLen %vl) {
; CHECK-LABEL: vclmul_vx:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma
-; CHECK-NEXT: vclmul.vx v10, v8, a0
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vclmul.vx v10, v8, a0
; CHECK-NEXT: vadd.vv v8, v10, v8
; CHECK-NEXT: ret
%1 = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i32(<vscale x 2 x i64> undef, <vscale x 2 x i64> %a, i32 %b, iXLen -1)
@@ -3586,9 +3582,8 @@ define <vscale x 2 x i64> @vclmul_vx(<vscale x 2 x i64> %a, i32 %b, iXLen %vl) {
define <vscale x 2 x i64> @vclmulh_vv(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen %vl) {
; CHECK-LABEL: vclmulh_vv:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT: vclmulh.vv v10, v8, v10
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vclmulh.vv v10, v8, v10
; CHECK-NEXT: vadd.vv v8, v10, v8
; CHECK-NEXT: ret
%1 = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen -1)
@@ -3599,9 +3594,8 @@ define <vscale x 2 x i64> @vclmulh_vv(<vscale x 2 x i64> %a, <vscale x 2 x i64>
define <vscale x 2 x i64> @vclmulh_vx(<vscale x 2 x i64> %a, i32 %b, iXLen %vl) {
; CHECK-LABEL: vclmulh_vx:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma
-; CHECK-NEXT: vclmulh.vx v10, v8, a0
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vclmulh.vx v10, v8, a0
; CHECK-NEXT: vadd.vv v8, v10, v8
; CHECK-NEXT: ret
%1 = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i32(<vscale x 2 x i64> undef, <vscale x 2 x i64> %a, i32 %b, iXLen -1)
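The updated CHECK lines above are of the form emitted by llvm/utils/update_llc_test_checks.py. If reproducing locally, an invocation along these lines should regenerate them; the --llc-binary path is an assumption for a local build, and the authoritative RUN lines live at the top of vl-opt-instrs.ll, which is not part of this diff:

llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll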