
Commit d7c7fbd

Pre-commit tests for PR adding more instructions to the vlopt pass
1 parent 4b7f380 commit d7c7fbd

File tree

1 file changed (+80, -2 lines)


llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll

Lines changed: 80 additions & 2 deletions
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb,+zvfbfwma -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb,+zvfbfwma -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb,+zvbc,+zvfbfwma -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb,+zvbc,+zvfbfwma -verify-machineinstrs | FileCheck %s
 
 ; The purpose of this file is to check the behavior of specific instructions as it relates to the VL optimizer
 
@@ -3435,6 +3435,32 @@ define <vscale x 4 x i32> @vbrev_v(<vscale x 4 x i32> %a, iXLen %vl) {
   ret <vscale x 4 x i32> %2
 }
 
+define <vscale x 4 x i32> @vbrev8_v(<vscale x 4 x i32> %a, iXLen %vl) {
+; CHECK-LABEL: vbrev8_v:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vbrev8.v v10, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> %a, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vrev8_v(<vscale x 4 x i32> %a, iXLen %vl) {
+; CHECK-LABEL: vrev8_v:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vrev8.v v10, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> %a, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
 define <vscale x 4 x i32> @vclz_v(<vscale x 4 x i32> %a, iXLen %vl) {
 ; CHECK-LABEL: vclz_v:
 ; CHECK:       # %bb.0:
@@ -3530,3 +3556,55 @@ define <vscale x 4 x i32> @vrol_vx(<vscale x 4 x i32> %a, iXLen %b, iXLen %vl) {
   %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
   ret <vscale x 4 x i32> %2
 }
+
+define <vscale x 2 x i64> @vclmul_vv(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen %vl) {
+; CHECK-LABEL: vclmul_vv:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vclmul.vv v10, v8, v10
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen -1)
+  %2 = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a, iXLen %vl)
+  ret <vscale x 2 x i64> %2
+}
+
+define <vscale x 2 x i64> @vclmul_vx(<vscale x 2 x i64> %a, i32 %b, iXLen %vl) {
+; CHECK-LABEL: vclmul_vx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vclmul.vx v10, v8, a0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i32(<vscale x 2 x i64> undef, <vscale x 2 x i64> %a, i32 %b, iXLen -1)
+  %2 = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a, iXLen %vl)
+  ret <vscale x 2 x i64> %2
+}
+
+define <vscale x 2 x i64> @vclmulh_vv(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen %vl) {
+; CHECK-LABEL: vclmulh_vv:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vclmulh.vv v10, v8, v10
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, iXLen -1)
+  %2 = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a, iXLen %vl)
+  ret <vscale x 2 x i64> %2
+}
+
+define <vscale x 2 x i64> @vclmulh_vx(<vscale x 2 x i64> %a, i32 %b, iXLen %vl) {
+; CHECK-LABEL: vclmulh_vx:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vclmulh.vx v10, v8, a0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v10, v8
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i32(<vscale x 2 x i64> undef, <vscale x 2 x i64> %a, i32 %b, iXLen -1)
+  %2 = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a, iXLen %vl)
+  ret <vscale x 2 x i64> %2
+}
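
The new tests follow the existing pattern in this file: each intrinsic is called with a VL of -1 (VLMAX) and its result is fed into a vadd.vv that runs at the real %vl, so the CHECK lines above capture today's codegen, where the producing instruction still executes under a VLMAX `vsetvli a1, zero, ...`. Once the follow-up PR teaches the vlopt pass about these opcodes, the producer's VL is expected to shrink to the consumer's. The sketch below illustrates that intended effect for vbrev8_v; it is an assumption about the eventual output, not the actual checks regenerated by the follow-up change.

; Hypothetical post-vlopt checks for vbrev8_v (illustrative sketch only; the
; real follow-up PR would regenerate these with update_llc_test_checks.py):
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vbrev8.v v10, v8
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret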
