
Commit 737be7d

Remove vscale_range(2,1024)

1 parent ee35f21
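For context, vscale_range(min,max) is an LLVM function attribute asserting that llvm.vscale always returns a value in that range, which lets the backend bound anything derived from it. A minimal sketch of the guarantee these tests previously carried (function name hypothetical):

declare i32 @llvm.vscale.i32()

; Hypothetical sketch: vscale_range(2,1024) promises 2 <= vscale <= 1024,
; so %evl0 = vscale * 8 is known to lie in [16, 8192]. Dropping the
; attribute withdraws that bound.
define i32 @evl_nx8() vscale_range(2,1024) {
  %evl = call i32 @llvm.vscale.i32()
  %evl0 = mul i32 %evl, 8
  ret i32 %evl0
}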

File tree

6 files changed: +268 -110 lines changed


llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll

Lines changed: 40 additions & 17 deletions
@@ -1393,23 +1393,46 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va, i
 
 declare i32 @llvm.vscale.i32()
 
-define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m) vscale_range(2,1024) {
-; CHECK-LABEL: vadd_vi_nxv32i32_evl_nx8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a1, a0, 2
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a1
-; CHECK-NEXT: slli a1, a0, 1
-; CHECK-NEXT: sub a1, a0, a1
-; CHECK-NEXT: sltu a0, a0, a1
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
-; CHECK-NEXT: ret
+define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m) {
+; RV32-LABEL: vadd_vi_nxv32i32_evl_nx8:
+; RV32: # %bb.0:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: srli a1, a0, 2
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vadd.vi v8, v8, -1, v0.t
+; RV32-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: sub a1, a0, a1
+; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vadd.vi v16, v16, -1, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vadd_vi_nxv32i32_evl_nx8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT: vmv1r.v v24, v0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: srli a2, a0, 2
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: vslidedown.vx v0, v0, a2
+; RV64-NEXT: sub a2, a0, a1
+; RV64-NEXT: sltu a3, a0, a2
+; RV64-NEXT: addi a3, a3, -1
+; RV64-NEXT: and a2, a3, a2
+; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
+; RV64-NEXT: bltu a0, a1, .LBB120_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: mv a0, a1
+; RV64-NEXT: .LBB120_2:
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
+; RV64-NEXT: ret
 %evl = call i32 @llvm.vscale.i32()
 %evl0 = mul i32 %evl, 8
 %v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl0)
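With the attribute gone, the RV32 output is unchanged, but RV64 now emits an explicit bltu/mv clamp to cap the first half's EVL at 2*vlenb (the vscale x 16 e32 elements one m8 register group holds); presumably the known vscale bound previously let the backend fold that min away. The sub/sltu/addi/and sequence in both outputs is the branchless max(evl - vscale*16, 0) for the second half's EVL. A hypothetical IR sketch of that clamp:

; Hypothetical sketch mirroring the sub / sltu / addi -1 / and sequence:
; computes max(%evl - %half, 0) without a branch.
define i32 @evl_hi(i32 %evl, i32 %half) {
  %rem = sub i32 %evl, %half      ; wraps to a huge value when %evl < %half
  %ovf = icmp ult i32 %evl, %rem  ; true iff the subtraction wrapped
  %mask = select i1 %ovf, i32 0, i32 -1
  %res = and i32 %rem, %mask      ; 0 on underflow, %evl - %half otherwise
  ret i32 %res
}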

llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll

Lines changed: 40 additions & 17 deletions
@@ -1026,23 +1026,46 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va, i
 
 declare i32 @llvm.vscale.i32()
 
-define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) vscale_range(2,1024) {
-; CHECK-LABEL: vmax_vx_nxv32i32_evl_nx8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a2, a1, 2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a2
-; CHECK-NEXT: slli a2, a1, 1
-; CHECK-NEXT: sub a2, a1, a2
-; CHECK-NEXT: sltu a1, a1, a2
-; CHECK-NEXT: addi a1, a1, -1
-; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
-; CHECK-NEXT: ret
+define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
+; RV32-LABEL: vmax_vx_nxv32i32_evl_nx8:
+; RV32: # %bb.0:
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: srli a2, a1, 2
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vmax.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: sub a2, a1, a2
+; RV32-NEXT: sltu a1, a1, a2
+; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: and a1, a1, a2
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vmax.vx v16, v16, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vmax_vx_nxv32i32_evl_nx8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; RV64-NEXT: vmv1r.v v24, v0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: srli a3, a1, 2
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: vslidedown.vx v0, v0, a3
+; RV64-NEXT: sub a3, a1, a2
+; RV64-NEXT: sltu a4, a1, a3
+; RV64-NEXT: addi a4, a4, -1
+; RV64-NEXT: and a3, a4, a3
+; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV64-NEXT: vmax.vx v16, v16, a0, v0.t
+; RV64-NEXT: bltu a1, a2, .LBB82_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: mv a1, a2
+; RV64-NEXT: .LBB82_2:
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
 %evl = call i32 @llvm.vscale.i32()

llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll

Lines changed: 40 additions & 17 deletions
@@ -1025,23 +1025,46 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va,
 
 declare i32 @llvm.vscale.i32()
 
-define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) vscale_range(2,1024) {
-; CHECK-LABEL: vmaxu_vx_nxv32i32_evl_nx8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a2, a1, 2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a2
-; CHECK-NEXT: slli a2, a1, 1
-; CHECK-NEXT: sub a2, a1, a2
-; CHECK-NEXT: sltu a1, a1, a2
-; CHECK-NEXT: addi a1, a1, -1
-; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
-; CHECK-NEXT: ret
+define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
+; RV32-LABEL: vmaxu_vx_nxv32i32_evl_nx8:
+; RV32: # %bb.0:
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: srli a2, a1, 2
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vmaxu.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: sub a2, a1, a2
+; RV32-NEXT: sltu a1, a1, a2
+; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: and a1, a1, a2
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vmaxu.vx v16, v16, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vmaxu_vx_nxv32i32_evl_nx8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; RV64-NEXT: vmv1r.v v24, v0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: srli a3, a1, 2
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: vslidedown.vx v0, v0, a3
+; RV64-NEXT: sub a3, a1, a2
+; RV64-NEXT: sltu a4, a1, a3
+; RV64-NEXT: addi a4, a4, -1
+; RV64-NEXT: and a3, a4, a3
+; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV64-NEXT: vmaxu.vx v16, v16, a0, v0.t
+; RV64-NEXT: bltu a1, a2, .LBB82_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: mv a1, a2
+; RV64-NEXT: .LBB82_2:
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
 %evl = call i32 @llvm.vscale.i32()

llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll

Lines changed: 40 additions & 17 deletions
@@ -1026,23 +1026,46 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va, i
 
 declare i32 @llvm.vscale.i32()
 
-define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) vscale_range(2,1024) {
-; CHECK-LABEL: vmin_vx_nxv32i32_evl_nx8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a2, a1, 2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a2
-; CHECK-NEXT: slli a2, a1, 1
-; CHECK-NEXT: sub a2, a1, a2
-; CHECK-NEXT: sltu a1, a1, a2
-; CHECK-NEXT: addi a1, a1, -1
-; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
-; CHECK-NEXT: ret
+define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
+; RV32-LABEL: vmin_vx_nxv32i32_evl_nx8:
+; RV32: # %bb.0:
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: srli a2, a1, 2
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vmin.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: sub a2, a1, a2
+; RV32-NEXT: sltu a1, a1, a2
+; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: and a1, a1, a2
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vmin.vx v16, v16, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vmin_vx_nxv32i32_evl_nx8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; RV64-NEXT: vmv1r.v v24, v0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: srli a3, a1, 2
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: vslidedown.vx v0, v0, a3
+; RV64-NEXT: sub a3, a1, a2
+; RV64-NEXT: sltu a4, a1, a3
+; RV64-NEXT: addi a4, a4, -1
+; RV64-NEXT: and a3, a4, a3
+; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV64-NEXT: vmin.vx v16, v16, a0, v0.t
+; RV64-NEXT: bltu a1, a2, .LBB82_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: mv a1, a2
+; RV64-NEXT: .LBB82_2:
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
 %evl = call i32 @llvm.vscale.i32()

llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll

Lines changed: 40 additions & 17 deletions
@@ -1025,23 +1025,46 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va,
 
 declare i32 @llvm.vscale.i32()
 
-define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) vscale_range(2,1024) {
-; CHECK-LABEL: vminu_vx_nxv32i32_evl_nx8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a2, a1, 2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a2
-; CHECK-NEXT: slli a2, a1, 1
-; CHECK-NEXT: sub a2, a1, a2
-; CHECK-NEXT: sltu a1, a1, a2
-; CHECK-NEXT: addi a1, a1, -1
-; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
-; CHECK-NEXT: ret
+define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
+; RV32-LABEL: vminu_vx_nxv32i32_evl_nx8:
+; RV32: # %bb.0:
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: srli a2, a1, 2
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vminu.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: sub a2, a1, a2
+; RV32-NEXT: sltu a1, a1, a2
+; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: and a1, a1, a2
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: vminu.vx v16, v16, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vminu_vx_nxv32i32_evl_nx8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; RV64-NEXT: vmv1r.v v24, v0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: srli a3, a1, 2
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: vslidedown.vx v0, v0, a3
+; RV64-NEXT: sub a3, a1, a2
+; RV64-NEXT: sltu a4, a1, a3
+; RV64-NEXT: addi a4, a4, -1
+; RV64-NEXT: and a3, a4, a3
+; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV64-NEXT: vminu.vx v16, v16, a0, v0.t
+; RV64-NEXT: bltu a1, a2, .LBB82_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: mv a1, a2
+; RV64-NEXT: .LBB82_2:
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
 %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
 %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
 %evl = call i32 @llvm.vscale.i32()
