@@ -1,51 +1,66 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V
+; RUN: llc -mtriple=riscv32 -mattr=+zve32x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVE32
+; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVE32

define void @masked_store_nxv1i8(<vscale x 1 x i8> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_store_nxv1i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vse8.v v8, (a0), v0.t
-; CHECK-NEXT: ret
+; V-LABEL: masked_store_nxv1i8:
+; V: # %bb.0:
+; V-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; V-NEXT: vse8.v v8, (a0), v0.t
+; V-NEXT: ret
+;
+; ZVE32-LABEL: masked_store_nxv1i8:
+; ZVE32: # %bb.0:
+; ZVE32-NEXT: csrr a1, vlenb
+; ZVE32-NEXT: srli a1, a1, 3
+; ZVE32-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; ZVE32-NEXT: vse8.v v8, (a0), v0.t
+; ZVE32-NEXT: ret
  call void @llvm.masked.store.v1i8.p0(<vscale x 1 x i8> %val, ptr %a, i32 1, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v1i8.p0(<vscale x 1 x i8>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv1i16(<vscale x 1 x i16> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_store_nxv1i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vse16.v v8, (a0), v0.t
-; CHECK-NEXT: ret
+; V-LABEL: masked_store_nxv1i16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; V-NEXT: vse16.v v8, (a0), v0.t
+; V-NEXT: ret
+;
+; ZVE32-LABEL: masked_store_nxv1i16:
+; ZVE32: # %bb.0:
+; ZVE32-NEXT: csrr a1, vlenb
+; ZVE32-NEXT: srli a1, a1, 3
+; ZVE32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; ZVE32-NEXT: vse16.v v8, (a0), v0.t
+; ZVE32-NEXT: ret
  call void @llvm.masked.store.v1i16.p0(<vscale x 1 x i16> %val, ptr %a, i32 2, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v1i16.p0(<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv1i32(<vscale x 1 x i32> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_store_nxv1i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0), v0.t
-; CHECK-NEXT: ret
+; V-LABEL: masked_store_nxv1i32:
+; V: # %bb.0:
+; V-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
+; V-NEXT: vse32.v v8, (a0), v0.t
+; V-NEXT: ret
+;
+; ZVE32-LABEL: masked_store_nxv1i32:
+; ZVE32: # %bb.0:
+; ZVE32-NEXT: csrr a1, vlenb
+; ZVE32-NEXT: srli a1, a1, 3
+; ZVE32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; ZVE32-NEXT: vse32.v v8, (a0), v0.t
+; ZVE32-NEXT: ret
  call void @llvm.masked.store.v1i32.p0(<vscale x 1 x i32> %val, ptr %a, i32 4, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v1i32.p0(<vscale x 1 x i32>, ptr, i32, <vscale x 1 x i1>)

-define void @masked_store_nxv1i64(<vscale x 1 x i64> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_store_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT: vse64.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-  call void @llvm.masked.store.v1i64.p0(<vscale x 1 x i64> %val, ptr %a, i32 8, <vscale x 1 x i1> %mask)
-  ret void
-}
-declare void @llvm.masked.store.v1i64.p0(<vscale x 1 x i64>, ptr, i32, <vscale x 1 x i1>)
-
define void @masked_store_nxv2i8(<vscale x 2 x i8> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2i8:
; CHECK: # %bb.0:
@@ -79,17 +94,6 @@ define void @masked_store_nxv2i32(<vscale x 2 x i32> %val, ptr %a, <vscale x 2 x |
}
declare void @llvm.masked.store.v2i32.p0(<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>)

-define void @masked_store_nxv2i64(<vscale x 2 x i64> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_store_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT: vse64.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-  call void @llvm.masked.store.v2i64.p0(<vscale x 2 x i64> %val, ptr %a, i32 8, <vscale x 2 x i1> %mask)
-  ret void
-}
-declare void @llvm.masked.store.v2i64.p0(<vscale x 2 x i64>, ptr, i32, <vscale x 2 x i1>)
-
define void @masked_store_nxv4i8(<vscale x 4 x i8> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4i8:
; CHECK: # %bb.0:
@@ -123,17 +127,6 @@ define void @masked_store_nxv4i32(<vscale x 4 x i32> %val, ptr %a, <vscale x 4 x |
}
declare void @llvm.masked.store.v4i32.p0(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)

-define void @masked_store_nxv4i64(<vscale x 4 x i64> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_store_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-NEXT: vse64.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-  call void @llvm.masked.store.v4i64.p0(<vscale x 4 x i64> %val, ptr %a, i32 8, <vscale x 4 x i1> %mask)
-  ret void
-}
-declare void @llvm.masked.store.v4i64.p0(<vscale x 4 x i64>, ptr, i32, <vscale x 4 x i1>)
-
define void @masked_store_nxv8i8(<vscale x 8 x i8> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8i8:
; CHECK: # %bb.0:
@@ -167,17 +160,6 @@ define void @masked_store_nxv8i32(<vscale x 8 x i32> %val, ptr %a, <vscale x 8 x |
}
declare void @llvm.masked.store.v8i32.p0(<vscale x 8 x i32>, ptr, i32, <vscale x 8 x i1>)

-define void @masked_store_nxv8i64(<vscale x 8 x i64> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_store_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT: vse64.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-  call void @llvm.masked.store.v8i64.p0(<vscale x 8 x i64> %val, ptr %a, i32 8, <vscale x 8 x i1> %mask)
-  ret void
-}
-declare void @llvm.masked.store.v8i64.p0(<vscale x 8 x i64>, ptr, i32, <vscale x 8 x i1>)
-
define void @masked_store_nxv16i8(<vscale x 16 x i8> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv16i8:
; CHECK: # %bb.0: