 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s

 define <8 x float> @fpext_v8bf16(<8 x bfloat> %x) {
-; RV32-LABEL: fpext_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: fmv.x.w a1, fa1
-; RV32-NEXT: fmv.x.w a2, fa2
-; RV32-NEXT: fmv.x.w a3, fa3
-; RV32-NEXT: fmv.x.w a4, fa4
-; RV32-NEXT: fmv.x.w a5, fa5
-; RV32-NEXT: fmv.x.w a6, fa6
-; RV32-NEXT: fmv.x.w a7, fa7
-; RV32-NEXT: slli a7, a7, 16
-; RV32-NEXT: sw a7, 28(sp)
-; RV32-NEXT: slli a6, a6, 16
-; RV32-NEXT: sw a6, 24(sp)
-; RV32-NEXT: slli a5, a5, 16
-; RV32-NEXT: sw a5, 20(sp)
-; RV32-NEXT: slli a4, a4, 16
-; RV32-NEXT: sw a4, 16(sp)
-; RV32-NEXT: slli a3, a3, 16
-; RV32-NEXT: sw a3, 12(sp)
-; RV32-NEXT: slli a2, a2, 16
-; RV32-NEXT: sw a2, 8(sp)
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: sw a0, 0(sp)
-; RV32-NEXT: addi a0, sp, 28
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: addi a0, sp, 24
-; RV32-NEXT: vle32.v v9, (a0)
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT: vslideup.vi v9, v8, 1
-; RV32-NEXT: addi a0, sp, 20
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vle32.v v10, (a0)
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vle32.v v12, (a0)
-; RV32-NEXT: addi a0, sp, 12
-; RV32-NEXT: vle32.v v11, (a0)
-; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vle32.v v13, (a0)
-; RV32-NEXT: addi a0, sp, 4
-; RV32-NEXT: vle32.v v14, (a0)
-; RV32-NEXT: mv a0, sp
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT: vslideup.vi v12, v10, 1
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vslideup.vi v12, v9, 2
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT: vslideup.vi v13, v11, 1
-; RV32-NEXT: vslideup.vi v8, v14, 1
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vslideup.vi v8, v13, 2
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vslideup.vi v8, v12, 4
-; RV32-NEXT: addi sp, sp, 32
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fpext_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -32
-; RV64-NEXT: .cfi_def_cfa_offset 32
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: fmv.x.w a1, fa1
-; RV64-NEXT: fmv.x.w a2, fa2
-; RV64-NEXT: fmv.x.w a3, fa3
-; RV64-NEXT: fmv.x.w a4, fa4
-; RV64-NEXT: fmv.x.w a5, fa5
-; RV64-NEXT: fmv.x.w a6, fa6
-; RV64-NEXT: fmv.x.w a7, fa7
-; RV64-NEXT: slli a7, a7, 16
-; RV64-NEXT: fmv.w.x fa5, a7
-; RV64-NEXT: fsw fa5, 28(sp)
-; RV64-NEXT: slli a6, a6, 16
-; RV64-NEXT: fmv.w.x fa5, a6
-; RV64-NEXT: fsw fa5, 24(sp)
-; RV64-NEXT: slli a5, a5, 16
-; RV64-NEXT: fmv.w.x fa5, a5
-; RV64-NEXT: fsw fa5, 20(sp)
-; RV64-NEXT: slli a4, a4, 16
-; RV64-NEXT: fmv.w.x fa5, a4
-; RV64-NEXT: fsw fa5, 16(sp)
-; RV64-NEXT: slli a3, a3, 16
-; RV64-NEXT: fmv.w.x fa5, a3
-; RV64-NEXT: fsw fa5, 12(sp)
-; RV64-NEXT: slli a2, a2, 16
-; RV64-NEXT: fmv.w.x fa5, a2
-; RV64-NEXT: fsw fa5, 8(sp)
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa5, a1
-; RV64-NEXT: fsw fa5, 4(sp)
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsw fa5, 0(sp)
-; RV64-NEXT: addi a0, sp, 28
-; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: addi a0, sp, 24
-; RV64-NEXT: vle32.v v9, (a0)
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT: vslideup.vi v9, v8, 1
-; RV64-NEXT: addi a0, sp, 20
-; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vle32.v v10, (a0)
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vle32.v v12, (a0)
-; RV64-NEXT: addi a0, sp, 12
-; RV64-NEXT: vle32.v v11, (a0)
-; RV64-NEXT: addi a0, sp, 8
-; RV64-NEXT: vle32.v v13, (a0)
-; RV64-NEXT: addi a0, sp, 4
-; RV64-NEXT: vle32.v v14, (a0)
-; RV64-NEXT: mv a0, sp
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT: vslideup.vi v12, v10, 1
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vslideup.vi v12, v9, 2
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT: vslideup.vi v13, v11, 1
-; RV64-NEXT: vslideup.vi v8, v14, 1
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vslideup.vi v8, v13, 2
-; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64-NEXT: vslideup.vi v8, v12, 4
-; RV64-NEXT: addi sp, sp, 32
-; RV64-NEXT: ret
+; CHECK-LABEL: fpext_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.x.w a0, fa0
+; CHECK-NEXT: fmv.x.w a1, fa1
+; CHECK-NEXT: fmv.x.w a2, fa2
+; CHECK-NEXT: fmv.x.w a3, fa3
+; CHECK-NEXT: fmv.x.w a4, fa4
+; CHECK-NEXT: fmv.x.w a5, fa5
+; CHECK-NEXT: fmv.x.w a6, fa6
+; CHECK-NEXT: fmv.x.w a7, fa7
+; CHECK-NEXT: slli a7, a7, 16
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.s.x v8, a7
+; CHECK-NEXT: slli a6, a6, 16
+; CHECK-NEXT: vmv.s.x v9, a6
+; CHECK-NEXT: vslideup.vi v9, v8, 1
+; CHECK-NEXT: slli a5, a5, 16
+; CHECK-NEXT: vmv.s.x v8, a5
+; CHECK-NEXT: slli a4, a4, 16
+; CHECK-NEXT: vmv.s.x v10, a4
+; CHECK-NEXT: vslideup.vi v10, v8, 1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v10, v9, 2
+; CHECK-NEXT: slli a3, a3, 16
+; CHECK-NEXT: vmv.s.x v8, a3
+; CHECK-NEXT: slli a2, a2, 16
+; CHECK-NEXT: vmv.s.x v9, a2
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v9, v8, 1
+; CHECK-NEXT: slli a1, a1, 16
+; CHECK-NEXT: vmv.s.x v11, a1
+; CHECK-NEXT: slli a0, a0, 16
+; CHECK-NEXT: vmv.s.x v8, a0
+; CHECK-NEXT: vslideup.vi v8, v11, 1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 2
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 4
+; CHECK-NEXT: ret
 %y = fpext <8 x bfloat> %x to <8 x float>
 ret <8 x float> %y
 }

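Both the old and the new lowerings above start identically: each bfloat lane is moved out of the FP register file with fmv.x.w and widened with slli aN, aN, 16, because a bfloat16 value is exactly the high half of an IEEE-754 single. A minimal standalone IR sketch of that per-lane bit manipulation (a hypothetical helper for illustration, not part of this test file):

; Per-lane model of the extension: widen the 16 raw bfloat bits,
; shift them into the top half of a 32-bit word, and reinterpret
; that word as a float. This mirrors the slli + vmv.s.x (new) and
; slli + sw/fsw (old) sequences in the checks above.
define float @bf16_bits_to_f32(i16 %bits) {
  %wide = zext i16 %bits to i32
  %shifted = shl i32 %wide, 16
  %f = bitcast i32 %shifted to float
  ret float %f
}
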
 define <8 x float> @fpext_v8f16(<8 x bfloat> %x) {
-; RV32-LABEL: fpext_v8f16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: fmv.x.w a1, fa1
-; RV32-NEXT: fmv.x.w a2, fa2
-; RV32-NEXT: fmv.x.w a3, fa3
-; RV32-NEXT: fmv.x.w a4, fa4
-; RV32-NEXT: fmv.x.w a5, fa5
-; RV32-NEXT: fmv.x.w a6, fa6
-; RV32-NEXT: fmv.x.w a7, fa7
-; RV32-NEXT: slli a7, a7, 16
-; RV32-NEXT: sw a7, 28(sp)
-; RV32-NEXT: slli a6, a6, 16
-; RV32-NEXT: sw a6, 24(sp)
-; RV32-NEXT: slli a5, a5, 16
-; RV32-NEXT: sw a5, 20(sp)
-; RV32-NEXT: slli a4, a4, 16
-; RV32-NEXT: sw a4, 16(sp)
-; RV32-NEXT: slli a3, a3, 16
-; RV32-NEXT: sw a3, 12(sp)
-; RV32-NEXT: slli a2, a2, 16
-; RV32-NEXT: sw a2, 8(sp)
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: sw a0, 0(sp)
-; RV32-NEXT: addi a0, sp, 28
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: addi a0, sp, 24
-; RV32-NEXT: vle32.v v9, (a0)
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT: vslideup.vi v9, v8, 1
-; RV32-NEXT: addi a0, sp, 20
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT: vle32.v v10, (a0)
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vle32.v v12, (a0)
-; RV32-NEXT: addi a0, sp, 12
-; RV32-NEXT: vle32.v v11, (a0)
-; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vle32.v v13, (a0)
-; RV32-NEXT: addi a0, sp, 4
-; RV32-NEXT: vle32.v v14, (a0)
-; RV32-NEXT: mv a0, sp
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT: vslideup.vi v12, v10, 1
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vslideup.vi v12, v9, 2
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT: vslideup.vi v13, v11, 1
-; RV32-NEXT: vslideup.vi v8, v14, 1
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vslideup.vi v8, v13, 2
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vslideup.vi v8, v12, 4
-; RV32-NEXT: addi sp, sp, 32
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fpext_v8f16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -32
-; RV64-NEXT: .cfi_def_cfa_offset 32
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: fmv.x.w a1, fa1
-; RV64-NEXT: fmv.x.w a2, fa2
-; RV64-NEXT: fmv.x.w a3, fa3
-; RV64-NEXT: fmv.x.w a4, fa4
-; RV64-NEXT: fmv.x.w a5, fa5
-; RV64-NEXT: fmv.x.w a6, fa6
-; RV64-NEXT: fmv.x.w a7, fa7
-; RV64-NEXT: slli a7, a7, 16
-; RV64-NEXT: fmv.w.x fa5, a7
-; RV64-NEXT: fsw fa5, 28(sp)
-; RV64-NEXT: slli a6, a6, 16
-; RV64-NEXT: fmv.w.x fa5, a6
-; RV64-NEXT: fsw fa5, 24(sp)
-; RV64-NEXT: slli a5, a5, 16
-; RV64-NEXT: fmv.w.x fa5, a5
-; RV64-NEXT: fsw fa5, 20(sp)
-; RV64-NEXT: slli a4, a4, 16
-; RV64-NEXT: fmv.w.x fa5, a4
-; RV64-NEXT: fsw fa5, 16(sp)
-; RV64-NEXT: slli a3, a3, 16
-; RV64-NEXT: fmv.w.x fa5, a3
-; RV64-NEXT: fsw fa5, 12(sp)
-; RV64-NEXT: slli a2, a2, 16
-; RV64-NEXT: fmv.w.x fa5, a2
-; RV64-NEXT: fsw fa5, 8(sp)
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa5, a1
-; RV64-NEXT: fsw fa5, 4(sp)
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsw fa5, 0(sp)
-; RV64-NEXT: addi a0, sp, 28
-; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: addi a0, sp, 24
-; RV64-NEXT: vle32.v v9, (a0)
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT: vslideup.vi v9, v8, 1
-; RV64-NEXT: addi a0, sp, 20
-; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT: vle32.v v10, (a0)
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vle32.v v12, (a0)
-; RV64-NEXT: addi a0, sp, 12
-; RV64-NEXT: vle32.v v11, (a0)
-; RV64-NEXT: addi a0, sp, 8
-; RV64-NEXT: vle32.v v13, (a0)
-; RV64-NEXT: addi a0, sp, 4
-; RV64-NEXT: vle32.v v14, (a0)
-; RV64-NEXT: mv a0, sp
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT: vslideup.vi v12, v10, 1
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vslideup.vi v12, v9, 2
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT: vslideup.vi v13, v11, 1
-; RV64-NEXT: vslideup.vi v8, v14, 1
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vslideup.vi v8, v13, 2
-; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64-NEXT: vslideup.vi v8, v12, 4
-; RV64-NEXT: addi sp, sp, 32
-; RV64-NEXT: ret
+; CHECK-LABEL: fpext_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.x.w a0, fa0
+; CHECK-NEXT: fmv.x.w a1, fa1
+; CHECK-NEXT: fmv.x.w a2, fa2
+; CHECK-NEXT: fmv.x.w a3, fa3
+; CHECK-NEXT: fmv.x.w a4, fa4
+; CHECK-NEXT: fmv.x.w a5, fa5
+; CHECK-NEXT: fmv.x.w a6, fa6
+; CHECK-NEXT: fmv.x.w a7, fa7
+; CHECK-NEXT: slli a7, a7, 16
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.s.x v8, a7
+; CHECK-NEXT: slli a6, a6, 16
+; CHECK-NEXT: vmv.s.x v9, a6
+; CHECK-NEXT: vslideup.vi v9, v8, 1
+; CHECK-NEXT: slli a5, a5, 16
+; CHECK-NEXT: vmv.s.x v8, a5
+; CHECK-NEXT: slli a4, a4, 16
+; CHECK-NEXT: vmv.s.x v10, a4
+; CHECK-NEXT: vslideup.vi v10, v8, 1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v10, v9, 2
+; CHECK-NEXT: slli a3, a3, 16
+; CHECK-NEXT: vmv.s.x v8, a3
+; CHECK-NEXT: slli a2, a2, 16
+; CHECK-NEXT: vmv.s.x v9, a2
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v9, v8, 1
+; CHECK-NEXT: slli a1, a1, 16
+; CHECK-NEXT: vmv.s.x v11, a1
+; CHECK-NEXT: slli a0, a0, 16
+; CHECK-NEXT: vmv.s.x v8, a0
+; CHECK-NEXT: vslideup.vi v8, v11, 1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 2
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 4
+; CHECK-NEXT: ret
 %y = fpext <8 x bfloat> %x to <8 x float>
 ret <8 x float> %y
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK: {{.*}}
+
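The new CHECK lines in both functions build the result vector without touching the stack: each shifted scalar enters a vector register through vmv.s.x, and vslideup.vi then concatenates pairs into quads and quads into the final <8 x float>. As a rough IR-level picture of one such merge step, under the assumption that vslideup.vi vD, vS, 2 with VL=4 acts as a subvector concatenation (a hand-written sketch, not taken from this test):

; One merge step of the tree: slide a 2-element group into the upper
; half of a 4-element group, i.e. concatenate two subvectors.
define <4 x float> @concat_pairs(<2 x float> %lo, <2 x float> %hi) {
  %v = shufflevector <2 x float> %lo, <2 x float> %hi,
                     <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x float> %v
}

Since RV32 and RV64 now produce this identical sequence, the per-target RV32/RV64 prefixes became redundant and both RUN lines check the shared CHECK prefix instead.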