@@ -184,9 +184,9 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv
184184; CHECK-NEXT: addi sp, sp, -16
185185; CHECK-NEXT: .cfi_def_cfa_offset 16
186186; CHECK-NEXT: csrr a0, vlenb
187- ; CHECK-NEXT: slli a0, a0, 4
187+ ; CHECK-NEXT: slli a0, a0, 3
188188; CHECK-NEXT: sub sp, sp, a0
189- ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
189+ ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
190190; CHECK-NEXT: li a0, 85
191191; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
192192; CHECK-NEXT: vmv.v.x v7, a0
@@ -200,25 +200,15 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv
200200; CHECK-NEXT: vcompress.vm v8, v16, v28
201201; CHECK-NEXT: addi a0, sp, 16
202202; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
203- ; CHECK-NEXT: vcompress.vm v8, v16, v29
204- ; CHECK-NEXT: csrr a0, vlenb
205- ; CHECK-NEXT: slli a0, a0, 3
206- ; CHECK-NEXT: add a0, sp, a0
207- ; CHECK-NEXT: addi a0, a0, 16
208- ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
209- ; CHECK-NEXT: addi a0, sp, 16
203+ ; CHECK-NEXT: vmv8r.v v8, v16
204+ ; CHECK-NEXT: vcompress.vm v16, v8, v29
210205; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
211206; CHECK-NEXT: vmv4r.v v28, v8
212- ; CHECK-NEXT: csrr a0, vlenb
213- ; CHECK-NEXT: slli a0, a0, 3
214- ; CHECK-NEXT: add a0, sp, a0
215- ; CHECK-NEXT: addi a0, a0, 16
216- ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
217- ; CHECK-NEXT: vmv4r.v v4, v8
207+ ; CHECK-NEXT: vmv4r.v v4, v16
218208; CHECK-NEXT: vmv8r.v v8, v24
219209; CHECK-NEXT: vmv8r.v v16, v0
220210; CHECK-NEXT: csrr a0, vlenb
221- ; CHECK-NEXT: slli a0, a0, 4
211+ ; CHECK-NEXT: slli a0, a0, 3
222212; CHECK-NEXT: add sp, sp, a0
223213; CHECK-NEXT: .cfi_def_cfa sp, 16
224214; CHECK-NEXT: addi sp, sp, 16
@@ -417,9 +407,9 @@ define {<vscale x 8 x double>, <vscale x 8 x double>} @vector_deinterleave_nxv8f
417407; CHECK-NEXT: addi sp, sp, -16
418408; CHECK-NEXT: .cfi_def_cfa_offset 16
419409; CHECK-NEXT: csrr a0, vlenb
420- ; CHECK-NEXT: slli a0, a0, 4
410+ ; CHECK-NEXT: slli a0, a0, 3
421411; CHECK-NEXT: sub sp, sp, a0
422- ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
412+ ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
423413; CHECK-NEXT: li a0, 85
424414; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
425415; CHECK-NEXT: vmv.v.x v7, a0
@@ -433,25 +423,15 @@ define {<vscale x 8 x double>, <vscale x 8 x double>} @vector_deinterleave_nxv8f
433423; CHECK-NEXT: vcompress.vm v8, v16, v28
434424; CHECK-NEXT: addi a0, sp, 16
435425; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
436- ; CHECK-NEXT: vcompress.vm v8, v16, v29
437- ; CHECK-NEXT: csrr a0, vlenb
438- ; CHECK-NEXT: slli a0, a0, 3
439- ; CHECK-NEXT: add a0, sp, a0
440- ; CHECK-NEXT: addi a0, a0, 16
441- ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
442- ; CHECK-NEXT: addi a0, sp, 16
426+ ; CHECK-NEXT: vmv8r.v v8, v16
427+ ; CHECK-NEXT: vcompress.vm v16, v8, v29
443428; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
444429; CHECK-NEXT: vmv4r.v v28, v8
445- ; CHECK-NEXT: csrr a0, vlenb
446- ; CHECK-NEXT: slli a0, a0, 3
447- ; CHECK-NEXT: add a0, sp, a0
448- ; CHECK-NEXT: addi a0, a0, 16
449- ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
450- ; CHECK-NEXT: vmv4r.v v4, v8
430+ ; CHECK-NEXT: vmv4r.v v4, v16
451431; CHECK-NEXT: vmv8r.v v8, v24
452432; CHECK-NEXT: vmv8r.v v16, v0
453433; CHECK-NEXT: csrr a0, vlenb
454- ; CHECK-NEXT: slli a0, a0, 4
434+ ; CHECK-NEXT: slli a0, a0, 3
455435; CHECK-NEXT: add sp, sp, a0
456436; CHECK-NEXT: .cfi_def_cfa sp, 16
457437; CHECK-NEXT: addi sp, sp, 16
0 commit comments