
Commit 777ccf8

Add nounwind to avoid cfi directives
1 parent 915c27d
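
For context: a function marked nounwind cannot unwind, so the backend emits no unwind table and none of the .cfi_* prologue/epilogue directives, which is what makes the autogenerated CHECK lines in this diff shorter. A minimal sketch of the effect (the function @demo and the llc invocation are illustrative, not part of this patch):

; Compile with: llc -mtriple=riscv64 demo.ll
define void @demo() nounwind {
  ; The stack slot forces a prologue; with nounwind that prologue carries
  ; no .cfi_def_cfa_offset annotation. Drop nounwind and each stack
  ; adjustment gains a matching CFI directive.
  %slot = alloca i32
  store volatile i32 0, ptr %slot
  ret void
}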

File tree

2 files changed: +18, -256 lines

llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll

Lines changed: 6 additions & 100 deletions
@@ -189,15 +189,13 @@ define {<8 x i64>, <8 x i64>} @vector_deinterleave_v8i64_v16i64(<16 x i64> %vec)
 ret {<8 x i64>, <8 x i64>} %retval
 }
 
-define {<2 x i32>, <2 x i32>, <2 x i32>} @vector_deinterleave3_v2i32_v6i32(<6 x i32> %v) {
+define {<2 x i32>, <2 x i32>, <2 x i32>} @vector_deinterleave3_v2i32_v6i32(<6 x i32> %v) nounwind {
 ; CHECK-LABEL: vector_deinterleave3_v2i32_v6i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 1
 ; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v12, v8, 2
@@ -215,23 +213,19 @@ define {<2 x i32>, <2 x i32>, <2 x i32>} @vector_deinterleave3_v2i32_v6i32(<6 x
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 1
 ; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
 ; CHECK-NEXT: ret
 %res = call {<2 x i32>, <2 x i32>, <2 x i32>} @llvm.vector.deinterleave3.v6i32(<6 x i32> %v)
 ret {<2 x i32>, <2 x i32>, <2 x i32>} %res
 }
 
-define {<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>} @vector_deinterleave3_v2i32_v8i32(<8 x i32> %v) {
+define {<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>} @vector_deinterleave3_v2i32_v8i32(<8 x i32> %v) nounwind {
 ; CHECK-LABEL: vector_deinterleave3_v2i32_v8i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 1
 ; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma
 ; CHECK-NEXT: vslidedown.vi v10, v8, 6
@@ -251,23 +245,19 @@ define {<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>} @vector_deinterleave3_v2i32_
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 1
 ; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
 ; CHECK-NEXT: ret
 %res = call {<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>} @llvm.vector.deinterleave4.v8i32(<8 x i32> %v)
 ret {<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>} %res
 }
 
-define {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} @vector_deinterleave5_v2i16_v10i16(<10 x i16> %v) {
+define {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} @vector_deinterleave5_v2i16_v10i16(<10 x i16> %v) nounwind {
 ; CHECK-LABEL: vector_deinterleave5_v2i16_v10i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 1
 ; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v12, v8, 6
@@ -292,23 +282,19 @@ define {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} @vector_deinterle
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 1
 ; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
 ; CHECK-NEXT: ret
 %res = call {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} @llvm.vector.deinterleave5.v10i16(<10 x i16> %v)
 ret {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} %res
 }
 
-define {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} @vector_deinterleave6_v2i16_v12i16(<12 x i16> %v) {
+define {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} @vector_deinterleave6_v2i16_v12i16(<12 x i16> %v) nounwind {
 ; CHECK-LABEL: vector_deinterleave6_v2i16_v12i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 1
 ; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v14, v8, 6
@@ -335,29 +321,22 @@ define {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} @vecto
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 1
 ; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
 ; CHECK-NEXT: ret
 %res = call {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} @llvm.vector.deinterleave6.v12i16(<12 x i16> %v)
 ret {<2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>, <2 x i16>} %res
 }
 
-define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @vector_deinterleave7_v14i8_v2i8(<14 x i8> %v) {
+define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @vector_deinterleave7_v14i8_v2i8(<14 x i8> %v) nounwind {
 ; RV32-LABEL: vector_deinterleave7_v14i8_v2i8:
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
 ; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
 ; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
 ; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
 ; RV32-NEXT: csrr a0, vlenb
 ; RV32-NEXT: slli a0, a0, 2
 ; RV32-NEXT: sub sp, sp, a0
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; RV32-NEXT: addi a0, sp, 32
 ; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
 ; RV32-NEXT: csrr s1, vlenb
@@ -424,31 +403,21 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @v
 ; RV32-NEXT: csrr a0, vlenb
 ; RV32-NEXT: slli a0, a0, 2
 ; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: .cfi_def_cfa sp, 48
 ; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
 ; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: .cfi_restore ra
-; RV32-NEXT: .cfi_restore s0
-; RV32-NEXT: .cfi_restore s1
 ; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vector_deinterleave7_v14i8_v2i8:
 ; RV64: # %bb.0:
 ; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
 ; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
 ; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
 ; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
 ; RV64-NEXT: csrr a0, vlenb
 ; RV64-NEXT: slli a0, a0, 2
 ; RV64-NEXT: sub sp, sp, a0
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
 ; RV64-NEXT: addi a0, sp, 32
 ; RV64-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
 ; RV64-NEXT: csrr s1, vlenb
@@ -515,31 +484,21 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @v
 ; RV64-NEXT: csrr a0, vlenb
 ; RV64-NEXT: slli a0, a0, 2
 ; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: .cfi_def_cfa sp, 64
 ; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
-; RV64-NEXT: .cfi_restore s1
 ; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; ZIP-LABEL: vector_deinterleave7_v14i8_v2i8:
 ; ZIP: # %bb.0:
 ; ZIP-NEXT: addi sp, sp, -64
-; ZIP-NEXT: .cfi_def_cfa_offset 64
 ; ZIP-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
 ; ZIP-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
 ; ZIP-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; ZIP-NEXT: .cfi_offset ra, -8
-; ZIP-NEXT: .cfi_offset s0, -16
-; ZIP-NEXT: .cfi_offset s1, -24
 ; ZIP-NEXT: csrr a0, vlenb
 ; ZIP-NEXT: slli a0, a0, 2
 ; ZIP-NEXT: sub sp, sp, a0
-; ZIP-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
 ; ZIP-NEXT: addi a0, sp, 32
 ; ZIP-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
 ; ZIP-NEXT: csrr s1, vlenb
@@ -606,42 +565,29 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @v
 ; ZIP-NEXT: csrr a0, vlenb
 ; ZIP-NEXT: slli a0, a0, 2
 ; ZIP-NEXT: add sp, sp, a0
-; ZIP-NEXT: .cfi_def_cfa sp, 64
 ; ZIP-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; ZIP-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; ZIP-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; ZIP-NEXT: .cfi_restore ra
-; ZIP-NEXT: .cfi_restore s0
-; ZIP-NEXT: .cfi_restore s1
 ; ZIP-NEXT: addi sp, sp, 64
-; ZIP-NEXT: .cfi_def_cfa_offset 0
 ; ZIP-NEXT: ret
 %res = call {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @llvm.vector.deinterleave7.v14i8(<14 x i8> %v)
 ret {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} %res
 }
 
-define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @vector_deinterleave8_v16i8_v2i8(<16 x i8> %v) {
+define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @vector_deinterleave8_v16i8_v2i8(<16 x i8> %v) nounwind {
 ; RV32-LABEL: vector_deinterleave8_v16i8_v2i8:
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
 ; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
 ; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
 ; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
 ; RV32-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
 ; RV32-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset s3, -20
-; RV32-NEXT: .cfi_offset s4, -24
 ; RV32-NEXT: csrr a0, vlenb
 ; RV32-NEXT: slli a1, a0, 1
 ; RV32-NEXT: add a0, a1, a0
 ; RV32-NEXT: sub sp, sp, a0
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
 ; RV32-NEXT: addi a0, sp, 16
 ; RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
 ; RV32-NEXT: csrr s1, vlenb
@@ -701,44 +647,28 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2
 ; RV32-NEXT: slli a1, a0, 1
 ; RV32-NEXT: add a0, a1, a0
 ; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: .cfi_def_cfa sp, 48
 ; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
 ; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
 ; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
 ; RV32-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
 ; RV32-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: .cfi_restore ra
-; RV32-NEXT: .cfi_restore s0
-; RV32-NEXT: .cfi_restore s1
-; RV32-NEXT: .cfi_restore s2
-; RV32-NEXT: .cfi_restore s3
-; RV32-NEXT: .cfi_restore s4
 ; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: vector_deinterleave8_v16i8_v2i8:
 ; RV64: # %bb.0:
 ; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
 ; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
 ; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
 ; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
 ; RV64-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
 ; RV64-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
 ; RV64-NEXT: sd s4, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset s3, -40
-; RV64-NEXT: .cfi_offset s4, -48
 ; RV64-NEXT: csrr a0, vlenb
 ; RV64-NEXT: slli a1, a0, 1
 ; RV64-NEXT: add a0, a1, a0
 ; RV64-NEXT: sub sp, sp, a0
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
 ; RV64-NEXT: addi a0, sp, 16
 ; RV64-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
 ; RV64-NEXT: csrr s1, vlenb
@@ -798,44 +728,28 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2
 ; RV64-NEXT: slli a1, a0, 1
 ; RV64-NEXT: add a0, a1, a0
 ; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: .cfi_def_cfa sp, 64
 ; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
 ; RV64-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
 ; RV64-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
 ; RV64-NEXT: ld s4, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
-; RV64-NEXT: .cfi_restore s1
-; RV64-NEXT: .cfi_restore s2
-; RV64-NEXT: .cfi_restore s3
-; RV64-NEXT: .cfi_restore s4
 ; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; ZIP-LABEL: vector_deinterleave8_v16i8_v2i8:
 ; ZIP: # %bb.0:
 ; ZIP-NEXT: addi sp, sp, -64
-; ZIP-NEXT: .cfi_def_cfa_offset 64
 ; ZIP-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
 ; ZIP-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
 ; ZIP-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
 ; ZIP-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
 ; ZIP-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
 ; ZIP-NEXT: sd s4, 16(sp) # 8-byte Folded Spill
-; ZIP-NEXT: .cfi_offset ra, -8
-; ZIP-NEXT: .cfi_offset s0, -16
-; ZIP-NEXT: .cfi_offset s1, -24
-; ZIP-NEXT: .cfi_offset s2, -32
-; ZIP-NEXT: .cfi_offset s3, -40
-; ZIP-NEXT: .cfi_offset s4, -48
 ; ZIP-NEXT: csrr a0, vlenb
 ; ZIP-NEXT: slli a1, a0, 1
 ; ZIP-NEXT: add a0, a1, a0
 ; ZIP-NEXT: sub sp, sp, a0
-; ZIP-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
 ; ZIP-NEXT: addi a0, sp, 16
 ; ZIP-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
 ; ZIP-NEXT: csrr s1, vlenb
@@ -895,21 +809,13 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2
 ; ZIP-NEXT: slli a1, a0, 1
 ; ZIP-NEXT: add a0, a1, a0
 ; ZIP-NEXT: add sp, sp, a0
-; ZIP-NEXT: .cfi_def_cfa sp, 64
 ; ZIP-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; ZIP-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; ZIP-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
 ; ZIP-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
 ; ZIP-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
 ; ZIP-NEXT: ld s4, 16(sp) # 8-byte Folded Reload
-; ZIP-NEXT: .cfi_restore ra
-; ZIP-NEXT: .cfi_restore s0
-; ZIP-NEXT: .cfi_restore s1
-; ZIP-NEXT: .cfi_restore s2
-; ZIP-NEXT: .cfi_restore s3
-; ZIP-NEXT: .cfi_restore s4
 ; ZIP-NEXT: addi sp, sp, 64
-; ZIP-NEXT: .cfi_def_cfa_offset 0
 ; ZIP-NEXT: ret
 %res = call {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @llvm.vector.deinterleave8.v16i8(<16 x i8> %v)
 ret {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} %res
