Skip to content

Commit afc7cc7

Browse files
committed
[RISCV] Fix missing CHECK prefixes in vector lrint test files. NFC
All of these test cases had iXLen in their names, which got replaced by sed. This prevented FileCheck from finding the functions. The other test cases in these files do not have that issue.
1 parent 922700d commit afc7cc7

File tree

4 files changed

+314
-4
lines changed

4 files changed

+314
-4
lines changed

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint-vp.ll

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,25 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x, <8 x i1> %m, i32 zeroext %evl) {
126126
}
127127
declare <8 x iXLen> @llvm.vp.lrint.v8iXLen.v8f32(<8 x float>, <8 x i1>, i32)
128128

129-
define <16 x iXLen> @lrint_v16iXLen_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroext %evl) {
129+
define <16 x iXLen> @lrint_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroext %evl) {
130+
; RV32-LABEL: lrint_v16f32:
131+
; RV32: # %bb.0:
132+
; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma
133+
; RV32-NEXT: vfcvt.x.f.v v8, v8, v0.t
134+
; RV32-NEXT: ret
135+
;
136+
; RV64-i32-LABEL: lrint_v16f32:
137+
; RV64-i32: # %bb.0:
138+
; RV64-i32-NEXT: vsetvli zero, a0, e32, m4, ta, ma
139+
; RV64-i32-NEXT: vfcvt.x.f.v v8, v8, v0.t
140+
; RV64-i32-NEXT: ret
141+
;
142+
; RV64-i64-LABEL: lrint_v16f32:
143+
; RV64-i64: # %bb.0:
144+
; RV64-i64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
145+
; RV64-i64-NEXT: vfwcvt.x.f.v v16, v8, v0.t
146+
; RV64-i64-NEXT: vmv8r.v v8, v16
147+
; RV64-i64-NEXT: ret
130148
%a = call <16 x iXLen> @llvm.vp.lrint.v16iXLen.v16f32(<16 x float> %x, <16 x i1> %m, i32 %evl)
131149
ret <16 x iXLen> %a
132150
}

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll

Lines changed: 240 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -380,7 +380,246 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
380380
}
381381
declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float>)
382382

383-
define <16 x iXLen> @lrint_v16iXLen_v16f32(<16 x float> %x) {
383+
define <16 x iXLen> @lrint_v16f32(<16 x float> %x) {
384+
; RV32-LABEL: lrint_v16f32:
385+
; RV32: # %bb.0:
386+
; RV32-NEXT: addi sp, sp, -192
387+
; RV32-NEXT: .cfi_def_cfa_offset 192
388+
; RV32-NEXT: sw ra, 188(sp) # 4-byte Folded Spill
389+
; RV32-NEXT: sw s0, 184(sp) # 4-byte Folded Spill
390+
; RV32-NEXT: .cfi_offset ra, -4
391+
; RV32-NEXT: .cfi_offset s0, -8
392+
; RV32-NEXT: addi s0, sp, 192
393+
; RV32-NEXT: .cfi_def_cfa s0, 0
394+
; RV32-NEXT: andi sp, sp, -64
395+
; RV32-NEXT: mv a0, sp
396+
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
397+
; RV32-NEXT: vse32.v v8, (a0)
398+
; RV32-NEXT: flw fa5, 60(sp)
399+
; RV32-NEXT: fcvt.w.s a0, fa5
400+
; RV32-NEXT: sw a0, 124(sp)
401+
; RV32-NEXT: flw fa5, 56(sp)
402+
; RV32-NEXT: fcvt.w.s a0, fa5
403+
; RV32-NEXT: sw a0, 120(sp)
404+
; RV32-NEXT: flw fa5, 52(sp)
405+
; RV32-NEXT: fcvt.w.s a0, fa5
406+
; RV32-NEXT: sw a0, 116(sp)
407+
; RV32-NEXT: flw fa5, 48(sp)
408+
; RV32-NEXT: fcvt.w.s a0, fa5
409+
; RV32-NEXT: sw a0, 112(sp)
410+
; RV32-NEXT: flw fa5, 44(sp)
411+
; RV32-NEXT: fcvt.w.s a0, fa5
412+
; RV32-NEXT: sw a0, 108(sp)
413+
; RV32-NEXT: flw fa5, 40(sp)
414+
; RV32-NEXT: fcvt.w.s a0, fa5
415+
; RV32-NEXT: sw a0, 104(sp)
416+
; RV32-NEXT: flw fa5, 36(sp)
417+
; RV32-NEXT: fcvt.w.s a0, fa5
418+
; RV32-NEXT: sw a0, 100(sp)
419+
; RV32-NEXT: flw fa5, 32(sp)
420+
; RV32-NEXT: fcvt.w.s a0, fa5
421+
; RV32-NEXT: sw a0, 96(sp)
422+
; RV32-NEXT: vfmv.f.s fa5, v8
423+
; RV32-NEXT: fcvt.w.s a0, fa5
424+
; RV32-NEXT: sw a0, 64(sp)
425+
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
426+
; RV32-NEXT: vslidedown.vi v10, v8, 3
427+
; RV32-NEXT: vfmv.f.s fa5, v10
428+
; RV32-NEXT: fcvt.w.s a0, fa5
429+
; RV32-NEXT: sw a0, 76(sp)
430+
; RV32-NEXT: vslidedown.vi v10, v8, 2
431+
; RV32-NEXT: vfmv.f.s fa5, v10
432+
; RV32-NEXT: fcvt.w.s a0, fa5
433+
; RV32-NEXT: sw a0, 72(sp)
434+
; RV32-NEXT: vslidedown.vi v10, v8, 1
435+
; RV32-NEXT: vfmv.f.s fa5, v10
436+
; RV32-NEXT: fcvt.w.s a0, fa5
437+
; RV32-NEXT: sw a0, 68(sp)
438+
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
439+
; RV32-NEXT: vslidedown.vi v10, v8, 7
440+
; RV32-NEXT: vfmv.f.s fa5, v10
441+
; RV32-NEXT: fcvt.w.s a0, fa5
442+
; RV32-NEXT: sw a0, 92(sp)
443+
; RV32-NEXT: vslidedown.vi v10, v8, 6
444+
; RV32-NEXT: vfmv.f.s fa5, v10
445+
; RV32-NEXT: fcvt.w.s a0, fa5
446+
; RV32-NEXT: sw a0, 88(sp)
447+
; RV32-NEXT: vslidedown.vi v10, v8, 5
448+
; RV32-NEXT: vfmv.f.s fa5, v10
449+
; RV32-NEXT: fcvt.w.s a0, fa5
450+
; RV32-NEXT: sw a0, 84(sp)
451+
; RV32-NEXT: vslidedown.vi v8, v8, 4
452+
; RV32-NEXT: vfmv.f.s fa5, v8
453+
; RV32-NEXT: fcvt.w.s a0, fa5
454+
; RV32-NEXT: sw a0, 80(sp)
455+
; RV32-NEXT: addi a0, sp, 64
456+
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
457+
; RV32-NEXT: vle32.v v8, (a0)
458+
; RV32-NEXT: addi sp, s0, -192
459+
; RV32-NEXT: lw ra, 188(sp) # 4-byte Folded Reload
460+
; RV32-NEXT: lw s0, 184(sp) # 4-byte Folded Reload
461+
; RV32-NEXT: addi sp, sp, 192
462+
; RV32-NEXT: ret
463+
;
464+
; RV64-i32-LABEL: lrint_v16f32:
465+
; RV64-i32: # %bb.0:
466+
; RV64-i32-NEXT: addi sp, sp, -192
467+
; RV64-i32-NEXT: .cfi_def_cfa_offset 192
468+
; RV64-i32-NEXT: sd ra, 184(sp) # 8-byte Folded Spill
469+
; RV64-i32-NEXT: sd s0, 176(sp) # 8-byte Folded Spill
470+
; RV64-i32-NEXT: .cfi_offset ra, -8
471+
; RV64-i32-NEXT: .cfi_offset s0, -16
472+
; RV64-i32-NEXT: addi s0, sp, 192
473+
; RV64-i32-NEXT: .cfi_def_cfa s0, 0
474+
; RV64-i32-NEXT: andi sp, sp, -64
475+
; RV64-i32-NEXT: mv a0, sp
476+
; RV64-i32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
477+
; RV64-i32-NEXT: vse32.v v8, (a0)
478+
; RV64-i32-NEXT: flw fa5, 60(sp)
479+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
480+
; RV64-i32-NEXT: sw a0, 124(sp)
481+
; RV64-i32-NEXT: flw fa5, 56(sp)
482+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
483+
; RV64-i32-NEXT: sw a0, 120(sp)
484+
; RV64-i32-NEXT: flw fa5, 52(sp)
485+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
486+
; RV64-i32-NEXT: sw a0, 116(sp)
487+
; RV64-i32-NEXT: flw fa5, 48(sp)
488+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
489+
; RV64-i32-NEXT: sw a0, 112(sp)
490+
; RV64-i32-NEXT: flw fa5, 44(sp)
491+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
492+
; RV64-i32-NEXT: sw a0, 108(sp)
493+
; RV64-i32-NEXT: flw fa5, 40(sp)
494+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
495+
; RV64-i32-NEXT: sw a0, 104(sp)
496+
; RV64-i32-NEXT: flw fa5, 36(sp)
497+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
498+
; RV64-i32-NEXT: sw a0, 100(sp)
499+
; RV64-i32-NEXT: flw fa5, 32(sp)
500+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
501+
; RV64-i32-NEXT: sw a0, 96(sp)
502+
; RV64-i32-NEXT: vfmv.f.s fa5, v8
503+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
504+
; RV64-i32-NEXT: sw a0, 64(sp)
505+
; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
506+
; RV64-i32-NEXT: vslidedown.vi v10, v8, 3
507+
; RV64-i32-NEXT: vfmv.f.s fa5, v10
508+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
509+
; RV64-i32-NEXT: sw a0, 76(sp)
510+
; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
511+
; RV64-i32-NEXT: vfmv.f.s fa5, v10
512+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
513+
; RV64-i32-NEXT: sw a0, 72(sp)
514+
; RV64-i32-NEXT: vslidedown.vi v10, v8, 1
515+
; RV64-i32-NEXT: vfmv.f.s fa5, v10
516+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
517+
; RV64-i32-NEXT: sw a0, 68(sp)
518+
; RV64-i32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
519+
; RV64-i32-NEXT: vslidedown.vi v10, v8, 7
520+
; RV64-i32-NEXT: vfmv.f.s fa5, v10
521+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
522+
; RV64-i32-NEXT: sw a0, 92(sp)
523+
; RV64-i32-NEXT: vslidedown.vi v10, v8, 6
524+
; RV64-i32-NEXT: vfmv.f.s fa5, v10
525+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
526+
; RV64-i32-NEXT: sw a0, 88(sp)
527+
; RV64-i32-NEXT: vslidedown.vi v10, v8, 5
528+
; RV64-i32-NEXT: vfmv.f.s fa5, v10
529+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
530+
; RV64-i32-NEXT: sw a0, 84(sp)
531+
; RV64-i32-NEXT: vslidedown.vi v8, v8, 4
532+
; RV64-i32-NEXT: vfmv.f.s fa5, v8
533+
; RV64-i32-NEXT: fcvt.l.s a0, fa5
534+
; RV64-i32-NEXT: sw a0, 80(sp)
535+
; RV64-i32-NEXT: addi a0, sp, 64
536+
; RV64-i32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
537+
; RV64-i32-NEXT: vle32.v v8, (a0)
538+
; RV64-i32-NEXT: addi sp, s0, -192
539+
; RV64-i32-NEXT: ld ra, 184(sp) # 8-byte Folded Reload
540+
; RV64-i32-NEXT: ld s0, 176(sp) # 8-byte Folded Reload
541+
; RV64-i32-NEXT: addi sp, sp, 192
542+
; RV64-i32-NEXT: ret
543+
;
544+
; RV64-i64-LABEL: lrint_v16f32:
545+
; RV64-i64: # %bb.0:
546+
; RV64-i64-NEXT: addi sp, sp, -384
547+
; RV64-i64-NEXT: .cfi_def_cfa_offset 384
548+
; RV64-i64-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
549+
; RV64-i64-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
550+
; RV64-i64-NEXT: .cfi_offset ra, -8
551+
; RV64-i64-NEXT: .cfi_offset s0, -16
552+
; RV64-i64-NEXT: addi s0, sp, 384
553+
; RV64-i64-NEXT: .cfi_def_cfa s0, 0
554+
; RV64-i64-NEXT: andi sp, sp, -128
555+
; RV64-i64-NEXT: addi a0, sp, 64
556+
; RV64-i64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
557+
; RV64-i64-NEXT: vse32.v v8, (a0)
558+
; RV64-i64-NEXT: flw fa5, 124(sp)
559+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
560+
; RV64-i64-NEXT: sd a0, 248(sp)
561+
; RV64-i64-NEXT: flw fa5, 120(sp)
562+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
563+
; RV64-i64-NEXT: sd a0, 240(sp)
564+
; RV64-i64-NEXT: flw fa5, 116(sp)
565+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
566+
; RV64-i64-NEXT: sd a0, 232(sp)
567+
; RV64-i64-NEXT: flw fa5, 112(sp)
568+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
569+
; RV64-i64-NEXT: sd a0, 224(sp)
570+
; RV64-i64-NEXT: flw fa5, 108(sp)
571+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
572+
; RV64-i64-NEXT: sd a0, 216(sp)
573+
; RV64-i64-NEXT: flw fa5, 104(sp)
574+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
575+
; RV64-i64-NEXT: sd a0, 208(sp)
576+
; RV64-i64-NEXT: flw fa5, 100(sp)
577+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
578+
; RV64-i64-NEXT: sd a0, 200(sp)
579+
; RV64-i64-NEXT: flw fa5, 96(sp)
580+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
581+
; RV64-i64-NEXT: sd a0, 192(sp)
582+
; RV64-i64-NEXT: vfmv.f.s fa5, v8
583+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
584+
; RV64-i64-NEXT: sd a0, 128(sp)
585+
; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
586+
; RV64-i64-NEXT: vslidedown.vi v10, v8, 3
587+
; RV64-i64-NEXT: vfmv.f.s fa5, v10
588+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
589+
; RV64-i64-NEXT: sd a0, 152(sp)
590+
; RV64-i64-NEXT: vslidedown.vi v10, v8, 2
591+
; RV64-i64-NEXT: vfmv.f.s fa5, v10
592+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
593+
; RV64-i64-NEXT: sd a0, 144(sp)
594+
; RV64-i64-NEXT: vslidedown.vi v10, v8, 1
595+
; RV64-i64-NEXT: vfmv.f.s fa5, v10
596+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
597+
; RV64-i64-NEXT: sd a0, 136(sp)
598+
; RV64-i64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
599+
; RV64-i64-NEXT: vslidedown.vi v10, v8, 7
600+
; RV64-i64-NEXT: vfmv.f.s fa5, v10
601+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
602+
; RV64-i64-NEXT: sd a0, 184(sp)
603+
; RV64-i64-NEXT: vslidedown.vi v10, v8, 6
604+
; RV64-i64-NEXT: vfmv.f.s fa5, v10
605+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
606+
; RV64-i64-NEXT: sd a0, 176(sp)
607+
; RV64-i64-NEXT: vslidedown.vi v10, v8, 5
608+
; RV64-i64-NEXT: vfmv.f.s fa5, v10
609+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
610+
; RV64-i64-NEXT: sd a0, 168(sp)
611+
; RV64-i64-NEXT: vslidedown.vi v8, v8, 4
612+
; RV64-i64-NEXT: vfmv.f.s fa5, v8
613+
; RV64-i64-NEXT: fcvt.l.s a0, fa5
614+
; RV64-i64-NEXT: sd a0, 160(sp)
615+
; RV64-i64-NEXT: addi a0, sp, 128
616+
; RV64-i64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
617+
; RV64-i64-NEXT: vle64.v v8, (a0)
618+
; RV64-i64-NEXT: addi sp, s0, -384
619+
; RV64-i64-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
620+
; RV64-i64-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
621+
; RV64-i64-NEXT: addi sp, sp, 384
622+
; RV64-i64-NEXT: ret
384623
%a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x)
385624
ret <16 x iXLen> %a
386625
}

llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,26 @@ define <vscale x 8 x iXLen> @lrint_nxv8f32(<vscale x 8 x float> %x) {
102102
}
103103
declare <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f32(<vscale x 8 x float>)
104104

105-
define <vscale x 16 x iXLen> @lrint_nxv16iXLen_nxv16f32(<vscale x 16 x float> %x) {
105+
define <vscale x 16 x iXLen> @lrint_nxv16f32(<vscale x 16 x float> %x) {
106+
; RV32-LABEL: lrint_nxv16f32:
107+
; RV32: # %bb.0:
108+
; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, ma
109+
; RV32-NEXT: vfcvt.x.f.v v8, v8
110+
; RV32-NEXT: ret
111+
;
112+
; RV64-i32-LABEL: lrint_nxv16f32:
113+
; RV64-i32: # %bb.0:
114+
; RV64-i32-NEXT: vsetvli a0, zero, e32, m8, ta, ma
115+
; RV64-i32-NEXT: vfcvt.x.f.v v8, v8
116+
; RV64-i32-NEXT: ret
117+
;
118+
; RV64-i64-LABEL: lrint_nxv16f32:
119+
; RV64-i64: # %bb.0:
120+
; RV64-i64-NEXT: vsetvli a0, zero, e32, m4, ta, ma
121+
; RV64-i64-NEXT: vfwcvt.x.f.v v24, v8
122+
; RV64-i64-NEXT: vfwcvt.x.f.v v16, v12
123+
; RV64-i64-NEXT: vmv8r.v v8, v24
124+
; RV64-i64-NEXT: ret
106125
%a = call <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16f32(<vscale x 16 x float> %x)
107126
ret <vscale x 16 x iXLen> %a
108127
}

llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll

Lines changed: 35 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,41 @@ define <vscale x 8 x iXLen> @lrint_nxv8f32(<vscale x 8 x float> %x, <vscale x 8
102102
}
103103
declare <vscale x 8 x iXLen> @llvm.vp.lrint.nxv8iXLen.nxv8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)
104104

105-
define <vscale x 16 x iXLen> @lrint_nxv16iXLen_nxv16f32(<vscale x 16 x float> %x, <vscale x 16 x i1> %m, i32 zeroext %evl) {
105+
define <vscale x 16 x iXLen> @lrint_nxv16f32(<vscale x 16 x float> %x, <vscale x 16 x i1> %m, i32 zeroext %evl) {
106+
; RV32-LABEL: lrint_nxv16f32:
107+
; RV32: # %bb.0:
108+
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
109+
; RV32-NEXT: vfcvt.x.f.v v8, v8, v0.t
110+
; RV32-NEXT: ret
111+
;
112+
; RV64-i32-LABEL: lrint_nxv16f32:
113+
; RV64-i32: # %bb.0:
114+
; RV64-i32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
115+
; RV64-i32-NEXT: vfcvt.x.f.v v8, v8, v0.t
116+
; RV64-i32-NEXT: ret
117+
;
118+
; RV64-i64-LABEL: lrint_nxv16f32:
119+
; RV64-i64: # %bb.0:
120+
; RV64-i64-NEXT: vmv1r.v v24, v0
121+
; RV64-i64-NEXT: csrr a1, vlenb
122+
; RV64-i64-NEXT: srli a2, a1, 3
123+
; RV64-i64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
124+
; RV64-i64-NEXT: vslidedown.vx v0, v0, a2
125+
; RV64-i64-NEXT: sub a2, a0, a1
126+
; RV64-i64-NEXT: sltu a3, a0, a2
127+
; RV64-i64-NEXT: addi a3, a3, -1
128+
; RV64-i64-NEXT: and a2, a3, a2
129+
; RV64-i64-NEXT: vsetvli zero, a2, e32, m4, ta, ma
130+
; RV64-i64-NEXT: vfwcvt.x.f.v v16, v12, v0.t
131+
; RV64-i64-NEXT: bltu a0, a1, .LBB4_2
132+
; RV64-i64-NEXT: # %bb.1:
133+
; RV64-i64-NEXT: mv a0, a1
134+
; RV64-i64-NEXT: .LBB4_2:
135+
; RV64-i64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
136+
; RV64-i64-NEXT: vmv1r.v v0, v24
137+
; RV64-i64-NEXT: vfwcvt.x.f.v v24, v8, v0.t
138+
; RV64-i64-NEXT: vmv8r.v v8, v24
139+
; RV64-i64-NEXT: ret
106140
%a = call <vscale x 16 x iXLen> @llvm.vp.lrint.nxv16iXLen.nxv16f32(<vscale x 16 x float> %x, <vscale x 16 x i1> %m, i32 %evl)
107141
ret <vscale x 16 x iXLen> %a
108142
}

0 commit comments

Comments
 (0)