
Commit f151a36

[RISCV] Disable slideup optimization on the inconsistent element type of EVec and ContainerVT (#159373)
Fixes #159294. The element types of EVecContainerVT and ContainerVT can differ after integer type promotion. This patch disables the slideup optimization in that case.
1 parent e60ca86 commit f151a36
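
As a minimal sketch of the problematic shape (function name illustrative; the full reproducers are the PR159294 tests added below): the build_vector elements are truncated from <2 x i32> sources, so the extracted source vector carries i32 elements while the <16 x i16> result's container carries i16 elements, and a whole-element slideup would misplace the 16-bit lanes.

define <16 x i16> @slideup_elt_mismatch(<2 x i32> %a) {
entry:
  ; Illustrative reduction: %a has i32 elements but the result has i16
  ; elements, so the slideup shortcut must not be taken here.
  %e = extractelement <2 x i32> %a, i32 0
  %t = trunc i32 %e to i16
  %v = insertelement <16 x i16> zeroinitializer, i16 %t, i32 1
  ret <16 x i16> %v
}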

File tree

2 files changed (+325, -0 lines)

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 8 additions & 0 deletions
@@ -4564,6 +4564,14 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
      break;
    }

+  // Do not slideup if the element type of EVec is different.
+  if (SlideUp) {
+    MVT EVecEltVT = EVec.getSimpleValueType().getVectorElementType();
+    MVT ContainerEltVT = ContainerVT.getVectorElementType();
+    if (EVecEltVT != ContainerEltVT)
+      SlideUp = false;
+  }
+
  if (SlideUp) {
    MVT EVecContainerVT = EVec.getSimpleValueType();
    // Make sure the original vector has scalable vector type.
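
The promoted-integer case mentioned in the commit message is exercised by the PR159294_zext and PR159294_sext tests below. A distilled sketch under the same assumptions (function name illustrative): the i16 scalars are extended to i32, so the extracted source vector and the result's container disagree on element width, and the guard above clears SlideUp.

define <16 x i32> @slideup_zext_mismatch(<2 x i16> %a) {
entry:
  ; Illustrative reduction: the <2 x i16> source has i16 elements while
  ; the <16 x i32> result has i32 elements; the new guard rejects the
  ; slideup path for this mismatch.
  %e = extractelement <2 x i16> %a, i32 0
  %z = zext i16 %e to i32
  %v = insertelement <16 x i32> zeroinitializer, i32 %z, i32 1
  ret <16 x i32> %v
}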

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll

Lines changed: 317 additions & 0 deletions
@@ -3597,5 +3597,322 @@ define <4 x i32> @buildvec_vredmax_slideup(<8 x i32> %arg0, <8 x i32> %arg1, <8
  ret <4 x i32> %255
}

+define <16 x i16> @PR159294(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) {
+; RV32-ONLY-LABEL: PR159294:
+; RV32-ONLY: # %bb.0: # %entry
+; RV32-ONLY-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-ONLY-NEXT: vmv.x.s a0, v8
+; RV32-ONLY-NEXT: vmv.x.s a1, v9
+; RV32-ONLY-NEXT: vmv.x.s a2, v10
+; RV32-ONLY-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-ONLY-NEXT: vmv.v.x v8, a2
+; RV32-ONLY-NEXT: vslide1down.vx v8, v8, a0
+; RV32-ONLY-NEXT: vslide1down.vx v8, v8, a1
+; RV32-ONLY-NEXT: vslidedown.vi v8, v8, 13
+; RV32-ONLY-NEXT: ret
+;
+; RV32VB-LABEL: PR159294:
+; RV32VB: # %bb.0: # %entry
+; RV32VB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32VB-NEXT: vmv.x.s a0, v8
+; RV32VB-NEXT: vmv.x.s a1, v10
+; RV32VB-NEXT: slli a0, a0, 16
+; RV32VB-NEXT: zext.h a1, a1
+; RV32VB-NEXT: or a0, a1, a0
+; RV32VB-NEXT: vmv.x.s a1, v9
+; RV32VB-NEXT: vmv.v.i v8, 0
+; RV32VB-NEXT: zext.h a1, a1
+; RV32VB-NEXT: vsetvli zero, zero, e32, m2, tu, ma
+; RV32VB-NEXT: vmv.s.x v8, a0
+; RV32VB-NEXT: vmv.s.x v10, a1
+; RV32VB-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; RV32VB-NEXT: vslideup.vi v8, v10, 1
+; RV32VB-NEXT: ret
+;
+; RV32VB-PACK-LABEL: PR159294:
+; RV32VB-PACK: # %bb.0: # %entry
+; RV32VB-PACK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32VB-PACK-NEXT: vmv.x.s a0, v8
+; RV32VB-PACK-NEXT: vmv.x.s a1, v10
+; RV32VB-PACK-NEXT: vmv.x.s a2, v9
+; RV32VB-PACK-NEXT: pack a0, a1, a0
+; RV32VB-PACK-NEXT: pack a1, a0, a0
+; RV32VB-PACK-NEXT: vmv.v.x v8, a1
+; RV32VB-PACK-NEXT: pack a1, a2, a0
+; RV32VB-PACK-NEXT: vsetvli zero, zero, e32, m2, tu, ma
+; RV32VB-PACK-NEXT: vmv.s.x v8, a0
+; RV32VB-PACK-NEXT: vmv.s.x v10, a1
+; RV32VB-PACK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; RV32VB-PACK-NEXT: vslideup.vi v8, v10, 1
+; RV32VB-PACK-NEXT: ret
+;
+; RV64V-ONLY-LABEL: PR159294:
+; RV64V-ONLY: # %bb.0: # %entry
+; RV64V-ONLY-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64V-ONLY-NEXT: vmv.x.s a0, v8
+; RV64V-ONLY-NEXT: vmv.x.s a1, v9
+; RV64V-ONLY-NEXT: vmv.x.s a2, v10
+; RV64V-ONLY-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64V-ONLY-NEXT: vmv.v.x v8, a2
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a0
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a1
+; RV64V-ONLY-NEXT: vslidedown.vi v8, v8, 13
+; RV64V-ONLY-NEXT: ret
+;
+; RVA22U64-LABEL: PR159294:
+; RVA22U64: # %bb.0: # %entry
+; RVA22U64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVA22U64-NEXT: vmv.x.s a0, v8
+; RVA22U64-NEXT: vmv.x.s a1, v10
+; RVA22U64-NEXT: slli a0, a0, 16
+; RVA22U64-NEXT: zext.h a1, a1
+; RVA22U64-NEXT: or a0, a0, a1
+; RVA22U64-NEXT: vmv.x.s a1, v9
+; RVA22U64-NEXT: vmv.v.i v8, 0
+; RVA22U64-NEXT: zext.h a1, a1
+; RVA22U64-NEXT: vsetvli zero, zero, e32, m2, tu, ma
+; RVA22U64-NEXT: vmv.s.x v8, a0
+; RVA22U64-NEXT: vmv.s.x v10, a1
+; RVA22U64-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; RVA22U64-NEXT: vslideup.vi v8, v10, 1
+; RVA22U64-NEXT: ret
+;
+; RVA22U64-PACK-LABEL: PR159294:
+; RVA22U64-PACK: # %bb.0: # %entry
+; RVA22U64-PACK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVA22U64-PACK-NEXT: vmv.x.s a0, v8
+; RVA22U64-PACK-NEXT: vmv.x.s a1, v10
+; RVA22U64-PACK-NEXT: vmv.x.s a2, v9
+; RVA22U64-PACK-NEXT: packw a0, a1, a0
+; RVA22U64-PACK-NEXT: packw a1, a0, a0
+; RVA22U64-PACK-NEXT: vmv.v.x v8, a1
+; RVA22U64-PACK-NEXT: packw a1, a2, a0
+; RVA22U64-PACK-NEXT: vsetvli zero, zero, e32, m2, tu, ma
+; RVA22U64-PACK-NEXT: vmv.s.x v8, a0
+; RVA22U64-PACK-NEXT: vmv.s.x v10, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; RVA22U64-PACK-NEXT: vslideup.vi v8, v10, 1
+; RVA22U64-PACK-NEXT: ret
+;
+; RV64ZVE32-LABEL: PR159294:
+; RV64ZVE32: # %bb.0: # %entry
+; RV64ZVE32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32-NEXT: vmv.x.s a0, v8
+; RV64ZVE32-NEXT: vmv.x.s a1, v9
+; RV64ZVE32-NEXT: vmv.x.s a2, v10
+; RV64ZVE32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64ZVE32-NEXT: vmv.v.x v8, a2
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a0
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a1
+; RV64ZVE32-NEXT: vslidedown.vi v8, v8, 13
+; RV64ZVE32-NEXT: ret
+entry:
+  %vecext3 = extractelement <2 x i32> %a, i32 0
+  %conv4 = trunc i32 %vecext3 to i16
+  %vecinit5 = insertelement <16 x i16> <i16 0, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison>, i16 %conv4, i32 1
+  %vecext7 = extractelement <2 x i32> %b, i32 0
+  %conv8 = trunc i32 %vecext7 to i16
+  %vecinit9 = insertelement <16 x i16> %vecinit5, i16 %conv8, i32 2
+  %vecext59 = extractelement <2 x i32> %c, i32 0
+  %conv60 = trunc i32 %vecext59 to i16
+  %vecinit61 = insertelement <16 x i16> %vecinit9, i16 %conv60, i32 0
+  ret <16 x i16> %vecinit61
+}
+
+define <16 x i32> @PR159294_zext(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; RV32-LABEL: PR159294_zext:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: vmv.x.s a1, v9
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: lui a2, 16
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: addi a2, a2, -1
+; RV32-NEXT: vand.vx v8, v8, a2
+; RV32-NEXT: ret
+;
+; RV64V-ONLY-LABEL: PR159294_zext:
+; RV64V-ONLY: # %bb.0: # %entry
+; RV64V-ONLY-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64V-ONLY-NEXT: vmv.x.s a0, v8
+; RV64V-ONLY-NEXT: lui a1, 16
+; RV64V-ONLY-NEXT: vmv.x.s a2, v9
+; RV64V-ONLY-NEXT: vmv.x.s a3, v10
+; RV64V-ONLY-NEXT: addi a1, a1, -1
+; RV64V-ONLY-NEXT: and a0, a0, a1
+; RV64V-ONLY-NEXT: and a2, a2, a1
+; RV64V-ONLY-NEXT: and a1, a3, a1
+; RV64V-ONLY-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64V-ONLY-NEXT: vmv.v.x v8, a1
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a0
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a2
+; RV64V-ONLY-NEXT: vslidedown.vi v8, v8, 13
+; RV64V-ONLY-NEXT: ret
+;
+; RVA22U64-LABEL: PR159294_zext:
+; RVA22U64: # %bb.0: # %entry
+; RVA22U64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RVA22U64-NEXT: vmv.x.s a0, v8
+; RVA22U64-NEXT: vmv.x.s a1, v10
+; RVA22U64-NEXT: slli a0, a0, 48
+; RVA22U64-NEXT: zext.h a1, a1
+; RVA22U64-NEXT: srli a0, a0, 16
+; RVA22U64-NEXT: or a0, a0, a1
+; RVA22U64-NEXT: vmv.x.s a1, v9
+; RVA22U64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RVA22U64-NEXT: vmv.v.i v8, 0
+; RVA22U64-NEXT: zext.h a1, a1
+; RVA22U64-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RVA22U64-NEXT: vmv.s.x v8, a0
+; RVA22U64-NEXT: vmv.s.x v12, a1
+; RVA22U64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
+; RVA22U64-NEXT: vslideup.vi v8, v12, 1
+; RVA22U64-NEXT: ret
+;
+; RVA22U64-PACK-LABEL: PR159294_zext:
+; RVA22U64-PACK: # %bb.0: # %entry
+; RVA22U64-PACK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RVA22U64-PACK-NEXT: vmv1r.v v12, v9
+; RVA22U64-PACK-NEXT: vmv.x.s a0, v8
+; RVA22U64-PACK-NEXT: vmv.x.s a1, v10
+; RVA22U64-PACK-NEXT: pack a2, a0, a0
+; RVA22U64-PACK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RVA22U64-PACK-NEXT: vmv.v.x v8, a2
+; RVA22U64-PACK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.x.s a2, v12
+; RVA22U64-PACK-NEXT: zext.h a0, a0
+; RVA22U64-PACK-NEXT: zext.h a1, a1
+; RVA22U64-PACK-NEXT: zext.h a2, a2
+; RVA22U64-PACK-NEXT: pack a0, a1, a0
+; RVA22U64-PACK-NEXT: pack a1, a2, a0
+; RVA22U64-PACK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RVA22U64-PACK-NEXT: vmv.s.x v8, a0
+; RVA22U64-PACK-NEXT: vmv.s.x v12, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, tu, ma
+; RVA22U64-PACK-NEXT: vslideup.vi v8, v12, 1
+; RVA22U64-PACK-NEXT: ret
+;
+; RV64ZVE32-LABEL: PR159294_zext:
+; RV64ZVE32: # %bb.0: # %entry
+; RV64ZVE32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32-NEXT: vmv.x.s a0, v8
+; RV64ZVE32-NEXT: lui a1, 16
+; RV64ZVE32-NEXT: vmv.x.s a2, v9
+; RV64ZVE32-NEXT: vmv.x.s a3, v10
+; RV64ZVE32-NEXT: addi a1, a1, -1
+; RV64ZVE32-NEXT: and a0, a0, a1
+; RV64ZVE32-NEXT: and a2, a2, a1
+; RV64ZVE32-NEXT: and a1, a3, a1
+; RV64ZVE32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64ZVE32-NEXT: vmv.v.x v8, a1
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a0
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a2
+; RV64ZVE32-NEXT: vslidedown.vi v8, v8, 13
+; RV64ZVE32-NEXT: ret
+entry:
+  %vecext3 = extractelement <2 x i16> %a, i32 0
+  %conv4 = zext i16 %vecext3 to i32
+  %vecinit5 = insertelement <16 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>, i32 %conv4, i32 1
+  %vecext7 = extractelement <2 x i16> %b, i32 0
+  %conv8 = zext i16 %vecext7 to i32
+  %vecinit9 = insertelement <16 x i32> %vecinit5, i32 %conv8, i32 2
+  %vecext59 = extractelement <2 x i16> %c, i32 0
+  %conv60 = zext i16 %vecext59 to i32
+  %vecinit61 = insertelement <16 x i32> %vecinit9, i32 %conv60, i32 0
+  ret <16 x i32> %vecinit61
+}
+
+define <16 x i32> @PR159294_sext(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; RV32-LABEL: PR159294_sext:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: vmv.x.s a1, v9
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: ret
+;
+; RV64V-ONLY-LABEL: PR159294_sext:
+; RV64V-ONLY: # %bb.0: # %entry
+; RV64V-ONLY-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64V-ONLY-NEXT: vmv.x.s a0, v8
+; RV64V-ONLY-NEXT: vmv.x.s a1, v9
+; RV64V-ONLY-NEXT: vmv.x.s a2, v10
+; RV64V-ONLY-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64V-ONLY-NEXT: vmv.v.x v8, a2
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a0
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a1
+; RV64V-ONLY-NEXT: vslidedown.vi v8, v8, 13
+; RV64V-ONLY-NEXT: ret
+;
+; RVA22U64-LABEL: PR159294_sext:
+; RVA22U64: # %bb.0: # %entry
+; RVA22U64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RVA22U64-NEXT: vmv.x.s a0, v8
+; RVA22U64-NEXT: vmv.x.s a1, v10
+; RVA22U64-NEXT: slli a0, a0, 32
+; RVA22U64-NEXT: add.uw a0, a1, a0
+; RVA22U64-NEXT: vmv.x.s a1, v9
+; RVA22U64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RVA22U64-NEXT: vmv.v.i v8, 0
+; RVA22U64-NEXT: zext.w a1, a1
+; RVA22U64-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RVA22U64-NEXT: vmv.s.x v8, a0
+; RVA22U64-NEXT: vmv.s.x v12, a1
+; RVA22U64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
+; RVA22U64-NEXT: vslideup.vi v8, v12, 1
+; RVA22U64-NEXT: ret
+;
+; RVA22U64-PACK-LABEL: PR159294_sext:
+; RVA22U64-PACK: # %bb.0: # %entry
+; RVA22U64-PACK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.x.s a0, v8
+; RVA22U64-PACK-NEXT: vmv.x.s a1, v10
+; RVA22U64-PACK-NEXT: vmv.x.s a2, v9
+; RVA22U64-PACK-NEXT: pack a0, a1, a0
+; RVA22U64-PACK-NEXT: pack a1, a0, a0
+; RVA22U64-PACK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RVA22U64-PACK-NEXT: vmv.v.x v8, a1
+; RVA22U64-PACK-NEXT: pack a1, a2, a0
+; RVA22U64-PACK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RVA22U64-PACK-NEXT: vmv.s.x v8, a0
+; RVA22U64-PACK-NEXT: vmv.s.x v12, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, tu, ma
+; RVA22U64-PACK-NEXT: vslideup.vi v8, v12, 1
+; RVA22U64-PACK-NEXT: ret
+;
+; RV64ZVE32-LABEL: PR159294_sext:
+; RV64ZVE32: # %bb.0: # %entry
+; RV64ZVE32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32-NEXT: vmv.x.s a0, v8
+; RV64ZVE32-NEXT: vmv.x.s a1, v9
+; RV64ZVE32-NEXT: vmv.x.s a2, v10
+; RV64ZVE32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64ZVE32-NEXT: vmv.v.x v8, a2
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a0
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a1
+; RV64ZVE32-NEXT: vslidedown.vi v8, v8, 13
+; RV64ZVE32-NEXT: ret
+entry:
+  %vecext3 = extractelement <2 x i16> %a, i32 0
+  %conv4 = sext i16 %vecext3 to i32
+  %vecinit5 = insertelement <16 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>, i32 %conv4, i32 1
+  %vecext7 = extractelement <2 x i16> %b, i32 0
+  %conv8 = sext i16 %vecext7 to i32
+  %vecinit9 = insertelement <16 x i32> %vecinit5, i32 %conv8, i32 2
+  %vecext59 = extractelement <2 x i16> %c, i32 0
+  %conv60 = sext i16 %vecext59 to i32
+  %vecinit61 = insertelement <16 x i32> %vecinit9, i32 %conv60, i32 0
+  ret <16 x i32> %vecinit61
+}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV64: {{.*}}
