diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index 8a71cd0826672..e316c022727ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -347,6 +347,629 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv
   ret {<vscale x 8 x i64>, <vscale x 8 x i64>} %retval
 }
+define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv48i1(<vscale x 48 x i1> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv48i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: vmerge.vim v16, v10, 1, v0
+; CHECK-NEXT: srli a1, a0, 2
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a1
+; CHECK-NEXT: srli a0, a0, 1
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v18, v10, 1, v0
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v8, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v20, v10, 1, v0
+; CHECK-NEXT: vs8r.v v16, (a0)
+; CHECK-NEXT: vlseg3e8.v v8, (a0)
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: vmsne.vi v8, v10, 0
+; CHECK-NEXT: vmsne.vi v9, v12, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave3.nxv48i1(<vscale x 48 x i1> %vec)
+  ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+}
+
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv48i8(<vscale x 48 x i8> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv48i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT: vlseg3e8.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave3.nxv48i8(<vscale x 48 x i8> %vec)
+  ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+}
+
+
+define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv24i16(<vscale x 24 x i16> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv24i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT: vlseg3e16.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave3.nxv24i16(<vscale x 24 x i16> %vec)
+  ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+}
+
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv12i32(<vscale x 12 x i32> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv12i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT: vlseg3e32.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>}
@llvm.vector.deinterleave3.nxv12i32( %vec) + ret {, , } %retval +} + + +define {, , } @vector_deinterleave_nxv2i64_nxv6i64( %vec) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv6i64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; CHECK-NEXT: vlseg3e64.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %retval = call {, , } @llvm.vector.deinterleave3.nxv6i64( %vec) + ret {, , } %retval +} + +define {, , , , } @vector_deinterleave_nxv16i1_nxv80i1( %vec) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv80i1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmv1r.v v9, v0 +; CHECK-NEXT: vmv.v.i v12, 0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: vmerge.vim v16, v12, 1, v0 +; CHECK-NEXT: srli a1, a0, 2 +; CHECK-NEXT: srli a2, a0, 1 +; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a1 +; CHECK-NEXT: srli a1, a0, 3 +; CHECK-NEXT: slli a1, a1, 1 +; CHECK-NEXT: vsetvli a3, zero, e8, m2, ta, ma +; CHECK-NEXT: vmerge.vim v18, v12, 1, v0 +; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v9, a2 +; CHECK-NEXT: sub a0, a0, a1 +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; CHECK-NEXT: vmerge.vim v20, v12, 1, v0 +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v9, a0 +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmerge.vim v14, v12, 1, v0 +; CHECK-NEXT: vmv1r.v v10, v15 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 +; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vmv1r.v v8, v21 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vmv1r.v v9, v14 +; CHECK-NEXT: vs8r.v v16, (a0) +; CHECK-NEXT: vmv1r.v v12, v13 +; CHECK-NEXT: vs8r.v v8, (a1) +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma +; CHECK-NEXT: vlseg5e8.v v8, (a0) +; CHECK-NEXT: vlseg5e8.v v14, (a1) +; CHECK-NEXT: vmv2r.v v20, v8 +; CHECK-NEXT: vmv2r.v v22, v10 +; CHECK-NEXT: vmv1r.v v21, v14 +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmsne.vi v0, v20, 0 +; CHECK-NEXT: vmv1r.v v14, v9 +; CHECK-NEXT: vmsne.vi v8, v14, 0 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vmsne.vi v9, v22, 0 +; CHECK-NEXT: vmv1r.v v16, v11 +; CHECK-NEXT: vmsne.vi v10, v16, 0 +; CHECK-NEXT: vmv1r.v v13, v18 +; CHECK-NEXT: vmsne.vi v11, v12, 0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %retval = call {, , , , } @llvm.vector.deinterleave5.nxv80i1( %vec) + ret {, , , , } %retval +} + + +define {, , , , } @vector_deinterleave_nxv16i8_nxv80i8( %vec) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv80i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v26, v15 +; CHECK-NEXT: vmv1r.v v27, v16 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vmv1r.v v24, v13 +; 
CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vmv1r.v v25, v14 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vmv1r.v v28, v17 +; CHECK-NEXT: vs8r.v v24, (a1) +; CHECK-NEXT: vlseg5e8.v v12, (a0) +; CHECK-NEXT: vlseg5e8.v v18, (a1) +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv1r.v v9, v18 +; CHECK-NEXT: vmv1r.v v18, v13 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vmv1r.v v13, v20 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v17, v22 +; CHECK-NEXT: vmv2r.v v10, v18 +; CHECK-NEXT: vmv2r.v v14, v20 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %retval = call {, , , , } @llvm.vector.deinterleave5.nxv80i8( %vec) + ret {, , , , } %retval +} + + +define {, , , , } @vector_deinterleave_nxv8i16_nxv40i16( %vec) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv40i16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vmv1r.v v26, v15 +; CHECK-NEXT: vmv1r.v v27, v16 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vmv1r.v v24, v13 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vmv1r.v v25, v14 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vmv1r.v v28, v17 +; CHECK-NEXT: vs8r.v v24, (a1) +; CHECK-NEXT: vlseg5e16.v v12, (a0) +; CHECK-NEXT: vlseg5e16.v v18, (a1) +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv1r.v v9, v18 +; CHECK-NEXT: vmv1r.v v18, v13 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vmv1r.v v13, v20 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v17, v22 +; CHECK-NEXT: vmv2r.v v10, v18 +; CHECK-NEXT: vmv2r.v v14, v20 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %retval = call {, , , , } @llvm.vector.deinterleave5.nxv40i16( %vec) + ret {, , , , } %retval +} + + +define {, , , , } @vector_deinterleave_nxv4i32_nxv20i32( %vec) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv20i32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vmv1r.v v26, v15 +; CHECK-NEXT: vmv1r.v v27, v16 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vmv1r.v v24, v13 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vmv1r.v v25, v14 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vmv1r.v v28, v17 +; CHECK-NEXT: vs8r.v v24, (a1) +; CHECK-NEXT: vlseg5e32.v v12, (a0) +; CHECK-NEXT: vlseg5e32.v v18, (a1) +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv1r.v v9, v18 +; CHECK-NEXT: vmv1r.v v18, v13 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vmv1r.v v13, v20 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v17, v22 +; CHECK-NEXT: vmv2r.v v10, v18 +; CHECK-NEXT: vmv2r.v v14, v20 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %retval = call {, , , , } @llvm.vector.deinterleave5.nxv20i32( %vec) + ret {, , , , } %retval +} + + +define {, , , , } @vector_deinterleave_nxv2i64_nxv10i64( %vec) nounwind { +; CHECK-LABEL: 
vector_deinterleave_nxv2i64_nxv10i64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vmv1r.v v26, v15 +; CHECK-NEXT: vmv1r.v v27, v16 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vmv1r.v v24, v13 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vmv1r.v v25, v14 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vmv1r.v v28, v17 +; CHECK-NEXT: vs8r.v v24, (a1) +; CHECK-NEXT: vlseg5e64.v v12, (a0) +; CHECK-NEXT: vlseg5e64.v v18, (a1) +; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv1r.v v9, v18 +; CHECK-NEXT: vmv1r.v v18, v13 +; CHECK-NEXT: vmv2r.v v12, v14 +; CHECK-NEXT: vmv1r.v v13, v20 +; CHECK-NEXT: vmv1r.v v20, v15 +; CHECK-NEXT: vmv1r.v v17, v22 +; CHECK-NEXT: vmv2r.v v10, v18 +; CHECK-NEXT: vmv2r.v v14, v20 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %retval = call {, , , , } @llvm.vector.deinterleave5.nxv10i64( %vec) + ret {, , , , } %retval +} + +define {, , , , , , } @vector_deinterleave_nxv16i1_nxv112i1( %vec) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv112i1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmv1r.v v9, v0 +; CHECK-NEXT: vmv.v.i v12, 0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: vmerge.vim v16, v12, 1, v0 +; CHECK-NEXT: srli a1, a0, 2 +; CHECK-NEXT: srli a2, a0, 1 +; CHECK-NEXT: srli a3, a0, 3 +; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a1 +; CHECK-NEXT: slli a3, a3, 1 +; CHECK-NEXT: vsetvli a4, zero, e8, m2, ta, ma +; CHECK-NEXT: vmerge.vim v18, v12, 1, v0 +; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v9, a2 +; CHECK-NEXT: sub a0, a0, a3 +; CHECK-NEXT: vsetvli a3, zero, e8, m2, ta, ma +; CHECK-NEXT: vmerge.vim v20, v12, 1, v0 +; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v9, a0 +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmerge.vim v22, v12, 1, v0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmerge.vim v14, v12, 1, v0 +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v8, a1 +; CHECK-NEXT: vmv1r.v v10, v15 +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmerge.vim v24, v12, 1, v0 +; CHECK-NEXT: vmv1r.v v11, v24 +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v8, a2 +; CHECK-NEXT: vmv1r.v v8, v23 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vmv1r.v v9, v14 +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; CHECK-NEXT: vmerge.vim v14, v12, 1, v0 +; CHECK-NEXT: vmv1r.v v12, v25 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vmv1r.v v13, v14 +; CHECK-NEXT: vs8r.v v16, (a0) +; CHECK-NEXT: vmv1r.v v14, v15 +; CHECK-NEXT: vs8r.v v8, (a1) +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma +; CHECK-NEXT: vlseg7e8.v v8, (a0) +; CHECK-NEXT: vlseg7e8.v v16, (a1) +; CHECK-NEXT: vmv2r.v v24, v8 +; CHECK-NEXT: vmv2r.v v26, v10 +; CHECK-NEXT: vmv2r.v v28, v12 +; CHECK-NEXT: vmv1r.v v25, v16 +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmsne.vi v0, v24, 0 +; CHECK-NEXT: 
vmv1r.v v16, v9 +; CHECK-NEXT: vmsne.vi v8, v16, 0 +; CHECK-NEXT: vmv1r.v v27, v18 +; CHECK-NEXT: vmsne.vi v9, v26, 0 +; CHECK-NEXT: vmv1r.v v18, v11 +; CHECK-NEXT: vmsne.vi v10, v18, 0 +; CHECK-NEXT: vmv1r.v v29, v20 +; CHECK-NEXT: vmsne.vi v11, v28, 0 +; CHECK-NEXT: vmv1r.v v20, v13 +; CHECK-NEXT: vmsne.vi v12, v20, 0 +; CHECK-NEXT: vmv1r.v v15, v22 +; CHECK-NEXT: vmsne.vi v13, v14, 0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %retval = call {, , , , , , } @llvm.vector.deinterleave7.nxv112i1( %vec) + ret {, , , , , , } %retval +} + + +define {, , , , , , } @vector_deinterleave_nxv16i8_nxv112i8( %vec) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv112i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v30, v21 +; CHECK-NEXT: vmv1r.v v28, v19 +; CHECK-NEXT: vmv1r.v v29, v20 +; CHECK-NEXT: vmv1r.v v26, v17 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vmv1r.v v27, v18 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vmv1r.v v24, v15 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vmv1r.v v25, v16 +; CHECK-NEXT: vs8r.v v24, (a1) +; CHECK-NEXT: vlseg7e8.v v14, (a0) +; CHECK-NEXT: vlseg7e8.v v22, (a1) +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv1r.v v9, v22 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vmv2r.v v12, v16 +; CHECK-NEXT: vmv1r.v v13, v24 +; CHECK-NEXT: vmv1r.v v24, v17 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: vmv1r.v v17, v26 +; CHECK-NEXT: vmv1r.v v26, v19 +; CHECK-NEXT: vmv1r.v v21, v28 +; CHECK-NEXT: vmv2r.v v10, v22 +; CHECK-NEXT: vmv2r.v v14, v24 +; CHECK-NEXT: vmv2r.v v18, v26 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %retval = call {, , , , , , } @llvm.vector.deinterleave7.nxv112i8( %vec) + ret {, , , , , , } %retval +} + + +define {, , , , , , } @vector_deinterleave_nxv8i16_nxv56i16( %vec) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv56i16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vmv1r.v v30, v21 +; CHECK-NEXT: vmv1r.v v28, v19 +; CHECK-NEXT: vmv1r.v v29, v20 +; CHECK-NEXT: vmv1r.v v26, v17 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vmv1r.v v27, v18 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vmv1r.v v24, v15 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vmv1r.v v25, v16 +; CHECK-NEXT: vs8r.v v24, (a1) +; CHECK-NEXT: vlseg7e16.v v14, (a0) +; CHECK-NEXT: vlseg7e16.v v22, (a1) +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv1r.v v9, v22 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vmv2r.v v12, v16 +; CHECK-NEXT: vmv1r.v v13, v24 +; CHECK-NEXT: vmv1r.v v24, v17 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: vmv1r.v v17, v26 +; CHECK-NEXT: vmv1r.v v26, v19 +; CHECK-NEXT: vmv1r.v v21, v28 +; CHECK-NEXT: vmv2r.v v10, v22 +; CHECK-NEXT: vmv2r.v v14, v24 +; CHECK-NEXT: vmv2r.v v18, v26 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %retval = call {, , , , 
, , } @llvm.vector.deinterleave7.nxv56i16( %vec) + ret {, , , , , , } %retval +} + + +define {, , , , , , } @vector_deinterleave_nxv4i32_nxv28i32( %vec) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv28i32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vmv1r.v v30, v21 +; CHECK-NEXT: vmv1r.v v28, v19 +; CHECK-NEXT: vmv1r.v v29, v20 +; CHECK-NEXT: vmv1r.v v26, v17 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vmv1r.v v27, v18 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vmv1r.v v24, v15 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vmv1r.v v25, v16 +; CHECK-NEXT: vs8r.v v24, (a1) +; CHECK-NEXT: vlseg7e32.v v14, (a0) +; CHECK-NEXT: vlseg7e32.v v22, (a1) +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv1r.v v9, v22 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vmv2r.v v12, v16 +; CHECK-NEXT: vmv1r.v v13, v24 +; CHECK-NEXT: vmv1r.v v24, v17 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: vmv1r.v v17, v26 +; CHECK-NEXT: vmv1r.v v26, v19 +; CHECK-NEXT: vmv1r.v v21, v28 +; CHECK-NEXT: vmv2r.v v10, v22 +; CHECK-NEXT: vmv2r.v v14, v24 +; CHECK-NEXT: vmv2r.v v18, v26 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %retval = call {, , , , , , } @llvm.vector.deinterleave7.nxv28i32( %vec) + ret {, , , , , , } %retval +} + + +define {, , , , , , } @vector_deinterleave_nxv2i64_nxv14i64( %vec) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv14i64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vmv1r.v v30, v21 +; CHECK-NEXT: vmv1r.v v28, v19 +; CHECK-NEXT: vmv1r.v v29, v20 +; CHECK-NEXT: vmv1r.v v26, v17 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vmv1r.v v27, v18 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vmv1r.v v24, v15 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vmv1r.v v25, v16 +; CHECK-NEXT: vs8r.v v24, (a1) +; CHECK-NEXT: vlseg7e64.v v14, (a0) +; CHECK-NEXT: vlseg7e64.v v22, (a1) +; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv1r.v v9, v22 +; CHECK-NEXT: vmv1r.v v22, v15 +; CHECK-NEXT: vmv2r.v v12, v16 +; CHECK-NEXT: vmv1r.v v13, v24 +; CHECK-NEXT: vmv1r.v v24, v17 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: vmv1r.v v17, v26 +; CHECK-NEXT: vmv1r.v v26, v19 +; CHECK-NEXT: vmv1r.v v21, v28 +; CHECK-NEXT: vmv2r.v v10, v22 +; CHECK-NEXT: vmv2r.v v14, v24 +; CHECK-NEXT: vmv2r.v v18, v26 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %retval = call {, , , , , , } @llvm.vector.deinterleave7.nxv14i64( %vec) + ret {, , , , , , } %retval +} ; Floats @@ -692,46 +1315,53 @@ define {, } @vector_deinterleave_nxv8f ret {, } %retval } -define {, , } @vector_deinterleave_nxv16i1_nxv48i1( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv48i1: +define {, , } @vector_deinterleave_nxv2f16_nxv6f16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv6f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 
1 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmv1r.v v8, v0 -; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vmerge.vim v16, v10, 1, v0 -; CHECK-NEXT: srli a1, a0, 2 -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v0, a1 -; CHECK-NEXT: srli a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v18, v10, 1, v0 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v8, a0 +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v10, v8, a0 +; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v20, v10, 1, v0 -; CHECK-NEXT: vs8r.v v16, (a0) -; CHECK-NEXT: vlseg3e8.v v8, (a0) -; CHECK-NEXT: vmsne.vi v0, v8, 0 -; CHECK-NEXT: vmsne.vi v8, v10, 0 -; CHECK-NEXT: vmsne.vi v9, v12, 0 +; CHECK-NEXT: vs2r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v8, (a0) ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret - %retval = call {, , } @llvm.vector.deinterleave3.nxv48i1( %vec) - ret {, , } %retval + %res = call {, , } @llvm.vector.deinterleave3.nxv6f16( %arg) + ret {, , } %res } +define {, , } @vector_deinterleave_nxv4f16_nxv12f16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv12f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , } @llvm.vector.deinterleave3.nxv12f16( %arg) + ret {, , } %res +} -define {, , } @vector_deinterleave_nxv16i8_nxv48i8( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv48i8: +define {, , } @vector_deinterleave_nxv8f16_nxv24f16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv24f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -739,20 +1369,64 @@ define {, , } @vector_dein ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma -; CHECK-NEXT: vlseg3e8.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vlseg3e16.v v8, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret - %retval = call {, , } @llvm.vector.deinterleave3.nxv48i8( %vec) - ret {, , } %retval + %res = call {, , } @llvm.vector.deinterleave3.nxv24f16( %arg) + ret {, , } %res +} + +define {, , } @vector_deinterleave_nxv2bf16_nxv6bf16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv6bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v10, v8, a0 +; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs2r.v v8, (a0) +; CHECK-NEXT: 
vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vlseg3e16.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , } @llvm.vector.deinterleave3.nxv6bf16( %arg) + ret {, , } %res } +define {, , } @vector_deinterleave_nxv4bf16_nxv12bf16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv12bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vlseg3e16.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , } @llvm.vector.deinterleave3.nxv12bf16( %arg) + ret {, , } %res +} -define {, , } @vector_deinterleave_nxv8i16_nxv24i16( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv24i16: +define {, , } @vector_deinterleave_nxv8bf16_nxv24bf16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv24bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -767,13 +1441,57 @@ define {, , } @vector_dein ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret - %retval = call {, , } @llvm.vector.deinterleave3.nxv24i16( %vec) - ret {, , } %retval + %res = call {, , } @llvm.vector.deinterleave3.nxv24bf16( %arg) + ret {, , } %res } +define {, , } @vector_deinterleave_nxv1f32_nxv3f32( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv3f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 3 +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v10, v8, a0 +; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs2r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; CHECK-NEXT: vlseg3e32.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , } @llvm.vector.deinterleave3.nxv3f32( %arg) + ret {, , } %res +} -define {, , } @vector_deinterleave_nxv4i32_nxv12i32( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv12i32: +define {, , } @vector_deinterleave_nxv2f32_nxv6f32( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv6f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , } @llvm.vector.deinterleave3.nxv6f32( %arg) + ret {, , } %res +} + +define {, , } @vector_deinterleave_nxv4f32_nxv12f32( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv12f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -788,13 +1506,32 @@ define {, , } @vector_dein ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret - %retval = call {, , } @llvm.vector.deinterleave3.nxv12i32( %vec) 
- ret {, , } %retval + %res = call {, , } @llvm.vector.deinterleave3.nxv12f32( %arg) + ret {, , } %res } +define {, , } @vector_deinterleave_nxv1f64_nxv3f64( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv3f64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; CHECK-NEXT: vlseg3e64.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , } @llvm.vector.deinterleave3.nxv3f64( %arg) + ret {, , } %res +} -define {, , } @vector_deinterleave_nxv2i64_nxv6i64( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv6i64: +define {, , } @vector_deinterleave_nxv2f64_nxv6f64( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv6f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -809,87 +1546,65 @@ define {, , } @vector_dein ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret - %retval = call {, , } @llvm.vector.deinterleave3.nxv6i64( %vec) - ret {, , } %retval + %res = call {, , } @llvm.vector.deinterleave3.nxv6f64( %arg) + ret {, , } %res } -define {, , , , } @vector_deinterleave_nxv16i1_nxv80i1( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv80i1: +define {, , , , } @vector_deinterleave_nxv2f16_nxv10f16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv10f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vmerge.vim v16, v12, 1, v0 -; CHECK-NEXT: srli a1, a0, 2 -; CHECK-NEXT: srli a2, a0, 1 -; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v0, a1 -; CHECK-NEXT: srli a1, a0, 3 -; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a3, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v18, v12, 1, v0 -; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v9, a2 -; CHECK-NEXT: sub a0, a0, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v20, v12, 1, v0 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v14, v12, 1, v0 -; CHECK-NEXT: vmv1r.v v10, v15 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 -; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v11, v9, a0 +; CHECK-NEXT: vslideup.vx v9, v11, a0 +; CHECK-NEXT: vslidedown.vx v11, v8, a0 +; CHECK-NEXT: vslideup.vx v8, v11, a0 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vmv1r.v v8, v21 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vmv1r.v v9, v14 -; CHECK-NEXT: vs8r.v v16, (a0) -; CHECK-NEXT: vmv1r.v v12, v13 -; CHECK-NEXT: vs8r.v v8, (a1) -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma -; CHECK-NEXT: vlseg5e8.v v8, (a0) -; CHECK-NEXT: vlseg5e8.v v14, (a1) -; CHECK-NEXT: vmv2r.v v20, v8 -; CHECK-NEXT: vmv2r.v v22, v10 -; CHECK-NEXT: vmv1r.v v21, v14 -; 
CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmsne.vi v0, v20, 0 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmsne.vi v8, v14, 0 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vmsne.vi v9, v22, 0 -; CHECK-NEXT: vmv1r.v v16, v11 -; CHECK-NEXT: vmsne.vi v10, v16, 0 -; CHECK-NEXT: vmv1r.v v13, v18 -; CHECK-NEXT: vmsne.vi v11, v12, 0 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v8, (a0) ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret - %retval = call {, , , , } @llvm.vector.deinterleave5.nxv80i1( %vec) - ret {, , , , } %retval + %res = call {, , , , } @llvm.vector.deinterleave5.nxv10f16( %arg) + ret {, , , , } %res +} + +define {, , , , } @vector_deinterleave_nxv4f16_nxv20f16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv20f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , } @llvm.vector.deinterleave5.nxv20f16( %arg) + ret {, , , , } %res } - -define {, , , , } @vector_deinterleave_nxv16i8_nxv80i8( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv80i8: +define {, , , , } @vector_deinterleave_nxv8f16_nxv40f16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv40f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv1r.v v26, v15 ; CHECK-NEXT: vmv1r.v v27, v16 ; CHECK-NEXT: addi a0, sp, 16 @@ -902,8 +1617,8 @@ define {, , , , , , , , , , } @llvm.vector.deinterleave5.nxv80i8( %vec) - ret {, , , , } %retval + %res = call {, , , , } @llvm.vector.deinterleave5.nxv40f16( %arg) + ret {, , , , } %res } +define {, , , , } @vector_deinterleave_nxv2bf16_nxv10bf16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv10bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v11, v9, a0 +; CHECK-NEXT: vslideup.vx v9, v11, a0 +; CHECK-NEXT: vslidedown.vx v11, v8, a0 +; CHECK-NEXT: vslideup.vx v8, v11, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vlseg5e16.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , } @llvm.vector.deinterleave5.nxv10bf16( %arg) + ret {, , , , } %res +} -define {, , , , } @vector_deinterleave_nxv8i16_nxv40i16( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv40i16: +define {, , , , } @vector_deinterleave_nxv4bf16_nxv20bf16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv20bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli 
a0, a0, 3 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vlseg5e16.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , } @llvm.vector.deinterleave5.nxv20bf16( %arg) + ret {, , , , } %res +} + +define {, , , , } @vector_deinterleave_nxv8bf16_nxv40bf16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv40bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -959,13 +1720,59 @@ define {, , , , , , , } @llvm.vector.deinterleave5.nxv40i16( %vec) - ret {, , , , } %retval + %res = call {, , , , } @llvm.vector.deinterleave5.nxv40bf16( %arg) + ret {, , , , } %res } +define {, , , , } @vector_deinterleave_nxv1f32_nxv5f32( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv5f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 3 +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v11, v9, a0 +; CHECK-NEXT: vslideup.vx v9, v11, a0 +; CHECK-NEXT: vslidedown.vx v11, v8, a0 +; CHECK-NEXT: vslideup.vx v8, v11, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; CHECK-NEXT: vlseg5e32.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , } @llvm.vector.deinterleave5.nxv5f32( %arg) + ret {, , , , } %res +} -define {, , , , } @vector_deinterleave_nxv4i32_nxv20i32( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv20i32: +define {, , , , } @vector_deinterleave_nxv2f32_nxv10f32( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv10f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vlseg5e32.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , } @llvm.vector.deinterleave5.nxv10f32( %arg) + ret {, , , , } %res +} + +define {, , , , } @vector_deinterleave_nxv4f32_nxv20f32( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv20f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -1000,13 +1807,32 @@ define {, , , , , , , } @llvm.vector.deinterleave5.nxv20i32( %vec) - ret {, , , , } %retval + %res = call {, , , , } @llvm.vector.deinterleave5.nxv20f32( %arg) + ret {, , , , } %res } +define {, , , , } @vector_deinterleave_nxv1f64_nxv5f64( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv5f64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; CHECK-NEXT: vlseg5e64.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , } 
@llvm.vector.deinterleave5.nxv5f64( %arg) + ret {, , , , } %res +} -define {, , , , } @vector_deinterleave_nxv2i64_nxv10i64( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv10i64: +define {, , , , } @vector_deinterleave_nxv2f64_nxv10f64( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv10f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -1041,102 +1867,67 @@ define {, , , , , , , } @llvm.vector.deinterleave5.nxv10i64( %vec) - ret {, , , , } %retval + %res = call {, , , , } @llvm.vector.deinterleave5.nxv10f64( %arg) + ret {, , , , } %res } -define {, , , , , , } @vector_deinterleave_nxv16i1_nxv112i1( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv112i1: +define {, , , , , , } @vector_deinterleave_nxv2f16_nxv14f16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv14f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: vmerge.vim v16, v12, 1, v0 -; CHECK-NEXT: srli a1, a0, 2 -; CHECK-NEXT: srli a2, a0, 1 -; CHECK-NEXT: srli a3, a0, 3 -; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v0, a1 -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli a4, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v18, v12, 1, v0 -; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v9, a2 -; CHECK-NEXT: sub a0, a0, a3 -; CHECK-NEXT: vsetvli a3, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v20, v12, 1, v0 -; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v22, v12, 1, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmerge.vim v14, v12, 1, v0 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v8, a1 -; CHECK-NEXT: vmv1r.v v10, v15 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v24, v12, 1, v0 -; CHECK-NEXT: vmv1r.v v11, v24 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v8, a2 -; CHECK-NEXT: vmv1r.v v8, v23 +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v12, v9, a0 +; CHECK-NEXT: vslideup.vx v9, v12, a0 +; CHECK-NEXT: vslidedown.vx v12, v8, a0 +; CHECK-NEXT: vslideup.vx v8, v12, a0 +; CHECK-NEXT: vslidedown.vx v12, v10, a0 +; CHECK-NEXT: vslideup.vx v10, v12, a0 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vmv1r.v v9, v14 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma -; CHECK-NEXT: vmerge.vim v14, v12, 1, v0 -; CHECK-NEXT: vmv1r.v v12, v25 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: add a1, sp, a1 -; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vmv1r.v v13, v14 -; CHECK-NEXT: vs8r.v v16, (a0) -; CHECK-NEXT: vmv1r.v v14, v15 -; CHECK-NEXT: vs8r.v v8, (a1) -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma -; CHECK-NEXT: vlseg7e8.v v8, (a0) -; CHECK-NEXT: vlseg7e8.v v16, (a1) -; CHECK-NEXT: vmv2r.v v24, v8 -; CHECK-NEXT: vmv2r.v v26, v10 -; CHECK-NEXT: vmv2r.v v28, v12 -; CHECK-NEXT: vmv1r.v v25, v16 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vmsne.vi v8, v16, 0 -; CHECK-NEXT: vmv1r.v v27, v18 -; CHECK-NEXT: vmsne.vi v9, v26, 0 
-; CHECK-NEXT: vmv1r.v v18, v11 -; CHECK-NEXT: vmsne.vi v10, v18, 0 -; CHECK-NEXT: vmv1r.v v29, v20 -; CHECK-NEXT: vmsne.vi v11, v28, 0 -; CHECK-NEXT: vmv1r.v v20, v13 -; CHECK-NEXT: vmsne.vi v12, v20, 0 -; CHECK-NEXT: vmv1r.v v15, v22 -; CHECK-NEXT: vmsne.vi v13, v14, 0 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vlseg7e16.v v8, (a0) ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret - %retval = call {, , , , , , } @llvm.vector.deinterleave7.nxv112i1( %vec) - ret {, , , , , , } %retval + %res = call {, , , , , , } @llvm.vector.deinterleave7.nxv14f16( %arg) + ret {, , , , , , } %res } +define {, , , , , , } @vector_deinterleave_nxv4f16_nxv28f16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv28f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , , , } @llvm.vector.deinterleave7.nxv28f16( %arg) + ret {, , , , , , } %res +} -define {, , , , , , } @vector_deinterleave_nxv16i8_nxv112i8( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv112i8: +define {, , , , , , } @vector_deinterleave_nxv8f16_nxv56f16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv56f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv1r.v v30, v21 ; CHECK-NEXT: vmv1r.v v28, v19 ; CHECK-NEXT: vmv1r.v v29, v20 @@ -1151,8 +1942,8 @@ define {, , , , , , , , , , , , } @llvm.vector.deinterleave7.nxv112i8( %vec) - ret {, , , , , , } %retval + %res = call {, , , , , , } @llvm.vector.deinterleave7.nxv56f16( %arg) + ret {, , , , , , } %res +} + +define {, , , , , , } @vector_deinterleave_nxv2bf16_nxv14bf16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv14bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v12, v9, a0 +; CHECK-NEXT: vslideup.vx v9, v12, a0 +; CHECK-NEXT: vslidedown.vx v12, v8, a0 +; CHECK-NEXT: vslideup.vx v8, v12, a0 +; CHECK-NEXT: vslidedown.vx v12, v10, a0 +; CHECK-NEXT: vslideup.vx v10, v12, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vlseg7e16.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , , , } @llvm.vector.deinterleave7.nxv14bf16( %arg) + ret {, , , , , , } %res } +define {, , , , , , } @vector_deinterleave_nxv4bf16_nxv28bf16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv28bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: 
addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vlseg7e16.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , , , } @llvm.vector.deinterleave7.nxv28bf16( %arg) + ret {, , , , , , } %res +} -define {, , , , , , } @vector_deinterleave_nxv8i16_nxv56i16( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv56i16: +define {, , , , , , } @vector_deinterleave_nxv8bf16_nxv56bf16( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv56bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -1218,13 +2057,61 @@ define {, , , , , , , , , } @llvm.vector.deinterleave7.nxv56i16( %vec) - ret {, , , , , , } %retval + %res = call {, , , , , , } @llvm.vector.deinterleave7.nxv56bf16( %arg) + ret {, , , , , , } %res } +define {, , , , , , } @vector_deinterleave_nxv1f32_nxv7f32( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv7f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 3 +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v12, v9, a0 +; CHECK-NEXT: vslideup.vx v9, v12, a0 +; CHECK-NEXT: vslidedown.vx v12, v8, a0 +; CHECK-NEXT: vslideup.vx v8, v12, a0 +; CHECK-NEXT: vslidedown.vx v12, v10, a0 +; CHECK-NEXT: vslideup.vx v10, v12, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; CHECK-NEXT: vlseg7e32.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , , , } @llvm.vector.deinterleave7.nxv7f32( %arg) + ret {, , , , , , } %res +} -define {, , , , , , } @vector_deinterleave_nxv4i32_nxv28i32( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv28i32: +define {, , , , , , } @vector_deinterleave_nxv2f32_nxv14f32( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv14f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vlseg7e32.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , , , } @llvm.vector.deinterleave7.nxv14f32( %arg) + ret {, , , , , , } %res +} + +define {, , , , , , } @vector_deinterleave_nxv4f32_nxv28f32( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv28f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -1265,13 +2152,32 @@ define {, , , , , , , , , } @llvm.vector.deinterleave7.nxv28i32( %vec) - ret {, , , , , , } %retval + %res = call {, , , , , , } @llvm.vector.deinterleave7.nxv28f32( %arg) + ret {, , , , , , } %res } +define {, , , , , , } @vector_deinterleave_nxv1f64_nxv7f64( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv7f64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, 
zero, e64, m1, ta, ma +; CHECK-NEXT: vlseg7e64.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %res = call {, , , , , , } @llvm.vector.deinterleave7.nxv7f64( %arg) + ret {, , , , , , } %res +} -define {, , , , , , } @vector_deinterleave_nxv2i64_nxv14i64( %vec) nounwind { -; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv14i64: +define {, , , , , , } @vector_deinterleave_nxv2f64_nxv14f64( %arg) nounwind { +; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv14f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -1312,6 +2218,6 @@ define {, , , , , , , , , } @llvm.vector.deinterleave7.nxv14i64( %vec) - ret {, , , , , , } %retval + %res = call {, , , , , , } @llvm.vector.deinterleave7.nxv14f64( %arg) + ret {, , , , , , } %res } diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll index 469263a3247ce..7347000bf5c71 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll @@ -446,587 +446,312 @@ define @vector_interleave_nxv16i64_nxv8i64( %res } - -; Floats - -define @vector_interleave_nxv4bf16_nxv2bf16( %a, %b) { -; V-LABEL: vector_interleave_nxv4bf16_nxv2bf16: -; V: # %bb.0: -; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; V-NEXT: vwaddu.vv v10, v8, v9 -; V-NEXT: li a0, -1 -; V-NEXT: csrr a1, vlenb -; V-NEXT: vwmaccu.vx v10, a0, v9 -; V-NEXT: srli a1, a1, 2 -; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; V-NEXT: vslidedown.vx v8, v10, a1 -; V-NEXT: add a0, a1, a1 -; V-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; V-NEXT: vslideup.vx v10, v8, a1 -; V-NEXT: vmv.v.v v8, v10 -; V-NEXT: ret +define @vector_interleave_nxv8i32_nxv4i32_poison( %a) { +; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vzext.vf2 v8, v12 +; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv4bf16_nxv2bf16: +; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison: ; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; ZVBB-NEXT: vwsll.vi v10, v9, 16 -; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: vwaddu.wv v10, v10, v8 -; ZVBB-NEXT: srli a0, a0, 2 -; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma -; ZVBB-NEXT: vslidedown.vx v8, v10, a0 -; ZVBB-NEXT: add a1, a0, a0 -; ZVBB-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; ZVBB-NEXT: vslideup.vx v10, v8, a0 -; ZVBB-NEXT: vmv.v.v v8, v10 +; ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; ZVBB-NEXT: vmv2r.v v12, v8 +; ZVBB-NEXT: vzext.vf2 v8, v12 ; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv4bf16_nxv2bf16: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9 -; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9 -; ZIP-NEXT: csrr a0, vlenb -; ZIP-NEXT: srli a0, a0, 2 -; ZIP-NEXT: add a1, a0, a0 -; ZIP-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; ZIP-NEXT: vslideup.vx v10, v11, a0 -; ZIP-NEXT: vmv.v.v v8, v10 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv4bf16( %a, %b) - ret %res + %res = call @llvm.vector.interleave2.nxv8i32( %a, poison) + ret %res } -define @vector_interleave_nxv8bf16_nxv4bf16( %a, %b) { -; V-LABEL: vector_interleave_nxv8bf16_nxv4bf16: -; V: # %bb.0: -; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; V-NEXT: vmv1r.v v10, v9 -; V-NEXT: vmv1r.v v11, v8 -; V-NEXT: vwaddu.vv v8, v11, v10 -; V-NEXT: li a0, -1 -; V-NEXT: vwmaccu.vx v8, a0, v10 -; 
V-NEXT: ret +define @vector_interleave_nxv8i32_nxv4i32_poison2( %a) { +; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vzext.vf2 v12, v8 +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsll.vx v8, v12, a0 +; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv8bf16_nxv4bf16: +; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2: ; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; ZVBB-NEXT: vmv1r.v v10, v9 -; ZVBB-NEXT: vmv1r.v v11, v8 -; ZVBB-NEXT: vwsll.vi v8, v10, 16 -; ZVBB-NEXT: vwaddu.wv v8, v8, v11 +; ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; ZVBB-NEXT: vmv2r.v v12, v8 +; ZVBB-NEXT: li a0, 32 +; ZVBB-NEXT: vwsll.vx v8, v12, a0 ; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv8bf16_nxv4bf16: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; ZIP-NEXT: vmv1r.v v10, v9 -; ZIP-NEXT: vmv1r.v v11, v8 -; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10 -; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv8bf16( %a, %b) - ret %res + %res = call @llvm.vector.interleave2.nxv8i32( poison, %a) + ret %res } -define @vector_interleave_nxv4f16_nxv2f16( %a, %b) { -; V-LABEL: vector_interleave_nxv4f16_nxv2f16: -; V: # %bb.0: -; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; V-NEXT: vwaddu.vv v10, v8, v9 -; V-NEXT: li a0, -1 -; V-NEXT: csrr a1, vlenb -; V-NEXT: vwmaccu.vx v10, a0, v9 -; V-NEXT: srli a1, a1, 2 -; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; V-NEXT: vslidedown.vx v8, v10, a1 -; V-NEXT: add a0, a1, a1 -; V-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; V-NEXT: vslideup.vx v10, v8, a1 -; V-NEXT: vmv.v.v v8, v10 -; V-NEXT: ret +define @vector_interleave_nxv48i1_nxv16i1( %a, %b, %c) nounwind { +; CHECK-LABEL: vector_interleave_nxv48i1_nxv16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.i v12, 0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: vmerge.vim v16, v12, 1, v0 +; CHECK-NEXT: slli a2, a1, 1 +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmerge.vim v14, v12, 1, v0 +; CHECK-NEXT: add a3, a0, a2 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmerge.vim v18, v12, 1, v0 +; CHECK-NEXT: add a2, a3, a2 +; CHECK-NEXT: vsseg3e8.v v14, (a0) +; CHECK-NEXT: vl2r.v v8, (a2) +; CHECK-NEXT: srli a2, a1, 2 +; CHECK-NEXT: srli a1, a1, 1 +; CHECK-NEXT: vl2r.v v10, (a3) +; CHECK-NEXT: vl2r.v v12, (a0) +; CHECK-NEXT: add a0, a2, a2 +; CHECK-NEXT: vmsne.vi v14, v8, 0 +; CHECK-NEXT: vmsne.vi v8, v10, 0 +; CHECK-NEXT: vmsne.vi v0, v12, 0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vslideup.vx v0, v8, a2 +; CHECK-NEXT: add a0, a1, a1 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vslideup.vx v0, v14, a1 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16: +; ZVBB-LABEL: vector_interleave_nxv48i1_nxv16i1: ; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; ZVBB-NEXT: vwsll.vi v10, v9, 16 +; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: vwaddu.wv v10, v10, v8 -; ZVBB-NEXT: srli a0, a0, 2 -; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma -; 
ZVBB-NEXT: vslidedown.vx v8, v10, a0 -; ZVBB-NEXT: add a1, a0, a0 -; ZVBB-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; ZVBB-NEXT: vslideup.vx v10, v8, a0 -; ZVBB-NEXT: vmv.v.v v8, v10 +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; ZVBB-NEXT: vmv1r.v v10, v0 +; ZVBB-NEXT: vmv1r.v v0, v8 +; ZVBB-NEXT: vmv.v.i v12, 0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: vmerge.vim v16, v12, 1, v0 +; ZVBB-NEXT: slli a2, a1, 1 +; ZVBB-NEXT: vmv1r.v v0, v10 +; ZVBB-NEXT: vmerge.vim v14, v12, 1, v0 +; ZVBB-NEXT: add a3, a0, a2 +; ZVBB-NEXT: vmv1r.v v0, v9 +; ZVBB-NEXT: vmerge.vim v18, v12, 1, v0 +; ZVBB-NEXT: add a2, a3, a2 +; ZVBB-NEXT: vsseg3e8.v v14, (a0) +; ZVBB-NEXT: vl2r.v v8, (a2) +; ZVBB-NEXT: srli a2, a1, 2 +; ZVBB-NEXT: srli a1, a1, 1 +; ZVBB-NEXT: vl2r.v v10, (a3) +; ZVBB-NEXT: vl2r.v v12, (a0) +; ZVBB-NEXT: add a0, a2, a2 +; ZVBB-NEXT: vmsne.vi v14, v8, 0 +; ZVBB-NEXT: vmsne.vi v8, v10, 0 +; ZVBB-NEXT: vmsne.vi v0, v12, 0 +; ZVBB-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; ZVBB-NEXT: vslideup.vx v0, v8, a2 +; ZVBB-NEXT: add a0, a1, a1 +; ZVBB-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v0, v14, a1 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv4f16_nxv2f16: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma -; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9 -; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9 -; ZIP-NEXT: csrr a0, vlenb -; ZIP-NEXT: srli a0, a0, 2 -; ZIP-NEXT: add a1, a0, a0 -; ZIP-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; ZIP-NEXT: vslideup.vx v10, v11, a0 -; ZIP-NEXT: vmv.v.v v8, v10 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv4f16( %a, %b) - ret %res + %res = call @llvm.vector.interleave3.nxv48i1( %a, %b, %c) + ret %res } -define @vector_interleave_nxv8f16_nxv4f16( %a, %b) { -; V-LABEL: vector_interleave_nxv8f16_nxv4f16: -; V: # %bb.0: -; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; V-NEXT: vmv1r.v v10, v9 -; V-NEXT: vmv1r.v v11, v8 -; V-NEXT: vwaddu.vv v8, v11, v10 -; V-NEXT: li a0, -1 -; V-NEXT: vwmaccu.vx v8, a0, v10 -; V-NEXT: ret -; -; ZVBB-LABEL: vector_interleave_nxv8f16_nxv4f16: -; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; ZVBB-NEXT: vmv1r.v v10, v9 -; ZVBB-NEXT: vmv1r.v v11, v8 -; ZVBB-NEXT: vwsll.vi v8, v10, 16 -; ZVBB-NEXT: vwaddu.wv v8, v8, v11 -; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv8f16_nxv4f16: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; ZIP-NEXT: vmv1r.v v10, v9 -; ZIP-NEXT: vmv1r.v v11, v8 -; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10 -; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv8f16( %a, %b) - ret %res -} -define @vector_interleave_nxv4f32_nxv2f32( %a, %b) { -; V-LABEL: vector_interleave_nxv4f32_nxv2f32: -; V: # %bb.0: -; V-NEXT: vsetvli a0, zero, e32, m1, ta, ma -; V-NEXT: vmv1r.v v10, v9 -; V-NEXT: vmv1r.v v11, v8 -; V-NEXT: vwaddu.vv v8, v11, v10 -; V-NEXT: li a0, -1 -; V-NEXT: vwmaccu.vx v8, a0, v10 -; V-NEXT: ret +define @vector_interleave_nxv48i8_nxv16i8( %a, %b, %c) nounwind { +; CHECK-LABEL: vector_interleave_nxv48i8_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli 
a1, a1, 1 +; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: vl2r.v v8, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2r.v v10, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2r.v v12, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv4f32_nxv2f32: +; ZVBB-LABEL: vector_interleave_nxv48i8_nxv16i8: ; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma -; ZVBB-NEXT: vmv1r.v v10, v9 -; ZVBB-NEXT: vmv1r.v v11, v8 -; ZVBB-NEXT: li a0, 32 -; ZVBB-NEXT: vwsll.vx v8, v10, a0 -; ZVBB-NEXT: vwaddu.wv v8, v8, v11 +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: slli a1, a1, 1 +; ZVBB-NEXT: vsetvli a2, zero, e8, m2, ta, ma +; ZVBB-NEXT: vsseg3e8.v v8, (a0) +; ZVBB-NEXT: vl2r.v v8, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2r.v v10, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2r.v v12, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv4f32_nxv2f32: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma -; ZIP-NEXT: vmv1r.v v10, v9 -; ZIP-NEXT: vmv1r.v v11, v8 -; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10 -; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv4f32( %a, %b) - ret %res + %res = call @llvm.vector.interleave3.nxv48i8( %a, %b, %c) + ret %res } -define @vector_interleave_nxv16bf16_nxv8bf16( %a, %b) { -; V-LABEL: vector_interleave_nxv16bf16_nxv8bf16: -; V: # %bb.0: -; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; V-NEXT: vmv2r.v v12, v10 -; V-NEXT: vmv2r.v v14, v8 -; V-NEXT: vwaddu.vv v8, v14, v12 -; V-NEXT: li a0, -1 -; V-NEXT: vwmaccu.vx v8, a0, v12 -; V-NEXT: ret -; -; ZVBB-LABEL: vector_interleave_nxv16bf16_nxv8bf16: -; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; ZVBB-NEXT: vmv2r.v v12, v10 -; ZVBB-NEXT: vmv2r.v v14, v8 -; ZVBB-NEXT: vwsll.vi v8, v12, 16 -; ZVBB-NEXT: vwaddu.wv v8, v8, v14 -; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv16bf16_nxv8bf16: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; ZIP-NEXT: vmv2r.v v12, v10 -; ZIP-NEXT: vmv2r.v v14, v8 -; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 -; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv16bf16( %a, %b) - ret %res -} -define @vector_interleave_nxv16f16_nxv8f16( %a, %b) { -; V-LABEL: vector_interleave_nxv16f16_nxv8f16: -; V: # %bb.0: -; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; V-NEXT: vmv2r.v v12, v10 -; V-NEXT: vmv2r.v v14, v8 -; V-NEXT: vwaddu.vv v8, v14, v12 -; V-NEXT: li a0, -1 -; V-NEXT: vwmaccu.vx v8, a0, v12 -; V-NEXT: ret +define @vector_interleave_nxv24i16_nxv8i16( %a, %b, %c) nounwind { +; CHECK-LABEL: vector_interleave_nxv24i16_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 1 +; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vl2re16.v v8, (a0) +; CHECK-NEXT: add a0, a0, a1 +; 
CHECK-NEXT: vl2re16.v v10, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2re16.v v12, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv16f16_nxv8f16: +; ZVBB-LABEL: vector_interleave_nxv24i16_nxv8i16: ; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; ZVBB-NEXT: vmv2r.v v12, v10 -; ZVBB-NEXT: vmv2r.v v14, v8 -; ZVBB-NEXT: vwsll.vi v8, v12, 16 -; ZVBB-NEXT: vwaddu.wv v8, v8, v14 +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: slli a1, a1, 1 +; ZVBB-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; ZVBB-NEXT: vsseg3e16.v v8, (a0) +; ZVBB-NEXT: vl2re16.v v8, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re16.v v10, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re16.v v12, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv16f16_nxv8f16: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; ZIP-NEXT: vmv2r.v v12, v10 -; ZIP-NEXT: vmv2r.v v14, v8 -; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 -; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv16f16( %a, %b) - ret %res + %res = call @llvm.vector.interleave3.nxv24i16( %a, %b, %c) + ret %res } -define @vector_interleave_nxv8f32_nxv4f32( %a, %b) { -; V-LABEL: vector_interleave_nxv8f32_nxv4f32: -; V: # %bb.0: -; V-NEXT: vsetvli a0, zero, e32, m2, ta, ma -; V-NEXT: vmv2r.v v12, v10 -; V-NEXT: vmv2r.v v14, v8 -; V-NEXT: vwaddu.vv v8, v14, v12 -; V-NEXT: li a0, -1 -; V-NEXT: vwmaccu.vx v8, a0, v12 -; V-NEXT: ret -; -; ZVBB-LABEL: vector_interleave_nxv8f32_nxv4f32: -; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma -; ZVBB-NEXT: vmv2r.v v12, v10 -; ZVBB-NEXT: vmv2r.v v14, v8 -; ZVBB-NEXT: li a0, 32 -; ZVBB-NEXT: vwsll.vx v8, v12, a0 -; ZVBB-NEXT: vwaddu.wv v8, v8, v14 -; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv8f32_nxv4f32: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e32, m2, ta, ma -; ZIP-NEXT: vmv2r.v v12, v10 -; ZIP-NEXT: vmv2r.v v14, v8 -; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 -; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv8f32( %a, %b) - ret %res -} -define @vector_interleave_nxv4f64_nxv2f64( %a, %b) { -; V-LABEL: vector_interleave_nxv4f64_nxv2f64: -; V: # %bb.0: -; V-NEXT: csrr a0, vlenb -; V-NEXT: vsetvli a1, zero, e16, m1, ta, mu -; V-NEXT: vid.v v12 -; V-NEXT: srli a0, a0, 2 -; V-NEXT: vand.vi v13, v12, 1 -; V-NEXT: vmsne.vi v0, v13, 0 -; V-NEXT: vsrl.vi v16, v12, 1 -; V-NEXT: vadd.vx v16, v16, a0, v0.t -; V-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; V-NEXT: vrgatherei16.vv v12, v8, v16 -; V-NEXT: vmv.v.v v8, v12 -; V-NEXT: ret +define @vector_interleave_nxv12i32_nxv4i32( %a, %b, %c) nounwind { +; CHECK-LABEL: vector_interleave_nxv12i32_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 1 +; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vl2re32.v v8, (a0) +; 
CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2re32.v v10, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2re32.v v12, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv4f64_nxv2f64: +; ZVBB-LABEL: vector_interleave_nxv12i32_nxv4i32: ; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, mu -; ZVBB-NEXT: vid.v v12 -; ZVBB-NEXT: srli a0, a0, 2 -; ZVBB-NEXT: vand.vi v13, v12, 1 -; ZVBB-NEXT: vmsne.vi v0, v13, 0 -; ZVBB-NEXT: vsrl.vi v16, v12, 1 -; ZVBB-NEXT: vadd.vx v16, v16, a0, v0.t -; ZVBB-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; ZVBB-NEXT: vrgatherei16.vv v12, v8, v16 -; ZVBB-NEXT: vmv.v.v v8, v12 -; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv4f64_nxv2f64: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; ZIP-NEXT: vmv2r.v v12, v10 -; ZIP-NEXT: vmv2r.v v14, v8 -; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 -; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv4f64( %a, %b) - ret %res -} - - - -define @vector_interleave_nxv64bf16_nxv32bf16( %a, %b) { -; V-LABEL: vector_interleave_nxv64bf16_nxv32bf16: -; V: # %bb.0: -; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; V-NEXT: vmv8r.v v24, v8 -; V-NEXT: vwaddu.vv v8, v24, v16 -; V-NEXT: li a0, -1 -; V-NEXT: vwaddu.vv v0, v28, v20 -; V-NEXT: vwmaccu.vx v8, a0, v16 -; V-NEXT: vwmaccu.vx v0, a0, v20 -; V-NEXT: vmv8r.v v16, v0 -; V-NEXT: ret -; -; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16: -; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; ZVBB-NEXT: vwsll.vi v24, v16, 16 -; ZVBB-NEXT: vwsll.vi v0, v20, 16 -; ZVBB-NEXT: vwaddu.wv v24, v24, v8 -; ZVBB-NEXT: vwaddu.wv v0, v0, v12 -; ZVBB-NEXT: vmv8r.v v8, v24 -; ZVBB-NEXT: vmv8r.v v16, v0 -; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv64bf16_nxv32bf16: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 -; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 -; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 -; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 -; ZIP-NEXT: vmv8r.v v8, v24 -; ZIP-NEXT: vmv8r.v v16, v0 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv64bf16( %a, %b) - ret %res -} - -define @vector_interleave_nxv64f16_nxv32f16( %a, %b) { -; V-LABEL: vector_interleave_nxv64f16_nxv32f16: -; V: # %bb.0: -; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; V-NEXT: vmv8r.v v24, v8 -; V-NEXT: vwaddu.vv v8, v24, v16 -; V-NEXT: li a0, -1 -; V-NEXT: vwaddu.vv v0, v28, v20 -; V-NEXT: vwmaccu.vx v8, a0, v16 -; V-NEXT: vwmaccu.vx v0, a0, v20 -; V-NEXT: vmv8r.v v16, v0 -; V-NEXT: ret -; -; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16: -; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; ZVBB-NEXT: vwsll.vi v24, v16, 16 -; ZVBB-NEXT: vwsll.vi v0, v20, 16 -; ZVBB-NEXT: vwaddu.wv v24, v24, v8 -; ZVBB-NEXT: vwaddu.wv v0, v0, v12 -; ZVBB-NEXT: vmv8r.v v8, v24 -; ZVBB-NEXT: vmv8r.v v16, v0 -; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv64f16_nxv32f16: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 -; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 -; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 -; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 -; ZIP-NEXT: vmv8r.v v8, v24 -; ZIP-NEXT: vmv8r.v v16, v0 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv64f16( %a, %b) - ret %res -} - -define 
@vector_interleave_nxv32f32_nxv16f32( %a, %b) { -; V-LABEL: vector_interleave_nxv32f32_nxv16f32: -; V: # %bb.0: -; V-NEXT: vsetvli a0, zero, e32, m4, ta, ma -; V-NEXT: vmv8r.v v24, v8 -; V-NEXT: vwaddu.vv v8, v24, v16 -; V-NEXT: li a0, -1 -; V-NEXT: vwaddu.vv v0, v28, v20 -; V-NEXT: vwmaccu.vx v8, a0, v16 -; V-NEXT: vwmaccu.vx v0, a0, v20 -; V-NEXT: vmv8r.v v16, v0 -; V-NEXT: ret -; -; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32: -; ZVBB: # %bb.0: -; ZVBB-NEXT: li a0, 32 -; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; ZVBB-NEXT: vwsll.vx v24, v16, a0 -; ZVBB-NEXT: vwsll.vx v0, v20, a0 -; ZVBB-NEXT: vwaddu.wv v24, v24, v8 -; ZVBB-NEXT: vwaddu.wv v0, v0, v12 -; ZVBB-NEXT: vmv8r.v v8, v24 -; ZVBB-NEXT: vmv8r.v v16, v0 -; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv32f32_nxv16f32: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e32, m4, ta, ma -; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 -; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 -; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 -; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 -; ZIP-NEXT: vmv8r.v v8, v24 -; ZIP-NEXT: vmv8r.v v16, v0 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv32f32( %a, %b) - ret %res -} - -define @vector_interleave_nxv16f64_nxv8f64( %a, %b) { -; V-LABEL: vector_interleave_nxv16f64_nxv8f64: -; V: # %bb.0: -; V-NEXT: csrr a0, vlenb -; V-NEXT: vsetvli a1, zero, e16, m2, ta, mu -; V-NEXT: vid.v v6 -; V-NEXT: vmv8r.v v24, v8 -; V-NEXT: srli a0, a0, 1 -; V-NEXT: vmv4r.v v28, v16 -; V-NEXT: vmv4r.v v16, v12 -; V-NEXT: vand.vi v8, v6, 1 -; V-NEXT: vmsne.vi v0, v8, 0 -; V-NEXT: vsrl.vi v6, v6, 1 -; V-NEXT: vadd.vx v6, v6, a0, v0.t -; V-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; V-NEXT: vrgatherei16.vv v8, v24, v6 -; V-NEXT: vrgatherei16.vv v24, v16, v6 -; V-NEXT: vmv.v.v v16, v24 -; V-NEXT: ret -; -; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64: -; ZVBB: # %bb.0: +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: slli a1, a1, 1 +; ZVBB-NEXT: vsetvli a2, zero, e32, m2, ta, ma +; ZVBB-NEXT: vsseg3e32.v v8, (a0) +; ZVBB-NEXT: vl2re32.v v8, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re32.v v10, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re32.v v12, (a0) ; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: vsetvli a1, zero, e16, m2, ta, mu -; ZVBB-NEXT: vid.v v6 -; ZVBB-NEXT: vmv8r.v v24, v8 -; ZVBB-NEXT: srli a0, a0, 1 -; ZVBB-NEXT: vmv4r.v v28, v16 -; ZVBB-NEXT: vmv4r.v v16, v12 -; ZVBB-NEXT: vand.vi v8, v6, 1 -; ZVBB-NEXT: vmsne.vi v0, v8, 0 -; ZVBB-NEXT: vsrl.vi v6, v6, 1 -; ZVBB-NEXT: vadd.vx v6, v6, a0, v0.t -; ZVBB-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; ZVBB-NEXT: vrgatherei16.vv v8, v24, v6 -; ZVBB-NEXT: vrgatherei16.vv v24, v16, v6 -; ZVBB-NEXT: vmv.v.v v16, v24 -; ZVBB-NEXT: ret -; -; ZIP-LABEL: vector_interleave_nxv16f64_nxv8f64: -; ZIP: # %bb.0: -; ZIP-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 -; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 -; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 -; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 -; ZIP-NEXT: vmv8r.v v8, v24 -; ZIP-NEXT: vmv8r.v v16, v0 -; ZIP-NEXT: ret - %res = call @llvm.vector.interleave2.nxv16f64( %a, %b) - ret %res -} - -define @vector_interleave_nxv8i32_nxv4i32_poison( %a) { -; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vzext.vf2 v8, v12 -; CHECK-NEXT: ret -; -; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison: 
-; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; ZVBB-NEXT: vmv2r.v v12, v8 -; ZVBB-NEXT: vzext.vf2 v8, v12 +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret - %res = call @llvm.vector.interleave2.nxv8i32( %a, poison) - ret %res + %res = call @llvm.vector.interleave3.nxv12i32( %a, %b, %c) + ret %res } -define @vector_interleave_nxv8i32_nxv4i32_poison2( %a) { -; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vzext.vf2 v12, v8 -; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsll.vx v8, v12, a0 -; CHECK-NEXT: ret -; -; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2: -; ZVBB: # %bb.0: -; ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma -; ZVBB-NEXT: vmv2r.v v12, v8 -; ZVBB-NEXT: li a0, 32 -; ZVBB-NEXT: vwsll.vx v8, v12, a0 -; ZVBB-NEXT: ret - %res = call @llvm.vector.interleave2.nxv8i32( poison, %a) - ret %res -} -define @vector_interleave_nxv48i1_nxv16i1( %a, %b, %c) nounwind { -; CHECK-LABEL: vector_interleave_nxv48i1_nxv16i1: +define @vector_interleave_nxv6i64_nxv2i64( %a, %b, %c) nounwind { +; CHECK-LABEL: vector_interleave_nxv6i64_nxv2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: vmerge.vim v16, v12, 1, v0 -; CHECK-NEXT: slli a2, a1, 1 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmerge.vim v14, v12, 1, v0 -; CHECK-NEXT: add a3, a0, a2 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmerge.vim v18, v12, 1, v0 -; CHECK-NEXT: add a2, a3, a2 -; CHECK-NEXT: vsseg3e8.v v14, (a0) -; CHECK-NEXT: vl2r.v v8, (a2) -; CHECK-NEXT: srli a2, a1, 2 -; CHECK-NEXT: srli a1, a1, 1 -; CHECK-NEXT: vl2r.v v10, (a3) -; CHECK-NEXT: vl2r.v v12, (a0) -; CHECK-NEXT: add a0, a2, a2 -; CHECK-NEXT: vmsne.vi v14, v8, 0 -; CHECK-NEXT: vmsne.vi v8, v10, 0 -; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vslideup.vx v0, v8, a2 -; CHECK-NEXT: add a0, a1, a1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vslideup.vx v0, v14, a1 +; CHECK-NEXT: slli a1, a1, 1 +; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: vl2re64.v v8, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2re64.v v10, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2re64.v v12, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 @@ -1034,133 +759,4071 @@ define @vector_interleave_nxv48i1_nxv16i1( ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv48i1_nxv16i1: +; ZVBB-LABEL: vector_interleave_nxv6i64_nxv2i64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: sub sp, sp, a0 -; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; ZVBB-NEXT: vmv1r.v v10, v0 -; ZVBB-NEXT: vmv1r.v v0, v8 -; ZVBB-NEXT: vmv.v.i v12, 0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb -; ZVBB-NEXT: vmerge.vim v16, v12, 1, v0 -; ZVBB-NEXT: slli a2, a1, 1 -; ZVBB-NEXT: vmv1r.v v0, v10 -; ZVBB-NEXT: vmerge.vim v14, v12, 1, v0 -; ZVBB-NEXT: add a3, a0, a2 -; ZVBB-NEXT: vmv1r.v v0, v9 -; ZVBB-NEXT: vmerge.vim 
v18, v12, 1, v0 -; ZVBB-NEXT: add a2, a3, a2 -; ZVBB-NEXT: vsseg3e8.v v14, (a0) -; ZVBB-NEXT: vl2r.v v8, (a2) -; ZVBB-NEXT: srli a2, a1, 2 -; ZVBB-NEXT: srli a1, a1, 1 -; ZVBB-NEXT: vl2r.v v10, (a3) -; ZVBB-NEXT: vl2r.v v12, (a0) -; ZVBB-NEXT: add a0, a2, a2 -; ZVBB-NEXT: vmsne.vi v14, v8, 0 -; ZVBB-NEXT: vmsne.vi v8, v10, 0 -; ZVBB-NEXT: vmsne.vi v0, v12, 0 -; ZVBB-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; ZVBB-NEXT: vslideup.vx v0, v8, a2 -; ZVBB-NEXT: add a0, a1, a1 -; ZVBB-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; ZVBB-NEXT: vslideup.vx v0, v14, a1 +; ZVBB-NEXT: slli a1, a1, 1 +; ZVBB-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; ZVBB-NEXT: vsseg3e64.v v8, (a0) +; ZVBB-NEXT: vl2re64.v v8, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re64.v v10, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re64.v v12, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret - %res = call @llvm.vector.interleave3.nxv48i1( %a, %b, %c) - ret %res + %res = call @llvm.vector.interleave3.nxv6i64( %a, %b, %c) + ret %res } - -define @vector_interleave_nxv48i8_nxv16i8( %a, %b, %c) nounwind { -; CHECK-LABEL: vector_interleave_nxv48i8_nxv16i8: +define @vector_interleave_nxv80i1_nxv16i1( %a, %b, %c, %d, %e) nounwind { +; CHECK-LABEL: vector_interleave_nxv80i1_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: li a1, 10 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmv.v.i v12, 0 +; CHECK-NEXT: addi a4, sp, 16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 2 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0) -; CHECK-NEXT: vl2r.v v8, (a0) -; CHECK-NEXT: add a0, a0, a1 -; CHECK-NEXT: vl2r.v v10, (a0) -; CHECK-NEXT: add a0, a0, a1 -; CHECK-NEXT: vl2r.v v12, (a0) +; CHECK-NEXT: vmerge.vim v14, v12, 1, v0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmerge.vim v18, v12, 1, v0 +; CHECK-NEXT: add a2, a4, a1 +; CHECK-NEXT: srli a3, a1, 2 +; CHECK-NEXT: vmv2r.v v20, v14 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmerge.vim v16, v12, 1, v0 +; CHECK-NEXT: vmv1r.v v21, v18 +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmerge.vim v8, v12, 1, v0 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v16, v19 +; CHECK-NEXT: add a5, a2, a1 +; CHECK-NEXT: vmv1r.v v23, v8 +; CHECK-NEXT: vmv1r.v v18, v9 +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmerge.vim v24, v12, 1, v0 +; CHECK-NEXT: vsetvli a6, zero, e8, m1, ta, ma +; CHECK-NEXT: vsseg5e8.v v20, (a4) +; CHECK-NEXT: vmv1r.v v19, v25 +; CHECK-NEXT: vsseg5e8.v v15, (a0) +; CHECK-NEXT: vl1r.v v8, (a5) +; CHECK-NEXT: add a5, a5, a1 +; CHECK-NEXT: vl1r.v v10, (a4) +; CHECK-NEXT: add a4, a5, a1 +; CHECK-NEXT: vl1r.v v12, (a4) +; CHECK-NEXT: add a4, a0, a1 +; CHECK-NEXT: vl1r.v v14, (a4) +; CHECK-NEXT: add a4, a4, a1 +; CHECK-NEXT: vl1r.v v9, (a5) +; CHECK-NEXT: add a5, a4, a1 +; CHECK-NEXT: vl1r.v v16, (a5) +; CHECK-NEXT: add a5, a5, a1 +; CHECK-NEXT: srli a1, a1, 1 +; CHECK-NEXT: vl1r.v v11, (a2) +; CHECK-NEXT: add a2, a3, a3 +; CHECK-NEXT: vl1r.v v15, (a4) +; CHECK-NEXT: add a4, a1, a1 +; CHECK-NEXT: vl1r.v v13, (a0) +; CHECK-NEXT: vl1r.v v17, (a5) +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; 
CHECK-NEXT: vmsne.vi v18, v8, 0 +; CHECK-NEXT: vmsne.vi v0, v10, 0 +; CHECK-NEXT: vmsne.vi v8, v14, 0 +; CHECK-NEXT: vmsne.vi v9, v12, 0 +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vslideup.vx v0, v18, a3 +; CHECK-NEXT: vslideup.vx v9, v8, a3 +; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma +; CHECK-NEXT: vslideup.vx v0, v9, a1 +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmsne.vi v8, v16, 0 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: li a1, 10 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv48i8_nxv16i8: +; ZVBB-LABEL: vector_interleave_nxv80i1_nxv16i1: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: li a1, 10 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: sub sp, sp, a0 -; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; ZVBB-NEXT: vmv.v.i v12, 0 +; ZVBB-NEXT: addi a4, sp, 16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 2 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: add a0, sp, a0 +; ZVBB-NEXT: addi a0, a0, 16 ; ZVBB-NEXT: csrr a1, vlenb -; ZVBB-NEXT: slli a1, a1, 1 -; ZVBB-NEXT: vsetvli a2, zero, e8, m2, ta, ma -; ZVBB-NEXT: vsseg3e8.v v8, (a0) -; ZVBB-NEXT: vl2r.v v8, (a0) -; ZVBB-NEXT: add a0, a0, a1 -; ZVBB-NEXT: vl2r.v v10, (a0) -; ZVBB-NEXT: add a0, a0, a1 -; ZVBB-NEXT: vl2r.v v12, (a0) +; ZVBB-NEXT: vmerge.vim v14, v12, 1, v0 +; ZVBB-NEXT: vmv1r.v v0, v8 +; ZVBB-NEXT: vmerge.vim v18, v12, 1, v0 +; ZVBB-NEXT: add a2, a4, a1 +; ZVBB-NEXT: srli a3, a1, 2 +; ZVBB-NEXT: vmv2r.v v20, v14 +; ZVBB-NEXT: vmv1r.v v0, v9 +; ZVBB-NEXT: vmerge.vim v16, v12, 1, v0 +; ZVBB-NEXT: vmv1r.v v21, v18 +; ZVBB-NEXT: vmv1r.v v0, v10 +; ZVBB-NEXT: vmerge.vim v8, v12, 1, v0 +; ZVBB-NEXT: vmv1r.v v22, v16 +; ZVBB-NEXT: vmv1r.v v16, v19 +; ZVBB-NEXT: add a5, a2, a1 +; ZVBB-NEXT: vmv1r.v v23, v8 +; ZVBB-NEXT: vmv1r.v v18, v9 +; ZVBB-NEXT: vmv1r.v v0, v11 +; ZVBB-NEXT: vmerge.vim v24, v12, 1, v0 +; ZVBB-NEXT: vsetvli a6, zero, e8, m1, ta, ma +; ZVBB-NEXT: vsseg5e8.v v20, (a4) +; ZVBB-NEXT: vmv1r.v v19, v25 +; ZVBB-NEXT: vsseg5e8.v v15, (a0) +; ZVBB-NEXT: vl1r.v v8, (a5) +; ZVBB-NEXT: add a5, a5, a1 +; ZVBB-NEXT: vl1r.v v10, (a4) +; ZVBB-NEXT: add a4, a5, a1 +; ZVBB-NEXT: vl1r.v v12, (a4) +; ZVBB-NEXT: add a4, a0, a1 +; ZVBB-NEXT: vl1r.v v14, (a4) +; ZVBB-NEXT: add a4, a4, a1 +; ZVBB-NEXT: vl1r.v v9, (a5) +; ZVBB-NEXT: add a5, a4, a1 +; ZVBB-NEXT: vl1r.v v16, (a5) +; ZVBB-NEXT: add a5, a5, a1 +; ZVBB-NEXT: srli a1, a1, 1 +; ZVBB-NEXT: vl1r.v v11, (a2) +; ZVBB-NEXT: add a2, a3, a3 +; ZVBB-NEXT: vl1r.v v15, (a4) +; ZVBB-NEXT: add a4, a1, a1 +; ZVBB-NEXT: vl1r.v v13, (a0) +; ZVBB-NEXT: vl1r.v v17, (a5) +; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; ZVBB-NEXT: vmsne.vi v18, v8, 0 +; ZVBB-NEXT: vmsne.vi v0, v10, 0 +; ZVBB-NEXT: vmsne.vi v8, v14, 0 +; ZVBB-NEXT: vmsne.vi v9, v12, 0 +; ZVBB-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; ZVBB-NEXT: vslideup.vx v0, v18, a3 +; ZVBB-NEXT: vslideup.vx v9, v8, a3 +; ZVBB-NEXT: vsetvli zero, a4, e8, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v0, v9, a1 +; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; ZVBB-NEXT: vmsne.vi v8, v16, 0 ; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: li a1, 10 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret - %res = call @llvm.vector.interleave3.nxv48i8( %a, %b, %c) - ret %res + %res = call @llvm.vector.interleave5.nxv80i1( %a, %b, %c, 
%d, %e) + ret %res } -define @vector_interleave_nxv24i16_nxv8i16( %a, %b, %c) nounwind { -; CHECK-LABEL: vector_interleave_nxv24i16_nxv8i16: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 6 -; CHECK-NEXT: mul a0, a0, a1 -; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma -; CHECK-NEXT: vsseg3e16.v v8, (a0) -; CHECK-NEXT: vl2re16.v v8, (a0) -; CHECK-NEXT: add a0, a0, a1 -; CHECK-NEXT: vl2re16.v v10, (a0) -; CHECK-NEXT: add a0, a0, a1 -; CHECK-NEXT: vl2re16.v v12, (a0) -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 6 -; CHECK-NEXT: mul a0, a0, a1 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: ret +define @vector_interleave_nxv80i8_nxv16i8( %a, %b, %c, %d, %e) nounwind { ; -; ZVBB-LABEL: vector_interleave_nxv24i16_nxv8i16: -; ZVBB: # %bb.0: +; RV32-LABEL: vector_interleave_nxv80i8_nxv16i8: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -80 +; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; RV32-NEXT: addi s0, sp, 80 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 28 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; RV32-NEXT: vmv2r.v v20, v16 +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vmv2r.v v18, v12 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a2, a1, 2 +; RV32-NEXT: add a1, a2, a1 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 64 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: vmv2r.v v16, v8 +; RV32-NEXT: vmv2r.v v22, v16 +; RV32-NEXT: vmv2r.v v24, v18 +; RV32-NEXT: vmv1r.v v26, v20 +; RV32-NEXT: add a3, a0, a2 +; RV32-NEXT: vmv1r.v v23, v10 +; RV32-NEXT: add a4, a1, a2 +; RV32-NEXT: add a5, a4, a2 +; RV32-NEXT: vmv1r.v v25, v14 +; RV32-NEXT: add a6, a5, a2 +; RV32-NEXT: vmv1r.v v18, v11 +; RV32-NEXT: vsseg5e8.v v22, (a0) +; RV32-NEXT: vmv1r.v v20, v15 +; RV32-NEXT: vsseg5e8.v v17, (a1) +; RV32-NEXT: vl1r.v v16, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1r.v v17, (a6) +; RV32-NEXT: add a6, a3, a2 +; RV32-NEXT: vl1r.v v10, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1r.v v11, (a6) +; RV32-NEXT: vl1r.v v8, (a0) +; RV32-NEXT: vl1r.v v9, (a3) +; RV32-NEXT: vl1r.v v14, (a4) +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 10 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 64 +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1r.v v15, (a5) +; RV32-NEXT: vl1r.v v12, (a6) +; RV32-NEXT: vl1r.v v13, (a1) +; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: add a2, a0, a2 +; RV32-NEXT: vs2r.v v16, (a2) +; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: vl8r.v v16, (a2) +; RV32-NEXT: vl8r.v v8, (a0) +; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 80 +; RV32-NEXT: ret +; +; RV64-LABEL: vector_interleave_nxv80i8_nxv16i8: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; RV64-NEXT: addi s0, sp, 80 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a1, 28 +; RV64-NEXT: mul a0, a0, a1 +; RV64-NEXT: sub sp, sp, a0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; RV64-NEXT: vmv2r.v v20, v16 +; RV64-NEXT: addi a0, sp, 64 +; RV64-NEXT: vmv2r.v v18, v12 +; RV64-NEXT: csrr a1, vlenb +; 
RV64-NEXT: slli a2, a1, 2 +; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 64 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: vmv2r.v v16, v8 +; RV64-NEXT: vmv2r.v v22, v16 +; RV64-NEXT: vmv2r.v v24, v18 +; RV64-NEXT: vmv1r.v v26, v20 +; RV64-NEXT: add a3, a0, a2 +; RV64-NEXT: vmv1r.v v23, v10 +; RV64-NEXT: add a4, a1, a2 +; RV64-NEXT: add a5, a4, a2 +; RV64-NEXT: vmv1r.v v25, v14 +; RV64-NEXT: add a6, a5, a2 +; RV64-NEXT: vmv1r.v v18, v11 +; RV64-NEXT: vsseg5e8.v v22, (a0) +; RV64-NEXT: vmv1r.v v20, v15 +; RV64-NEXT: vsseg5e8.v v17, (a1) +; RV64-NEXT: vl1r.v v16, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1r.v v17, (a6) +; RV64-NEXT: add a6, a3, a2 +; RV64-NEXT: vl1r.v v10, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1r.v v11, (a6) +; RV64-NEXT: vl1r.v v8, (a0) +; RV64-NEXT: vl1r.v v9, (a3) +; RV64-NEXT: vl1r.v v14, (a4) +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a3, 10 +; RV64-NEXT: mul a0, a0, a3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 64 +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1r.v v15, (a5) +; RV64-NEXT: vl1r.v v12, (a6) +; RV64-NEXT: vl1r.v v13, (a1) +; RV64-NEXT: slli a2, a2, 3 +; RV64-NEXT: add a2, a0, a2 +; RV64-NEXT: vs2r.v v16, (a2) +; RV64-NEXT: vs8r.v v8, (a0) +; RV64-NEXT: vl8r.v v16, (a2) +; RV64-NEXT: vl8r.v v8, (a0) +; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; ZVBB-RV32-LABEL: vector_interleave_nxv80i8_nxv16i8: +; ZVBB-RV32: # %bb.0: +; ZVBB-RV32-NEXT: addi sp, sp, -80 +; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: addi s0, sp, 80 +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: li a1, 28 +; ZVBB-RV32-NEXT: mul a0, a0, a1 +; ZVBB-RV32-NEXT: sub sp, sp, a0 +; ZVBB-RV32-NEXT: andi sp, sp, -64 +; ZVBB-RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; ZVBB-RV32-NEXT: vmv2r.v v20, v16 +; ZVBB-RV32-NEXT: addi a0, sp, 64 +; ZVBB-RV32-NEXT: vmv2r.v v18, v12 +; ZVBB-RV32-NEXT: csrr a1, vlenb +; ZVBB-RV32-NEXT: slli a2, a1, 2 +; ZVBB-RV32-NEXT: add a1, a2, a1 +; ZVBB-RV32-NEXT: add a1, sp, a1 +; ZVBB-RV32-NEXT: addi a1, a1, 64 +; ZVBB-RV32-NEXT: csrr a2, vlenb +; ZVBB-RV32-NEXT: vmv2r.v v16, v8 +; ZVBB-RV32-NEXT: vmv2r.v v22, v16 +; ZVBB-RV32-NEXT: vmv2r.v v24, v18 +; ZVBB-RV32-NEXT: vmv1r.v v26, v20 +; ZVBB-RV32-NEXT: add a3, a0, a2 +; ZVBB-RV32-NEXT: vmv1r.v v23, v10 +; ZVBB-RV32-NEXT: add a4, a1, a2 +; ZVBB-RV32-NEXT: add a5, a4, a2 +; ZVBB-RV32-NEXT: vmv1r.v v25, v14 +; ZVBB-RV32-NEXT: add a6, a5, a2 +; ZVBB-RV32-NEXT: vmv1r.v v18, v11 +; ZVBB-RV32-NEXT: vsseg5e8.v v22, (a0) +; ZVBB-RV32-NEXT: vmv1r.v v20, v15 +; ZVBB-RV32-NEXT: vsseg5e8.v v17, (a1) +; ZVBB-RV32-NEXT: vl1r.v v16, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1r.v v17, (a6) +; ZVBB-RV32-NEXT: add a6, a3, a2 +; ZVBB-RV32-NEXT: vl1r.v v10, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1r.v v11, (a6) +; ZVBB-RV32-NEXT: vl1r.v v8, (a0) +; ZVBB-RV32-NEXT: vl1r.v v9, (a3) +; ZVBB-RV32-NEXT: vl1r.v v14, (a4) +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: li a3, 10 +; ZVBB-RV32-NEXT: mul a0, a0, a3 +; ZVBB-RV32-NEXT: add a0, sp, a0 +; ZVBB-RV32-NEXT: addi a0, a0, 64 +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1r.v v15, (a5) +; ZVBB-RV32-NEXT: vl1r.v v12, (a6) +; ZVBB-RV32-NEXT: vl1r.v v13, (a1) +; ZVBB-RV32-NEXT: slli a2, a2, 3 +; ZVBB-RV32-NEXT: add a2, a0, a2 +; ZVBB-RV32-NEXT: vs2r.v 
v16, (a2) +; ZVBB-RV32-NEXT: vs8r.v v8, (a0) +; ZVBB-RV32-NEXT: vl8r.v v16, (a2) +; ZVBB-RV32-NEXT: vl8r.v v8, (a0) +; ZVBB-RV32-NEXT: addi sp, s0, -80 +; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: addi sp, sp, 80 +; ZVBB-RV32-NEXT: ret +; +; ZVBB-RV64-LABEL: vector_interleave_nxv80i8_nxv16i8: +; ZVBB-RV64: # %bb.0: +; ZVBB-RV64-NEXT: addi sp, sp, -80 +; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: addi s0, sp, 80 +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a1, 28 +; ZVBB-RV64-NEXT: mul a0, a0, a1 +; ZVBB-RV64-NEXT: sub sp, sp, a0 +; ZVBB-RV64-NEXT: andi sp, sp, -64 +; ZVBB-RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; ZVBB-RV64-NEXT: vmv2r.v v20, v16 +; ZVBB-RV64-NEXT: addi a0, sp, 64 +; ZVBB-RV64-NEXT: vmv2r.v v18, v12 +; ZVBB-RV64-NEXT: csrr a1, vlenb +; ZVBB-RV64-NEXT: slli a2, a1, 2 +; ZVBB-RV64-NEXT: add a1, a2, a1 +; ZVBB-RV64-NEXT: add a1, sp, a1 +; ZVBB-RV64-NEXT: addi a1, a1, 64 +; ZVBB-RV64-NEXT: csrr a2, vlenb +; ZVBB-RV64-NEXT: vmv2r.v v16, v8 +; ZVBB-RV64-NEXT: vmv2r.v v22, v16 +; ZVBB-RV64-NEXT: vmv2r.v v24, v18 +; ZVBB-RV64-NEXT: vmv1r.v v26, v20 +; ZVBB-RV64-NEXT: add a3, a0, a2 +; ZVBB-RV64-NEXT: vmv1r.v v23, v10 +; ZVBB-RV64-NEXT: add a4, a1, a2 +; ZVBB-RV64-NEXT: add a5, a4, a2 +; ZVBB-RV64-NEXT: vmv1r.v v25, v14 +; ZVBB-RV64-NEXT: add a6, a5, a2 +; ZVBB-RV64-NEXT: vmv1r.v v18, v11 +; ZVBB-RV64-NEXT: vsseg5e8.v v22, (a0) +; ZVBB-RV64-NEXT: vmv1r.v v20, v15 +; ZVBB-RV64-NEXT: vsseg5e8.v v17, (a1) +; ZVBB-RV64-NEXT: vl1r.v v16, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1r.v v17, (a6) +; ZVBB-RV64-NEXT: add a6, a3, a2 +; ZVBB-RV64-NEXT: vl1r.v v10, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1r.v v11, (a6) +; ZVBB-RV64-NEXT: vl1r.v v8, (a0) +; ZVBB-RV64-NEXT: vl1r.v v9, (a3) +; ZVBB-RV64-NEXT: vl1r.v v14, (a4) +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a3, 10 +; ZVBB-RV64-NEXT: mul a0, a0, a3 +; ZVBB-RV64-NEXT: add a0, sp, a0 +; ZVBB-RV64-NEXT: addi a0, a0, 64 +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1r.v v15, (a5) +; ZVBB-RV64-NEXT: vl1r.v v12, (a6) +; ZVBB-RV64-NEXT: vl1r.v v13, (a1) +; ZVBB-RV64-NEXT: slli a2, a2, 3 +; ZVBB-RV64-NEXT: add a2, a0, a2 +; ZVBB-RV64-NEXT: vs2r.v v16, (a2) +; ZVBB-RV64-NEXT: vs8r.v v8, (a0) +; ZVBB-RV64-NEXT: vl8r.v v16, (a2) +; ZVBB-RV64-NEXT: vl8r.v v8, (a0) +; ZVBB-RV64-NEXT: addi sp, s0, -80 +; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: addi sp, sp, 80 +; ZVBB-RV64-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv80i8_nxv16i8: +; ZIP: # %bb.0: +; ZIP-NEXT: addi sp, sp, -80 +; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZIP-NEXT: addi s0, sp, 80 +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a1, 28 +; ZIP-NEXT: mul a0, a0, a1 +; ZIP-NEXT: sub sp, sp, a0 +; ZIP-NEXT: andi sp, sp, -64 +; ZIP-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; ZIP-NEXT: vmv2r.v v20, v16 +; ZIP-NEXT: addi a0, sp, 64 +; ZIP-NEXT: vmv2r.v v18, v12 +; ZIP-NEXT: csrr a1, vlenb +; ZIP-NEXT: slli a2, a1, 2 +; ZIP-NEXT: add a1, a2, a1 +; ZIP-NEXT: add a1, sp, a1 +; ZIP-NEXT: addi a1, a1, 64 +; ZIP-NEXT: csrr a2, vlenb +; ZIP-NEXT: vmv2r.v v16, v8 +; ZIP-NEXT: vmv2r.v v22, v16 +; ZIP-NEXT: vmv2r.v v24, v18 +; ZIP-NEXT: vmv1r.v v26, v20 +; ZIP-NEXT: add a3, a0, a2 +; ZIP-NEXT: vmv1r.v v23, v10 +; ZIP-NEXT: add 
a4, a1, a2 +; ZIP-NEXT: add a5, a4, a2 +; ZIP-NEXT: vmv1r.v v25, v14 +; ZIP-NEXT: add a6, a5, a2 +; ZIP-NEXT: vmv1r.v v18, v11 +; ZIP-NEXT: vsseg5e8.v v22, (a0) +; ZIP-NEXT: vmv1r.v v20, v15 +; ZIP-NEXT: vsseg5e8.v v17, (a1) +; ZIP-NEXT: vl1r.v v16, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1r.v v17, (a6) +; ZIP-NEXT: add a6, a3, a2 +; ZIP-NEXT: vl1r.v v10, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1r.v v11, (a6) +; ZIP-NEXT: vl1r.v v8, (a0) +; ZIP-NEXT: vl1r.v v9, (a3) +; ZIP-NEXT: vl1r.v v14, (a4) +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a3, 10 +; ZIP-NEXT: mul a0, a0, a3 +; ZIP-NEXT: add a0, sp, a0 +; ZIP-NEXT: addi a0, a0, 64 +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1r.v v15, (a5) +; ZIP-NEXT: vl1r.v v12, (a6) +; ZIP-NEXT: vl1r.v v13, (a1) +; ZIP-NEXT: slli a2, a2, 3 +; ZIP-NEXT: add a2, a0, a2 +; ZIP-NEXT: vs2r.v v16, (a2) +; ZIP-NEXT: vs8r.v v8, (a0) +; ZIP-NEXT: vl8r.v v16, (a2) +; ZIP-NEXT: vl8r.v v8, (a0) +; ZIP-NEXT: addi sp, s0, -80 +; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZIP-NEXT: addi sp, sp, 80 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave5.nxv80i8( %a, %b, %c, %d, %e) + ret %res +} + + +define @vector_interleave_nxv40i8_nxv8i8( %a, %b, %c, %d, %e) nounwind { +; CHECK-LABEL: vector_interleave_nxv40i8_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 2 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: add a2, a0, a1 +; CHECK-NEXT: add a3, a2, a1 +; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: vl1r.v v10, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1r.v v11, (a3) +; CHECK-NEXT: vl1r.v v8, (a0) +; CHECK-NEXT: vl1r.v v9, (a2) +; CHECK-NEXT: add a1, a3, a1 +; CHECK-NEXT: vl1r.v v12, (a1) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 2 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv40i8_nxv8i8: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 2 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: add a2, a0, a1 +; ZVBB-NEXT: add a3, a2, a1 +; ZVBB-NEXT: vsetvli a4, zero, e8, m1, ta, ma +; ZVBB-NEXT: vsseg5e8.v v8, (a0) +; ZVBB-NEXT: vl1r.v v10, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1r.v v11, (a3) +; ZVBB-NEXT: vl1r.v v8, (a0) +; ZVBB-NEXT: vl1r.v v9, (a2) +; ZVBB-NEXT: add a1, a3, a1 +; ZVBB-NEXT: vl1r.v v12, (a1) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 2 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave5.nxv40i8( %a, %b, %c, %d, %e) + ret %res +} + + +define @vector_interleave_nxv20i32_nxv4i32( %a, %b, %c, %d, %e) nounwind { +; +; RV32-LABEL: vector_interleave_nxv20i32_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -80 +; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; RV32-NEXT: addi s0, sp, 80 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 28 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; RV32-NEXT: vmv2r.v v20, v16 +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vmv2r.v 
v18, v12 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a2, a1, 2 +; RV32-NEXT: add a1, a2, a1 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 64 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: vmv2r.v v16, v8 +; RV32-NEXT: vmv2r.v v22, v16 +; RV32-NEXT: vmv2r.v v24, v18 +; RV32-NEXT: vmv1r.v v26, v20 +; RV32-NEXT: add a3, a0, a2 +; RV32-NEXT: vmv1r.v v23, v10 +; RV32-NEXT: add a4, a1, a2 +; RV32-NEXT: add a5, a4, a2 +; RV32-NEXT: vmv1r.v v25, v14 +; RV32-NEXT: add a6, a5, a2 +; RV32-NEXT: vmv1r.v v18, v11 +; RV32-NEXT: vsseg5e32.v v22, (a0) +; RV32-NEXT: vmv1r.v v20, v15 +; RV32-NEXT: vsseg5e32.v v17, (a1) +; RV32-NEXT: vl1re32.v v16, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re32.v v17, (a6) +; RV32-NEXT: add a6, a3, a2 +; RV32-NEXT: vl1re32.v v10, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re32.v v11, (a6) +; RV32-NEXT: vl1re32.v v8, (a0) +; RV32-NEXT: vl1re32.v v9, (a3) +; RV32-NEXT: vl1re32.v v14, (a4) +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 10 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 64 +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re32.v v15, (a5) +; RV32-NEXT: vl1re32.v v12, (a6) +; RV32-NEXT: vl1re32.v v13, (a1) +; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: add a2, a0, a2 +; RV32-NEXT: vs2r.v v16, (a2) +; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: vl8re32.v v16, (a2) +; RV32-NEXT: vl8re32.v v8, (a0) +; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 80 +; RV32-NEXT: ret +; +; RV64-LABEL: vector_interleave_nxv20i32_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; RV64-NEXT: addi s0, sp, 80 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a1, 28 +; RV64-NEXT: mul a0, a0, a1 +; RV64-NEXT: sub sp, sp, a0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; RV64-NEXT: vmv2r.v v20, v16 +; RV64-NEXT: addi a0, sp, 64 +; RV64-NEXT: vmv2r.v v18, v12 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a2, a1, 2 +; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 64 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: vmv2r.v v16, v8 +; RV64-NEXT: vmv2r.v v22, v16 +; RV64-NEXT: vmv2r.v v24, v18 +; RV64-NEXT: vmv1r.v v26, v20 +; RV64-NEXT: add a3, a0, a2 +; RV64-NEXT: vmv1r.v v23, v10 +; RV64-NEXT: add a4, a1, a2 +; RV64-NEXT: add a5, a4, a2 +; RV64-NEXT: vmv1r.v v25, v14 +; RV64-NEXT: add a6, a5, a2 +; RV64-NEXT: vmv1r.v v18, v11 +; RV64-NEXT: vsseg5e32.v v22, (a0) +; RV64-NEXT: vmv1r.v v20, v15 +; RV64-NEXT: vsseg5e32.v v17, (a1) +; RV64-NEXT: vl1re32.v v16, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re32.v v17, (a6) +; RV64-NEXT: add a6, a3, a2 +; RV64-NEXT: vl1re32.v v10, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re32.v v11, (a6) +; RV64-NEXT: vl1re32.v v8, (a0) +; RV64-NEXT: vl1re32.v v9, (a3) +; RV64-NEXT: vl1re32.v v14, (a4) +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a3, 10 +; RV64-NEXT: mul a0, a0, a3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 64 +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re32.v v15, (a5) +; RV64-NEXT: vl1re32.v v12, (a6) +; RV64-NEXT: vl1re32.v v13, (a1) +; RV64-NEXT: slli a2, a2, 3 +; RV64-NEXT: add a2, a0, a2 +; RV64-NEXT: vs2r.v v16, (a2) +; RV64-NEXT: vs8r.v v8, (a0) +; RV64-NEXT: vl8re32.v v16, (a2) +; RV64-NEXT: vl8re32.v v8, (a0) +; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: ld ra, 72(sp) # 8-byte 
Folded Reload +; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; ZVBB-RV32-LABEL: vector_interleave_nxv20i32_nxv4i32: +; ZVBB-RV32: # %bb.0: +; ZVBB-RV32-NEXT: addi sp, sp, -80 +; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: addi s0, sp, 80 +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: li a1, 28 +; ZVBB-RV32-NEXT: mul a0, a0, a1 +; ZVBB-RV32-NEXT: sub sp, sp, a0 +; ZVBB-RV32-NEXT: andi sp, sp, -64 +; ZVBB-RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; ZVBB-RV32-NEXT: vmv2r.v v20, v16 +; ZVBB-RV32-NEXT: addi a0, sp, 64 +; ZVBB-RV32-NEXT: vmv2r.v v18, v12 +; ZVBB-RV32-NEXT: csrr a1, vlenb +; ZVBB-RV32-NEXT: slli a2, a1, 2 +; ZVBB-RV32-NEXT: add a1, a2, a1 +; ZVBB-RV32-NEXT: add a1, sp, a1 +; ZVBB-RV32-NEXT: addi a1, a1, 64 +; ZVBB-RV32-NEXT: csrr a2, vlenb +; ZVBB-RV32-NEXT: vmv2r.v v16, v8 +; ZVBB-RV32-NEXT: vmv2r.v v22, v16 +; ZVBB-RV32-NEXT: vmv2r.v v24, v18 +; ZVBB-RV32-NEXT: vmv1r.v v26, v20 +; ZVBB-RV32-NEXT: add a3, a0, a2 +; ZVBB-RV32-NEXT: vmv1r.v v23, v10 +; ZVBB-RV32-NEXT: add a4, a1, a2 +; ZVBB-RV32-NEXT: add a5, a4, a2 +; ZVBB-RV32-NEXT: vmv1r.v v25, v14 +; ZVBB-RV32-NEXT: add a6, a5, a2 +; ZVBB-RV32-NEXT: vmv1r.v v18, v11 +; ZVBB-RV32-NEXT: vsseg5e32.v v22, (a0) +; ZVBB-RV32-NEXT: vmv1r.v v20, v15 +; ZVBB-RV32-NEXT: vsseg5e32.v v17, (a1) +; ZVBB-RV32-NEXT: vl1re32.v v16, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re32.v v17, (a6) +; ZVBB-RV32-NEXT: add a6, a3, a2 +; ZVBB-RV32-NEXT: vl1re32.v v10, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re32.v v11, (a6) +; ZVBB-RV32-NEXT: vl1re32.v v8, (a0) +; ZVBB-RV32-NEXT: vl1re32.v v9, (a3) +; ZVBB-RV32-NEXT: vl1re32.v v14, (a4) +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: li a3, 10 +; ZVBB-RV32-NEXT: mul a0, a0, a3 +; ZVBB-RV32-NEXT: add a0, sp, a0 +; ZVBB-RV32-NEXT: addi a0, a0, 64 +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re32.v v15, (a5) +; ZVBB-RV32-NEXT: vl1re32.v v12, (a6) +; ZVBB-RV32-NEXT: vl1re32.v v13, (a1) +; ZVBB-RV32-NEXT: slli a2, a2, 3 +; ZVBB-RV32-NEXT: add a2, a0, a2 +; ZVBB-RV32-NEXT: vs2r.v v16, (a2) +; ZVBB-RV32-NEXT: vs8r.v v8, (a0) +; ZVBB-RV32-NEXT: vl8re32.v v16, (a2) +; ZVBB-RV32-NEXT: vl8re32.v v8, (a0) +; ZVBB-RV32-NEXT: addi sp, s0, -80 +; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: addi sp, sp, 80 +; ZVBB-RV32-NEXT: ret +; +; ZVBB-RV64-LABEL: vector_interleave_nxv20i32_nxv4i32: +; ZVBB-RV64: # %bb.0: +; ZVBB-RV64-NEXT: addi sp, sp, -80 +; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: addi s0, sp, 80 +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a1, 28 +; ZVBB-RV64-NEXT: mul a0, a0, a1 +; ZVBB-RV64-NEXT: sub sp, sp, a0 +; ZVBB-RV64-NEXT: andi sp, sp, -64 +; ZVBB-RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; ZVBB-RV64-NEXT: vmv2r.v v20, v16 +; ZVBB-RV64-NEXT: addi a0, sp, 64 +; ZVBB-RV64-NEXT: vmv2r.v v18, v12 +; ZVBB-RV64-NEXT: csrr a1, vlenb +; ZVBB-RV64-NEXT: slli a2, a1, 2 +; ZVBB-RV64-NEXT: add a1, a2, a1 +; ZVBB-RV64-NEXT: add a1, sp, a1 +; ZVBB-RV64-NEXT: addi a1, a1, 64 +; ZVBB-RV64-NEXT: csrr a2, vlenb +; ZVBB-RV64-NEXT: vmv2r.v v16, v8 +; ZVBB-RV64-NEXT: vmv2r.v v22, v16 +; ZVBB-RV64-NEXT: vmv2r.v v24, v18 +; ZVBB-RV64-NEXT: vmv1r.v v26, v20 +; ZVBB-RV64-NEXT: add a3, a0, a2 +; ZVBB-RV64-NEXT: vmv1r.v v23, v10 +; ZVBB-RV64-NEXT: 
add a4, a1, a2 +; ZVBB-RV64-NEXT: add a5, a4, a2 +; ZVBB-RV64-NEXT: vmv1r.v v25, v14 +; ZVBB-RV64-NEXT: add a6, a5, a2 +; ZVBB-RV64-NEXT: vmv1r.v v18, v11 +; ZVBB-RV64-NEXT: vsseg5e32.v v22, (a0) +; ZVBB-RV64-NEXT: vmv1r.v v20, v15 +; ZVBB-RV64-NEXT: vsseg5e32.v v17, (a1) +; ZVBB-RV64-NEXT: vl1re32.v v16, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re32.v v17, (a6) +; ZVBB-RV64-NEXT: add a6, a3, a2 +; ZVBB-RV64-NEXT: vl1re32.v v10, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re32.v v11, (a6) +; ZVBB-RV64-NEXT: vl1re32.v v8, (a0) +; ZVBB-RV64-NEXT: vl1re32.v v9, (a3) +; ZVBB-RV64-NEXT: vl1re32.v v14, (a4) +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a3, 10 +; ZVBB-RV64-NEXT: mul a0, a0, a3 +; ZVBB-RV64-NEXT: add a0, sp, a0 +; ZVBB-RV64-NEXT: addi a0, a0, 64 +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re32.v v15, (a5) +; ZVBB-RV64-NEXT: vl1re32.v v12, (a6) +; ZVBB-RV64-NEXT: vl1re32.v v13, (a1) +; ZVBB-RV64-NEXT: slli a2, a2, 3 +; ZVBB-RV64-NEXT: add a2, a0, a2 +; ZVBB-RV64-NEXT: vs2r.v v16, (a2) +; ZVBB-RV64-NEXT: vs8r.v v8, (a0) +; ZVBB-RV64-NEXT: vl8re32.v v16, (a2) +; ZVBB-RV64-NEXT: vl8re32.v v8, (a0) +; ZVBB-RV64-NEXT: addi sp, s0, -80 +; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: addi sp, sp, 80 +; ZVBB-RV64-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv20i32_nxv4i32: +; ZIP: # %bb.0: +; ZIP-NEXT: addi sp, sp, -80 +; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZIP-NEXT: addi s0, sp, 80 +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a1, 28 +; ZIP-NEXT: mul a0, a0, a1 +; ZIP-NEXT: sub sp, sp, a0 +; ZIP-NEXT: andi sp, sp, -64 +; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; ZIP-NEXT: vmv2r.v v20, v16 +; ZIP-NEXT: addi a0, sp, 64 +; ZIP-NEXT: vmv2r.v v18, v12 +; ZIP-NEXT: csrr a1, vlenb +; ZIP-NEXT: slli a2, a1, 2 +; ZIP-NEXT: add a1, a2, a1 +; ZIP-NEXT: add a1, sp, a1 +; ZIP-NEXT: addi a1, a1, 64 +; ZIP-NEXT: csrr a2, vlenb +; ZIP-NEXT: vmv2r.v v16, v8 +; ZIP-NEXT: vmv2r.v v22, v16 +; ZIP-NEXT: vmv2r.v v24, v18 +; ZIP-NEXT: vmv1r.v v26, v20 +; ZIP-NEXT: add a3, a0, a2 +; ZIP-NEXT: vmv1r.v v23, v10 +; ZIP-NEXT: add a4, a1, a2 +; ZIP-NEXT: add a5, a4, a2 +; ZIP-NEXT: vmv1r.v v25, v14 +; ZIP-NEXT: add a6, a5, a2 +; ZIP-NEXT: vmv1r.v v18, v11 +; ZIP-NEXT: vsseg5e32.v v22, (a0) +; ZIP-NEXT: vmv1r.v v20, v15 +; ZIP-NEXT: vsseg5e32.v v17, (a1) +; ZIP-NEXT: vl1re32.v v16, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re32.v v17, (a6) +; ZIP-NEXT: add a6, a3, a2 +; ZIP-NEXT: vl1re32.v v10, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re32.v v11, (a6) +; ZIP-NEXT: vl1re32.v v8, (a0) +; ZIP-NEXT: vl1re32.v v9, (a3) +; ZIP-NEXT: vl1re32.v v14, (a4) +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a3, 10 +; ZIP-NEXT: mul a0, a0, a3 +; ZIP-NEXT: add a0, sp, a0 +; ZIP-NEXT: addi a0, a0, 64 +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re32.v v15, (a5) +; ZIP-NEXT: vl1re32.v v12, (a6) +; ZIP-NEXT: vl1re32.v v13, (a1) +; ZIP-NEXT: slli a2, a2, 3 +; ZIP-NEXT: add a2, a0, a2 +; ZIP-NEXT: vs2r.v v16, (a2) +; ZIP-NEXT: vs8r.v v8, (a0) +; ZIP-NEXT: vl8re32.v v16, (a2) +; ZIP-NEXT: vl8re32.v v8, (a0) +; ZIP-NEXT: addi sp, s0, -80 +; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZIP-NEXT: addi sp, sp, 80 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave5.nxv20i32( %a, %b, %c, %d, %e) + ret %res +} + + +define @vector_interleave_nxv10i64_nxv2i64( %a, %b, %c, %d, 
%e) nounwind { +; +; RV32-LABEL: vector_interleave_nxv10i64_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -80 +; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; RV32-NEXT: addi s0, sp, 80 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 28 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32-NEXT: vmv2r.v v20, v16 +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vmv2r.v v18, v12 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a2, a1, 2 +; RV32-NEXT: add a1, a2, a1 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 64 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: vmv2r.v v16, v8 +; RV32-NEXT: vmv2r.v v22, v16 +; RV32-NEXT: vmv2r.v v24, v18 +; RV32-NEXT: vmv1r.v v26, v20 +; RV32-NEXT: add a3, a0, a2 +; RV32-NEXT: vmv1r.v v23, v10 +; RV32-NEXT: add a4, a1, a2 +; RV32-NEXT: add a5, a4, a2 +; RV32-NEXT: vmv1r.v v25, v14 +; RV32-NEXT: add a6, a5, a2 +; RV32-NEXT: vmv1r.v v18, v11 +; RV32-NEXT: vsseg5e64.v v22, (a0) +; RV32-NEXT: vmv1r.v v20, v15 +; RV32-NEXT: vsseg5e64.v v17, (a1) +; RV32-NEXT: vl1re64.v v16, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re64.v v17, (a6) +; RV32-NEXT: add a6, a3, a2 +; RV32-NEXT: vl1re64.v v10, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re64.v v11, (a6) +; RV32-NEXT: vl1re64.v v8, (a0) +; RV32-NEXT: vl1re64.v v9, (a3) +; RV32-NEXT: vl1re64.v v14, (a4) +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 10 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 64 +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re64.v v15, (a5) +; RV32-NEXT: vl1re64.v v12, (a6) +; RV32-NEXT: vl1re64.v v13, (a1) +; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: add a2, a0, a2 +; RV32-NEXT: vs2r.v v16, (a2) +; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: vl8re64.v v16, (a2) +; RV32-NEXT: vl8re64.v v8, (a0) +; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 80 +; RV32-NEXT: ret +; +; RV64-LABEL: vector_interleave_nxv10i64_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; RV64-NEXT: addi s0, sp, 80 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a1, 28 +; RV64-NEXT: mul a0, a0, a1 +; RV64-NEXT: sub sp, sp, a0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64-NEXT: vmv2r.v v20, v16 +; RV64-NEXT: addi a0, sp, 64 +; RV64-NEXT: vmv2r.v v18, v12 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a2, a1, 2 +; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 64 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: vmv2r.v v16, v8 +; RV64-NEXT: vmv2r.v v22, v16 +; RV64-NEXT: vmv2r.v v24, v18 +; RV64-NEXT: vmv1r.v v26, v20 +; RV64-NEXT: add a3, a0, a2 +; RV64-NEXT: vmv1r.v v23, v10 +; RV64-NEXT: add a4, a1, a2 +; RV64-NEXT: add a5, a4, a2 +; RV64-NEXT: vmv1r.v v25, v14 +; RV64-NEXT: add a6, a5, a2 +; RV64-NEXT: vmv1r.v v18, v11 +; RV64-NEXT: vsseg5e64.v v22, (a0) +; RV64-NEXT: vmv1r.v v20, v15 +; RV64-NEXT: vsseg5e64.v v17, (a1) +; RV64-NEXT: vl1re64.v v16, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re64.v v17, (a6) +; RV64-NEXT: add a6, a3, a2 +; RV64-NEXT: vl1re64.v v10, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re64.v v11, (a6) +; RV64-NEXT: vl1re64.v v8, (a0) +; RV64-NEXT: vl1re64.v v9, (a3) +; RV64-NEXT: vl1re64.v v14, (a4) +; 
RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a3, 10 +; RV64-NEXT: mul a0, a0, a3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 64 +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re64.v v15, (a5) +; RV64-NEXT: vl1re64.v v12, (a6) +; RV64-NEXT: vl1re64.v v13, (a1) +; RV64-NEXT: slli a2, a2, 3 +; RV64-NEXT: add a2, a0, a2 +; RV64-NEXT: vs2r.v v16, (a2) +; RV64-NEXT: vs8r.v v8, (a0) +; RV64-NEXT: vl8re64.v v16, (a2) +; RV64-NEXT: vl8re64.v v8, (a0) +; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; ZVBB-RV32-LABEL: vector_interleave_nxv10i64_nxv2i64: +; ZVBB-RV32: # %bb.0: +; ZVBB-RV32-NEXT: addi sp, sp, -80 +; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: addi s0, sp, 80 +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: li a1, 28 +; ZVBB-RV32-NEXT: mul a0, a0, a1 +; ZVBB-RV32-NEXT: sub sp, sp, a0 +; ZVBB-RV32-NEXT: andi sp, sp, -64 +; ZVBB-RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; ZVBB-RV32-NEXT: vmv2r.v v20, v16 +; ZVBB-RV32-NEXT: addi a0, sp, 64 +; ZVBB-RV32-NEXT: vmv2r.v v18, v12 +; ZVBB-RV32-NEXT: csrr a1, vlenb +; ZVBB-RV32-NEXT: slli a2, a1, 2 +; ZVBB-RV32-NEXT: add a1, a2, a1 +; ZVBB-RV32-NEXT: add a1, sp, a1 +; ZVBB-RV32-NEXT: addi a1, a1, 64 +; ZVBB-RV32-NEXT: csrr a2, vlenb +; ZVBB-RV32-NEXT: vmv2r.v v16, v8 +; ZVBB-RV32-NEXT: vmv2r.v v22, v16 +; ZVBB-RV32-NEXT: vmv2r.v v24, v18 +; ZVBB-RV32-NEXT: vmv1r.v v26, v20 +; ZVBB-RV32-NEXT: add a3, a0, a2 +; ZVBB-RV32-NEXT: vmv1r.v v23, v10 +; ZVBB-RV32-NEXT: add a4, a1, a2 +; ZVBB-RV32-NEXT: add a5, a4, a2 +; ZVBB-RV32-NEXT: vmv1r.v v25, v14 +; ZVBB-RV32-NEXT: add a6, a5, a2 +; ZVBB-RV32-NEXT: vmv1r.v v18, v11 +; ZVBB-RV32-NEXT: vsseg5e64.v v22, (a0) +; ZVBB-RV32-NEXT: vmv1r.v v20, v15 +; ZVBB-RV32-NEXT: vsseg5e64.v v17, (a1) +; ZVBB-RV32-NEXT: vl1re64.v v16, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re64.v v17, (a6) +; ZVBB-RV32-NEXT: add a6, a3, a2 +; ZVBB-RV32-NEXT: vl1re64.v v10, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re64.v v11, (a6) +; ZVBB-RV32-NEXT: vl1re64.v v8, (a0) +; ZVBB-RV32-NEXT: vl1re64.v v9, (a3) +; ZVBB-RV32-NEXT: vl1re64.v v14, (a4) +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: li a3, 10 +; ZVBB-RV32-NEXT: mul a0, a0, a3 +; ZVBB-RV32-NEXT: add a0, sp, a0 +; ZVBB-RV32-NEXT: addi a0, a0, 64 +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re64.v v15, (a5) +; ZVBB-RV32-NEXT: vl1re64.v v12, (a6) +; ZVBB-RV32-NEXT: vl1re64.v v13, (a1) +; ZVBB-RV32-NEXT: slli a2, a2, 3 +; ZVBB-RV32-NEXT: add a2, a0, a2 +; ZVBB-RV32-NEXT: vs2r.v v16, (a2) +; ZVBB-RV32-NEXT: vs8r.v v8, (a0) +; ZVBB-RV32-NEXT: vl8re64.v v16, (a2) +; ZVBB-RV32-NEXT: vl8re64.v v8, (a0) +; ZVBB-RV32-NEXT: addi sp, s0, -80 +; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: addi sp, sp, 80 +; ZVBB-RV32-NEXT: ret +; +; ZVBB-RV64-LABEL: vector_interleave_nxv10i64_nxv2i64: +; ZVBB-RV64: # %bb.0: +; ZVBB-RV64-NEXT: addi sp, sp, -80 +; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: addi s0, sp, 80 +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a1, 28 +; ZVBB-RV64-NEXT: mul a0, a0, a1 +; ZVBB-RV64-NEXT: sub sp, sp, a0 +; ZVBB-RV64-NEXT: andi sp, sp, -64 +; ZVBB-RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; ZVBB-RV64-NEXT: 
vmv2r.v v20, v16 +; ZVBB-RV64-NEXT: addi a0, sp, 64 +; ZVBB-RV64-NEXT: vmv2r.v v18, v12 +; ZVBB-RV64-NEXT: csrr a1, vlenb +; ZVBB-RV64-NEXT: slli a2, a1, 2 +; ZVBB-RV64-NEXT: add a1, a2, a1 +; ZVBB-RV64-NEXT: add a1, sp, a1 +; ZVBB-RV64-NEXT: addi a1, a1, 64 +; ZVBB-RV64-NEXT: csrr a2, vlenb +; ZVBB-RV64-NEXT: vmv2r.v v16, v8 +; ZVBB-RV64-NEXT: vmv2r.v v22, v16 +; ZVBB-RV64-NEXT: vmv2r.v v24, v18 +; ZVBB-RV64-NEXT: vmv1r.v v26, v20 +; ZVBB-RV64-NEXT: add a3, a0, a2 +; ZVBB-RV64-NEXT: vmv1r.v v23, v10 +; ZVBB-RV64-NEXT: add a4, a1, a2 +; ZVBB-RV64-NEXT: add a5, a4, a2 +; ZVBB-RV64-NEXT: vmv1r.v v25, v14 +; ZVBB-RV64-NEXT: add a6, a5, a2 +; ZVBB-RV64-NEXT: vmv1r.v v18, v11 +; ZVBB-RV64-NEXT: vsseg5e64.v v22, (a0) +; ZVBB-RV64-NEXT: vmv1r.v v20, v15 +; ZVBB-RV64-NEXT: vsseg5e64.v v17, (a1) +; ZVBB-RV64-NEXT: vl1re64.v v16, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re64.v v17, (a6) +; ZVBB-RV64-NEXT: add a6, a3, a2 +; ZVBB-RV64-NEXT: vl1re64.v v10, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re64.v v11, (a6) +; ZVBB-RV64-NEXT: vl1re64.v v8, (a0) +; ZVBB-RV64-NEXT: vl1re64.v v9, (a3) +; ZVBB-RV64-NEXT: vl1re64.v v14, (a4) +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a3, 10 +; ZVBB-RV64-NEXT: mul a0, a0, a3 +; ZVBB-RV64-NEXT: add a0, sp, a0 +; ZVBB-RV64-NEXT: addi a0, a0, 64 +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re64.v v15, (a5) +; ZVBB-RV64-NEXT: vl1re64.v v12, (a6) +; ZVBB-RV64-NEXT: vl1re64.v v13, (a1) +; ZVBB-RV64-NEXT: slli a2, a2, 3 +; ZVBB-RV64-NEXT: add a2, a0, a2 +; ZVBB-RV64-NEXT: vs2r.v v16, (a2) +; ZVBB-RV64-NEXT: vs8r.v v8, (a0) +; ZVBB-RV64-NEXT: vl8re64.v v16, (a2) +; ZVBB-RV64-NEXT: vl8re64.v v8, (a0) +; ZVBB-RV64-NEXT: addi sp, s0, -80 +; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: addi sp, sp, 80 +; ZVBB-RV64-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv10i64_nxv2i64: +; ZIP: # %bb.0: +; ZIP-NEXT: addi sp, sp, -80 +; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZIP-NEXT: addi s0, sp, 80 +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a1, 28 +; ZIP-NEXT: mul a0, a0, a1 +; ZIP-NEXT: sub sp, sp, a0 +; ZIP-NEXT: andi sp, sp, -64 +; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; ZIP-NEXT: vmv2r.v v20, v16 +; ZIP-NEXT: addi a0, sp, 64 +; ZIP-NEXT: vmv2r.v v18, v12 +; ZIP-NEXT: csrr a1, vlenb +; ZIP-NEXT: slli a2, a1, 2 +; ZIP-NEXT: add a1, a2, a1 +; ZIP-NEXT: add a1, sp, a1 +; ZIP-NEXT: addi a1, a1, 64 +; ZIP-NEXT: csrr a2, vlenb +; ZIP-NEXT: vmv2r.v v16, v8 +; ZIP-NEXT: vmv2r.v v22, v16 +; ZIP-NEXT: vmv2r.v v24, v18 +; ZIP-NEXT: vmv1r.v v26, v20 +; ZIP-NEXT: add a3, a0, a2 +; ZIP-NEXT: vmv1r.v v23, v10 +; ZIP-NEXT: add a4, a1, a2 +; ZIP-NEXT: add a5, a4, a2 +; ZIP-NEXT: vmv1r.v v25, v14 +; ZIP-NEXT: add a6, a5, a2 +; ZIP-NEXT: vmv1r.v v18, v11 +; ZIP-NEXT: vsseg5e64.v v22, (a0) +; ZIP-NEXT: vmv1r.v v20, v15 +; ZIP-NEXT: vsseg5e64.v v17, (a1) +; ZIP-NEXT: vl1re64.v v16, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re64.v v17, (a6) +; ZIP-NEXT: add a6, a3, a2 +; ZIP-NEXT: vl1re64.v v10, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re64.v v11, (a6) +; ZIP-NEXT: vl1re64.v v8, (a0) +; ZIP-NEXT: vl1re64.v v9, (a3) +; ZIP-NEXT: vl1re64.v v14, (a4) +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a3, 10 +; ZIP-NEXT: mul a0, a0, a3 +; ZIP-NEXT: add a0, sp, a0 +; ZIP-NEXT: addi a0, a0, 64 +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re64.v v15, (a5) +; ZIP-NEXT: vl1re64.v v12, (a6) +; 
ZIP-NEXT: vl1re64.v v13, (a1) +; ZIP-NEXT: slli a2, a2, 3 +; ZIP-NEXT: add a2, a0, a2 +; ZIP-NEXT: vs2r.v v16, (a2) +; ZIP-NEXT: vs8r.v v8, (a0) +; ZIP-NEXT: vl8re64.v v16, (a2) +; ZIP-NEXT: vl8re64.v v8, (a0) +; ZIP-NEXT: addi sp, s0, -80 +; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZIP-NEXT: addi sp, sp, 80 +; ZIP-NEXT: ret + %res = call <vscale x 10 x i64> @llvm.vector.interleave5.nxv10i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e) + ret <vscale x 10 x i64> %res +} + +define <vscale x 112 x i1> @vector_interleave_nxv112i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g) nounwind { +; CHECK-LABEL: vector_interleave_nxv112i1_nxv16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 14 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmv.v.i v14, 0 +; CHECK-NEXT: addi a4, sp, 16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 3 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: vmerge.vim v16, v14, 1, v0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmerge.vim v22, v14, 1, v0 +; CHECK-NEXT: add a3, a4, a2 +; CHECK-NEXT: srli a1, a2, 2 +; CHECK-NEXT: add a5, a0, a2 +; CHECK-NEXT: vmv4r.v v24, v16 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmerge.vim v18, v14, 1, v0 +; CHECK-NEXT: add a6, a3, a2 +; CHECK-NEXT: vmv1r.v v25, v22 +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmerge.vim v8, v14, 1, v0 +; CHECK-NEXT: vmv1r.v v26, v18 +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmerge.vim v20, v14, 1, v0 +; CHECK-NEXT: vmv1r.v v27, v8 +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmerge.vim v10, v14, 1, v0 +; CHECK-NEXT: vmv1r.v v28, v20 +; CHECK-NEXT: vmv1r.v v18, v23 +; CHECK-NEXT: add a7, a6, a2 +; CHECK-NEXT: vmv1r.v v29, v10 +; CHECK-NEXT: vmv1r.v v20, v9 +; CHECK-NEXT: vmv1r.v v0, v13 +; CHECK-NEXT: vmerge.vim v30, v14, 1, v0 +; CHECK-NEXT: vmv1r.v v22, v11 +; CHECK-NEXT: vsetvli t0, zero, e8, m1, ta, ma +; CHECK-NEXT: vsseg7e8.v v24, (a4) +; CHECK-NEXT: vmv1r.v v23, v31 +; CHECK-NEXT: vsseg7e8.v v17, (a0) +; CHECK-NEXT: vl1r.v v8, (a6) +; CHECK-NEXT: add a6, a7, a2 +; CHECK-NEXT: vl1r.v v10, (a4) +; CHECK-NEXT: add a4, a6, a2 +; CHECK-NEXT: vl1r.v v12, (a6) +; CHECK-NEXT: add a6, a4, a2 +; CHECK-NEXT: vl1r.v v14, (a6) +; CHECK-NEXT: add a6, a5, a2 +; CHECK-NEXT: vl1r.v v16, (a5) +; CHECK-NEXT: add a5, a6, a2 +; CHECK-NEXT: vl1r.v v18, (a5) +; CHECK-NEXT: add a5, a5, a2 +; CHECK-NEXT: vl1r.v v9, (a7) +; CHECK-NEXT: add a7, a5, a2 +; CHECK-NEXT: vl1r.v v20, (a7) +; CHECK-NEXT: add a7, a7, a2 +; CHECK-NEXT: srli a2, a2, 1 +; CHECK-NEXT: vl1r.v v11, (a3) +; CHECK-NEXT: add a3, a1, a1 +; CHECK-NEXT: vl1r.v v13, (a4) +; CHECK-NEXT: add a4, a2, a2 +; CHECK-NEXT: vl1r.v v15, (a0) +; CHECK-NEXT: vl1r.v v19, (a5) +; CHECK-NEXT: vl1r.v v17, (a6) +; CHECK-NEXT: vl1r.v v21, (a7) +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vmsne.vi v22, v8, 0 +; CHECK-NEXT: vmsne.vi v0, v10, 0 +; CHECK-NEXT: vmsne.vi v9, v12, 0 +; CHECK-NEXT: vmsne.vi v10, v14, 0 +; CHECK-NEXT: vmsne.vi v11, v18, 0 +; CHECK-NEXT: vmsne.vi v8, v16, 0 +; CHECK-NEXT: vmsne.vi v12, v20, 0 +; CHECK-NEXT: vsetvli zero, a3, e8, mf2, ta, ma +; CHECK-NEXT: vslideup.vx v0, v22, a1 +; CHECK-NEXT: vslideup.vx v9, v10, a1 +; CHECK-NEXT: vslideup.vx v8, v11, a1 +; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma +; CHECK-NEXT: vslideup.vx v0, v9, a2 +; CHECK-NEXT: vslideup.vx v8, v12, a2 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 14 +; 
CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv112i1_nxv16i1: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 14 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; ZVBB-NEXT: vmv.v.i v14, 0 +; ZVBB-NEXT: addi a4, sp, 16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 3 +; ZVBB-NEXT: sub a0, a1, a0 +; ZVBB-NEXT: add a0, sp, a0 +; ZVBB-NEXT: addi a0, a0, 16 +; ZVBB-NEXT: csrr a2, vlenb +; ZVBB-NEXT: vmerge.vim v16, v14, 1, v0 +; ZVBB-NEXT: vmv1r.v v0, v8 +; ZVBB-NEXT: vmerge.vim v22, v14, 1, v0 +; ZVBB-NEXT: add a3, a4, a2 +; ZVBB-NEXT: srli a1, a2, 2 +; ZVBB-NEXT: add a5, a0, a2 +; ZVBB-NEXT: vmv4r.v v24, v16 +; ZVBB-NEXT: vmv1r.v v0, v9 +; ZVBB-NEXT: vmerge.vim v18, v14, 1, v0 +; ZVBB-NEXT: add a6, a3, a2 +; ZVBB-NEXT: vmv1r.v v25, v22 +; ZVBB-NEXT: vmv1r.v v0, v10 +; ZVBB-NEXT: vmerge.vim v8, v14, 1, v0 +; ZVBB-NEXT: vmv1r.v v26, v18 +; ZVBB-NEXT: vmv1r.v v0, v11 +; ZVBB-NEXT: vmerge.vim v20, v14, 1, v0 +; ZVBB-NEXT: vmv1r.v v27, v8 +; ZVBB-NEXT: vmv1r.v v0, v12 +; ZVBB-NEXT: vmerge.vim v10, v14, 1, v0 +; ZVBB-NEXT: vmv1r.v v28, v20 +; ZVBB-NEXT: vmv1r.v v18, v23 +; ZVBB-NEXT: add a7, a6, a2 +; ZVBB-NEXT: vmv1r.v v29, v10 +; ZVBB-NEXT: vmv1r.v v20, v9 +; ZVBB-NEXT: vmv1r.v v0, v13 +; ZVBB-NEXT: vmerge.vim v30, v14, 1, v0 +; ZVBB-NEXT: vmv1r.v v22, v11 +; ZVBB-NEXT: vsetvli t0, zero, e8, m1, ta, ma +; ZVBB-NEXT: vsseg7e8.v v24, (a4) +; ZVBB-NEXT: vmv1r.v v23, v31 +; ZVBB-NEXT: vsseg7e8.v v17, (a0) +; ZVBB-NEXT: vl1r.v v8, (a6) +; ZVBB-NEXT: add a6, a7, a2 +; ZVBB-NEXT: vl1r.v v10, (a4) +; ZVBB-NEXT: add a4, a6, a2 +; ZVBB-NEXT: vl1r.v v12, (a6) +; ZVBB-NEXT: add a6, a4, a2 +; ZVBB-NEXT: vl1r.v v14, (a6) +; ZVBB-NEXT: add a6, a5, a2 +; ZVBB-NEXT: vl1r.v v16, (a5) +; ZVBB-NEXT: add a5, a6, a2 +; ZVBB-NEXT: vl1r.v v18, (a5) +; ZVBB-NEXT: add a5, a5, a2 +; ZVBB-NEXT: vl1r.v v9, (a7) +; ZVBB-NEXT: add a7, a5, a2 +; ZVBB-NEXT: vl1r.v v20, (a7) +; ZVBB-NEXT: add a7, a7, a2 +; ZVBB-NEXT: srli a2, a2, 1 +; ZVBB-NEXT: vl1r.v v11, (a3) +; ZVBB-NEXT: add a3, a1, a1 +; ZVBB-NEXT: vl1r.v v13, (a4) +; ZVBB-NEXT: add a4, a2, a2 +; ZVBB-NEXT: vl1r.v v15, (a0) +; ZVBB-NEXT: vl1r.v v19, (a5) +; ZVBB-NEXT: vl1r.v v17, (a6) +; ZVBB-NEXT: vl1r.v v21, (a7) +; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; ZVBB-NEXT: vmsne.vi v22, v8, 0 +; ZVBB-NEXT: vmsne.vi v0, v10, 0 +; ZVBB-NEXT: vmsne.vi v9, v12, 0 +; ZVBB-NEXT: vmsne.vi v10, v14, 0 +; ZVBB-NEXT: vmsne.vi v11, v18, 0 +; ZVBB-NEXT: vmsne.vi v8, v16, 0 +; ZVBB-NEXT: vmsne.vi v12, v20, 0 +; ZVBB-NEXT: vsetvli zero, a3, e8, mf2, ta, ma +; ZVBB-NEXT: vslideup.vx v0, v22, a1 +; ZVBB-NEXT: vslideup.vx v9, v10, a1 +; ZVBB-NEXT: vslideup.vx v8, v11, a1 +; ZVBB-NEXT: vsetvli zero, a4, e8, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v0, v9, a2 +; ZVBB-NEXT: vslideup.vx v8, v12, a2 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 14 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call <vscale x 112 x i1> @llvm.vector.interleave7.nxv112i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g) + ret <vscale x 112 x i1> %res +} + + +define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g) nounwind { +; +; RV32-LABEL: vector_interleave_nxv112i8_nxv16i8: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -80 +; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; RV32-NEXT: addi s0, sp, 80 +; RV32-NEXT: csrr a0, 
vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; RV32-NEXT: vmv2r.v v26, v20 +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vmv2r.v v24, v16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a2, a1, 3 +; RV32-NEXT: sub a1, a2, a1 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 64 +; RV32-NEXT: vmv2r.v v22, v12 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: vmv2r.v v20, v8 +; RV32-NEXT: vmv1r.v v1, v20 +; RV32-NEXT: vmv1r.v v3, v22 +; RV32-NEXT: vmv1r.v v5, v24 +; RV32-NEXT: vmv1r.v v7, v26 +; RV32-NEXT: add a3, a0, a2 +; RV32-NEXT: vmv1r.v v2, v10 +; RV32-NEXT: add a4, a1, a2 +; RV32-NEXT: slli a5, a2, 2 +; RV32-NEXT: vmv1r.v v4, v14 +; RV32-NEXT: slli a6, a2, 4 +; RV32-NEXT: add a7, a4, a2 +; RV32-NEXT: vmv1r.v v6, v18 +; RV32-NEXT: sub a5, a6, a5 +; RV32-NEXT: vmv1r.v v22, v11 +; RV32-NEXT: add a6, a7, a2 +; RV32-NEXT: vmv1r.v v24, v15 +; RV32-NEXT: vsseg7e8.v v1, (a0) +; RV32-NEXT: vmv1r.v v26, v19 +; RV32-NEXT: vsseg7e8.v v21, (a1) +; RV32-NEXT: vl1r.v v18, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1r.v v19, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1r.v v20, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1r.v v21, (a6) +; RV32-NEXT: add a6, a3, a2 +; RV32-NEXT: vl1r.v v10, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1r.v v11, (a6) +; RV32-NEXT: vl1r.v v8, (a0) +; RV32-NEXT: vl1r.v v16, (a4) +; RV32-NEXT: vl1r.v v9, (a3) +; RV32-NEXT: vl1r.v v17, (a7) +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 14 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 64 +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1r.v v12, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1r.v v13, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: add a2, a0, a2 +; RV32-NEXT: vl1r.v v14, (a6) +; RV32-NEXT: vl1r.v v15, (a1) +; RV32-NEXT: add a5, a0, a5 +; RV32-NEXT: vs2r.v v20, (a5) +; RV32-NEXT: vs4r.v v16, (a2) +; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: vl8r.v v16, (a2) +; RV32-NEXT: vl8r.v v8, (a0) +; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 80 +; RV32-NEXT: ret +; +; RV64-LABEL: vector_interleave_nxv112i8_nxv16i8: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; RV64-NEXT: addi s0, sp, 80 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 5 +; RV64-NEXT: sub sp, sp, a0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; RV64-NEXT: vmv2r.v v26, v20 +; RV64-NEXT: addi a0, sp, 64 +; RV64-NEXT: vmv2r.v v24, v16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a2, a1, 3 +; RV64-NEXT: sub a1, a2, a1 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 64 +; RV64-NEXT: vmv2r.v v22, v12 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: vmv2r.v v20, v8 +; RV64-NEXT: vmv1r.v v1, v20 +; RV64-NEXT: vmv1r.v v3, v22 +; RV64-NEXT: vmv1r.v v5, v24 +; RV64-NEXT: vmv1r.v v7, v26 +; RV64-NEXT: add a3, a0, a2 +; RV64-NEXT: vmv1r.v v2, v10 +; RV64-NEXT: add a4, a1, a2 +; RV64-NEXT: slli a5, a2, 2 +; RV64-NEXT: vmv1r.v v4, v14 +; RV64-NEXT: slli a6, a2, 4 +; RV64-NEXT: add a7, a4, a2 +; RV64-NEXT: vmv1r.v v6, v18 +; RV64-NEXT: sub a5, a6, a5 +; RV64-NEXT: vmv1r.v v22, v11 +; RV64-NEXT: add a6, a7, a2 +; RV64-NEXT: vmv1r.v v24, v15 +; RV64-NEXT: vsseg7e8.v v1, (a0) +; RV64-NEXT: vmv1r.v v26, v19 +; RV64-NEXT: 
vsseg7e8.v v21, (a1) +; RV64-NEXT: vl1r.v v18, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1r.v v19, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1r.v v20, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1r.v v21, (a6) +; RV64-NEXT: add a6, a3, a2 +; RV64-NEXT: vl1r.v v10, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1r.v v11, (a6) +; RV64-NEXT: vl1r.v v8, (a0) +; RV64-NEXT: vl1r.v v16, (a4) +; RV64-NEXT: vl1r.v v9, (a3) +; RV64-NEXT: vl1r.v v17, (a7) +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a3, 14 +; RV64-NEXT: mul a0, a0, a3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 64 +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1r.v v12, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1r.v v13, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: slli a2, a2, 3 +; RV64-NEXT: add a2, a0, a2 +; RV64-NEXT: vl1r.v v14, (a6) +; RV64-NEXT: vl1r.v v15, (a1) +; RV64-NEXT: add a5, a0, a5 +; RV64-NEXT: vs2r.v v20, (a5) +; RV64-NEXT: vs4r.v v16, (a2) +; RV64-NEXT: vs8r.v v8, (a0) +; RV64-NEXT: vl8r.v v16, (a2) +; RV64-NEXT: vl8r.v v8, (a0) +; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; ZVBB-RV32-LABEL: vector_interleave_nxv112i8_nxv16i8: +; ZVBB-RV32: # %bb.0: +; ZVBB-RV32-NEXT: addi sp, sp, -80 +; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: addi s0, sp, 80 +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: slli a0, a0, 5 +; ZVBB-RV32-NEXT: sub sp, sp, a0 +; ZVBB-RV32-NEXT: andi sp, sp, -64 +; ZVBB-RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; ZVBB-RV32-NEXT: vmv2r.v v26, v20 +; ZVBB-RV32-NEXT: addi a0, sp, 64 +; ZVBB-RV32-NEXT: vmv2r.v v24, v16 +; ZVBB-RV32-NEXT: csrr a1, vlenb +; ZVBB-RV32-NEXT: slli a2, a1, 3 +; ZVBB-RV32-NEXT: sub a1, a2, a1 +; ZVBB-RV32-NEXT: add a1, sp, a1 +; ZVBB-RV32-NEXT: addi a1, a1, 64 +; ZVBB-RV32-NEXT: vmv2r.v v22, v12 +; ZVBB-RV32-NEXT: csrr a2, vlenb +; ZVBB-RV32-NEXT: vmv2r.v v20, v8 +; ZVBB-RV32-NEXT: vmv1r.v v1, v20 +; ZVBB-RV32-NEXT: vmv1r.v v3, v22 +; ZVBB-RV32-NEXT: vmv1r.v v5, v24 +; ZVBB-RV32-NEXT: vmv1r.v v7, v26 +; ZVBB-RV32-NEXT: add a3, a0, a2 +; ZVBB-RV32-NEXT: vmv1r.v v2, v10 +; ZVBB-RV32-NEXT: add a4, a1, a2 +; ZVBB-RV32-NEXT: slli a5, a2, 2 +; ZVBB-RV32-NEXT: vmv1r.v v4, v14 +; ZVBB-RV32-NEXT: slli a6, a2, 4 +; ZVBB-RV32-NEXT: add a7, a4, a2 +; ZVBB-RV32-NEXT: vmv1r.v v6, v18 +; ZVBB-RV32-NEXT: sub a5, a6, a5 +; ZVBB-RV32-NEXT: vmv1r.v v22, v11 +; ZVBB-RV32-NEXT: add a6, a7, a2 +; ZVBB-RV32-NEXT: vmv1r.v v24, v15 +; ZVBB-RV32-NEXT: vsseg7e8.v v1, (a0) +; ZVBB-RV32-NEXT: vmv1r.v v26, v19 +; ZVBB-RV32-NEXT: vsseg7e8.v v21, (a1) +; ZVBB-RV32-NEXT: vl1r.v v18, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1r.v v19, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1r.v v20, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1r.v v21, (a6) +; ZVBB-RV32-NEXT: add a6, a3, a2 +; ZVBB-RV32-NEXT: vl1r.v v10, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1r.v v11, (a6) +; ZVBB-RV32-NEXT: vl1r.v v8, (a0) +; ZVBB-RV32-NEXT: vl1r.v v16, (a4) +; ZVBB-RV32-NEXT: vl1r.v v9, (a3) +; ZVBB-RV32-NEXT: vl1r.v v17, (a7) +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: li a3, 14 +; ZVBB-RV32-NEXT: mul a0, a0, a3 +; ZVBB-RV32-NEXT: add a0, sp, a0 +; ZVBB-RV32-NEXT: addi a0, a0, 64 +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1r.v v12, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; 
ZVBB-RV32-NEXT: vl1r.v v13, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: slli a2, a2, 3 +; ZVBB-RV32-NEXT: add a2, a0, a2 +; ZVBB-RV32-NEXT: vl1r.v v14, (a6) +; ZVBB-RV32-NEXT: vl1r.v v15, (a1) +; ZVBB-RV32-NEXT: add a5, a0, a5 +; ZVBB-RV32-NEXT: vs2r.v v20, (a5) +; ZVBB-RV32-NEXT: vs4r.v v16, (a2) +; ZVBB-RV32-NEXT: vs8r.v v8, (a0) +; ZVBB-RV32-NEXT: vl8r.v v16, (a2) +; ZVBB-RV32-NEXT: vl8r.v v8, (a0) +; ZVBB-RV32-NEXT: addi sp, s0, -80 +; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: addi sp, sp, 80 +; ZVBB-RV32-NEXT: ret +; +; ZVBB-RV64-LABEL: vector_interleave_nxv112i8_nxv16i8: +; ZVBB-RV64: # %bb.0: +; ZVBB-RV64-NEXT: addi sp, sp, -80 +; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: addi s0, sp, 80 +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: slli a0, a0, 5 +; ZVBB-RV64-NEXT: sub sp, sp, a0 +; ZVBB-RV64-NEXT: andi sp, sp, -64 +; ZVBB-RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; ZVBB-RV64-NEXT: vmv2r.v v26, v20 +; ZVBB-RV64-NEXT: addi a0, sp, 64 +; ZVBB-RV64-NEXT: vmv2r.v v24, v16 +; ZVBB-RV64-NEXT: csrr a1, vlenb +; ZVBB-RV64-NEXT: slli a2, a1, 3 +; ZVBB-RV64-NEXT: sub a1, a2, a1 +; ZVBB-RV64-NEXT: add a1, sp, a1 +; ZVBB-RV64-NEXT: addi a1, a1, 64 +; ZVBB-RV64-NEXT: vmv2r.v v22, v12 +; ZVBB-RV64-NEXT: csrr a2, vlenb +; ZVBB-RV64-NEXT: vmv2r.v v20, v8 +; ZVBB-RV64-NEXT: vmv1r.v v1, v20 +; ZVBB-RV64-NEXT: vmv1r.v v3, v22 +; ZVBB-RV64-NEXT: vmv1r.v v5, v24 +; ZVBB-RV64-NEXT: vmv1r.v v7, v26 +; ZVBB-RV64-NEXT: add a3, a0, a2 +; ZVBB-RV64-NEXT: vmv1r.v v2, v10 +; ZVBB-RV64-NEXT: add a4, a1, a2 +; ZVBB-RV64-NEXT: slli a5, a2, 2 +; ZVBB-RV64-NEXT: vmv1r.v v4, v14 +; ZVBB-RV64-NEXT: slli a6, a2, 4 +; ZVBB-RV64-NEXT: add a7, a4, a2 +; ZVBB-RV64-NEXT: vmv1r.v v6, v18 +; ZVBB-RV64-NEXT: sub a5, a6, a5 +; ZVBB-RV64-NEXT: vmv1r.v v22, v11 +; ZVBB-RV64-NEXT: add a6, a7, a2 +; ZVBB-RV64-NEXT: vmv1r.v v24, v15 +; ZVBB-RV64-NEXT: vsseg7e8.v v1, (a0) +; ZVBB-RV64-NEXT: vmv1r.v v26, v19 +; ZVBB-RV64-NEXT: vsseg7e8.v v21, (a1) +; ZVBB-RV64-NEXT: vl1r.v v18, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1r.v v19, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1r.v v20, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1r.v v21, (a6) +; ZVBB-RV64-NEXT: add a6, a3, a2 +; ZVBB-RV64-NEXT: vl1r.v v10, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1r.v v11, (a6) +; ZVBB-RV64-NEXT: vl1r.v v8, (a0) +; ZVBB-RV64-NEXT: vl1r.v v16, (a4) +; ZVBB-RV64-NEXT: vl1r.v v9, (a3) +; ZVBB-RV64-NEXT: vl1r.v v17, (a7) +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a3, 14 +; ZVBB-RV64-NEXT: mul a0, a0, a3 +; ZVBB-RV64-NEXT: add a0, sp, a0 +; ZVBB-RV64-NEXT: addi a0, a0, 64 +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1r.v v12, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1r.v v13, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: slli a2, a2, 3 +; ZVBB-RV64-NEXT: add a2, a0, a2 +; ZVBB-RV64-NEXT: vl1r.v v14, (a6) +; ZVBB-RV64-NEXT: vl1r.v v15, (a1) +; ZVBB-RV64-NEXT: add a5, a0, a5 +; ZVBB-RV64-NEXT: vs2r.v v20, (a5) +; ZVBB-RV64-NEXT: vs4r.v v16, (a2) +; ZVBB-RV64-NEXT: vs8r.v v8, (a0) +; ZVBB-RV64-NEXT: vl8r.v v16, (a2) +; ZVBB-RV64-NEXT: vl8r.v v8, (a0) +; ZVBB-RV64-NEXT: addi sp, s0, -80 +; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: addi sp, sp, 80 +; ZVBB-RV64-NEXT: 
ret +; +; ZIP-LABEL: vector_interleave_nxv112i8_nxv16i8: +; ZIP: # %bb.0: +; ZIP-NEXT: addi sp, sp, -80 +; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZIP-NEXT: addi s0, sp, 80 +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: slli a0, a0, 5 +; ZIP-NEXT: sub sp, sp, a0 +; ZIP-NEXT: andi sp, sp, -64 +; ZIP-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; ZIP-NEXT: vmv2r.v v26, v20 +; ZIP-NEXT: addi a0, sp, 64 +; ZIP-NEXT: vmv2r.v v24, v16 +; ZIP-NEXT: csrr a1, vlenb +; ZIP-NEXT: slli a2, a1, 3 +; ZIP-NEXT: sub a1, a2, a1 +; ZIP-NEXT: add a1, sp, a1 +; ZIP-NEXT: addi a1, a1, 64 +; ZIP-NEXT: vmv2r.v v22, v12 +; ZIP-NEXT: csrr a2, vlenb +; ZIP-NEXT: vmv2r.v v20, v8 +; ZIP-NEXT: vmv1r.v v1, v20 +; ZIP-NEXT: vmv1r.v v3, v22 +; ZIP-NEXT: vmv1r.v v5, v24 +; ZIP-NEXT: vmv1r.v v7, v26 +; ZIP-NEXT: add a3, a0, a2 +; ZIP-NEXT: vmv1r.v v2, v10 +; ZIP-NEXT: add a4, a1, a2 +; ZIP-NEXT: slli a5, a2, 2 +; ZIP-NEXT: vmv1r.v v4, v14 +; ZIP-NEXT: slli a6, a2, 4 +; ZIP-NEXT: add a7, a4, a2 +; ZIP-NEXT: vmv1r.v v6, v18 +; ZIP-NEXT: sub a5, a6, a5 +; ZIP-NEXT: vmv1r.v v22, v11 +; ZIP-NEXT: add a6, a7, a2 +; ZIP-NEXT: vmv1r.v v24, v15 +; ZIP-NEXT: vsseg7e8.v v1, (a0) +; ZIP-NEXT: vmv1r.v v26, v19 +; ZIP-NEXT: vsseg7e8.v v21, (a1) +; ZIP-NEXT: vl1r.v v18, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1r.v v19, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1r.v v20, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1r.v v21, (a6) +; ZIP-NEXT: add a6, a3, a2 +; ZIP-NEXT: vl1r.v v10, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1r.v v11, (a6) +; ZIP-NEXT: vl1r.v v8, (a0) +; ZIP-NEXT: vl1r.v v16, (a4) +; ZIP-NEXT: vl1r.v v9, (a3) +; ZIP-NEXT: vl1r.v v17, (a7) +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a3, 14 +; ZIP-NEXT: mul a0, a0, a3 +; ZIP-NEXT: add a0, sp, a0 +; ZIP-NEXT: addi a0, a0, 64 +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1r.v v12, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1r.v v13, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: slli a2, a2, 3 +; ZIP-NEXT: add a2, a0, a2 +; ZIP-NEXT: vl1r.v v14, (a6) +; ZIP-NEXT: vl1r.v v15, (a1) +; ZIP-NEXT: add a5, a0, a5 +; ZIP-NEXT: vs2r.v v20, (a5) +; ZIP-NEXT: vs4r.v v16, (a2) +; ZIP-NEXT: vs8r.v v8, (a0) +; ZIP-NEXT: vl8r.v v16, (a2) +; ZIP-NEXT: vl8r.v v8, (a0) +; ZIP-NEXT: addi sp, s0, -80 +; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZIP-NEXT: addi sp, sp, 80 +; ZIP-NEXT: ret + %res = call <vscale x 112 x i8> @llvm.vector.interleave7.nxv112i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g) + ret <vscale x 112 x i8> %res +} + + +define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g) nounwind { +; +; RV32-LABEL: vector_interleave_nxv56i16_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -80 +; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; RV32-NEXT: addi s0, sp, 80 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; RV32-NEXT: vmv2r.v v26, v20 +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vmv2r.v v24, v16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a2, a1, 3 +; RV32-NEXT: sub a1, a2, a1 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 64 +; RV32-NEXT: vmv2r.v v22, v12 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: vmv2r.v v20, v8 +; RV32-NEXT: vmv1r.v v1, v20 +; RV32-NEXT: vmv1r.v v3, v22 +; RV32-NEXT: vmv1r.v v5, v24 +; RV32-NEXT: vmv1r.v v7, v26 +; RV32-NEXT: add a3, a0, a2 +; RV32-NEXT: vmv1r.v v2, v10 +; RV32-NEXT: add 
a4, a1, a2 +; RV32-NEXT: slli a5, a2, 2 +; RV32-NEXT: vmv1r.v v4, v14 +; RV32-NEXT: slli a6, a2, 4 +; RV32-NEXT: add a7, a4, a2 +; RV32-NEXT: vmv1r.v v6, v18 +; RV32-NEXT: sub a5, a6, a5 +; RV32-NEXT: vmv1r.v v22, v11 +; RV32-NEXT: add a6, a7, a2 +; RV32-NEXT: vmv1r.v v24, v15 +; RV32-NEXT: vsseg7e16.v v1, (a0) +; RV32-NEXT: vmv1r.v v26, v19 +; RV32-NEXT: vsseg7e16.v v21, (a1) +; RV32-NEXT: vl1re16.v v18, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re16.v v19, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re16.v v20, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re16.v v21, (a6) +; RV32-NEXT: add a6, a3, a2 +; RV32-NEXT: vl1re16.v v10, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re16.v v11, (a6) +; RV32-NEXT: vl1re16.v v8, (a0) +; RV32-NEXT: vl1re16.v v16, (a4) +; RV32-NEXT: vl1re16.v v9, (a3) +; RV32-NEXT: vl1re16.v v17, (a7) +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 14 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 64 +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re16.v v12, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re16.v v13, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: add a2, a0, a2 +; RV32-NEXT: vl1re16.v v14, (a6) +; RV32-NEXT: vl1re16.v v15, (a1) +; RV32-NEXT: add a5, a0, a5 +; RV32-NEXT: vs2r.v v20, (a5) +; RV32-NEXT: vs4r.v v16, (a2) +; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: vl8re16.v v16, (a2) +; RV32-NEXT: vl8re16.v v8, (a0) +; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 80 +; RV32-NEXT: ret +; +; RV64-LABEL: vector_interleave_nxv56i16_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; RV64-NEXT: addi s0, sp, 80 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 5 +; RV64-NEXT: sub sp, sp, a0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; RV64-NEXT: vmv2r.v v26, v20 +; RV64-NEXT: addi a0, sp, 64 +; RV64-NEXT: vmv2r.v v24, v16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a2, a1, 3 +; RV64-NEXT: sub a1, a2, a1 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 64 +; RV64-NEXT: vmv2r.v v22, v12 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: vmv2r.v v20, v8 +; RV64-NEXT: vmv1r.v v1, v20 +; RV64-NEXT: vmv1r.v v3, v22 +; RV64-NEXT: vmv1r.v v5, v24 +; RV64-NEXT: vmv1r.v v7, v26 +; RV64-NEXT: add a3, a0, a2 +; RV64-NEXT: vmv1r.v v2, v10 +; RV64-NEXT: add a4, a1, a2 +; RV64-NEXT: slli a5, a2, 2 +; RV64-NEXT: vmv1r.v v4, v14 +; RV64-NEXT: slli a6, a2, 4 +; RV64-NEXT: add a7, a4, a2 +; RV64-NEXT: vmv1r.v v6, v18 +; RV64-NEXT: sub a5, a6, a5 +; RV64-NEXT: vmv1r.v v22, v11 +; RV64-NEXT: add a6, a7, a2 +; RV64-NEXT: vmv1r.v v24, v15 +; RV64-NEXT: vsseg7e16.v v1, (a0) +; RV64-NEXT: vmv1r.v v26, v19 +; RV64-NEXT: vsseg7e16.v v21, (a1) +; RV64-NEXT: vl1re16.v v18, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re16.v v19, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re16.v v20, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re16.v v21, (a6) +; RV64-NEXT: add a6, a3, a2 +; RV64-NEXT: vl1re16.v v10, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re16.v v11, (a6) +; RV64-NEXT: vl1re16.v v8, (a0) +; RV64-NEXT: vl1re16.v v16, (a4) +; RV64-NEXT: vl1re16.v v9, (a3) +; RV64-NEXT: vl1re16.v v17, (a7) +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a3, 14 +; RV64-NEXT: mul a0, a0, a3 +; RV64-NEXT: add a0, sp, a0 +; 
RV64-NEXT: addi a0, a0, 64 +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re16.v v12, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re16.v v13, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: slli a2, a2, 3 +; RV64-NEXT: add a2, a0, a2 +; RV64-NEXT: vl1re16.v v14, (a6) +; RV64-NEXT: vl1re16.v v15, (a1) +; RV64-NEXT: add a5, a0, a5 +; RV64-NEXT: vs2r.v v20, (a5) +; RV64-NEXT: vs4r.v v16, (a2) +; RV64-NEXT: vs8r.v v8, (a0) +; RV64-NEXT: vl8re16.v v16, (a2) +; RV64-NEXT: vl8re16.v v8, (a0) +; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; ZVBB-RV32-LABEL: vector_interleave_nxv56i16_nxv8i16: +; ZVBB-RV32: # %bb.0: +; ZVBB-RV32-NEXT: addi sp, sp, -80 +; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: addi s0, sp, 80 +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: slli a0, a0, 5 +; ZVBB-RV32-NEXT: sub sp, sp, a0 +; ZVBB-RV32-NEXT: andi sp, sp, -64 +; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVBB-RV32-NEXT: vmv2r.v v26, v20 +; ZVBB-RV32-NEXT: addi a0, sp, 64 +; ZVBB-RV32-NEXT: vmv2r.v v24, v16 +; ZVBB-RV32-NEXT: csrr a1, vlenb +; ZVBB-RV32-NEXT: slli a2, a1, 3 +; ZVBB-RV32-NEXT: sub a1, a2, a1 +; ZVBB-RV32-NEXT: add a1, sp, a1 +; ZVBB-RV32-NEXT: addi a1, a1, 64 +; ZVBB-RV32-NEXT: vmv2r.v v22, v12 +; ZVBB-RV32-NEXT: csrr a2, vlenb +; ZVBB-RV32-NEXT: vmv2r.v v20, v8 +; ZVBB-RV32-NEXT: vmv1r.v v1, v20 +; ZVBB-RV32-NEXT: vmv1r.v v3, v22 +; ZVBB-RV32-NEXT: vmv1r.v v5, v24 +; ZVBB-RV32-NEXT: vmv1r.v v7, v26 +; ZVBB-RV32-NEXT: add a3, a0, a2 +; ZVBB-RV32-NEXT: vmv1r.v v2, v10 +; ZVBB-RV32-NEXT: add a4, a1, a2 +; ZVBB-RV32-NEXT: slli a5, a2, 2 +; ZVBB-RV32-NEXT: vmv1r.v v4, v14 +; ZVBB-RV32-NEXT: slli a6, a2, 4 +; ZVBB-RV32-NEXT: add a7, a4, a2 +; ZVBB-RV32-NEXT: vmv1r.v v6, v18 +; ZVBB-RV32-NEXT: sub a5, a6, a5 +; ZVBB-RV32-NEXT: vmv1r.v v22, v11 +; ZVBB-RV32-NEXT: add a6, a7, a2 +; ZVBB-RV32-NEXT: vmv1r.v v24, v15 +; ZVBB-RV32-NEXT: vsseg7e16.v v1, (a0) +; ZVBB-RV32-NEXT: vmv1r.v v26, v19 +; ZVBB-RV32-NEXT: vsseg7e16.v v21, (a1) +; ZVBB-RV32-NEXT: vl1re16.v v18, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re16.v v19, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re16.v v20, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re16.v v21, (a6) +; ZVBB-RV32-NEXT: add a6, a3, a2 +; ZVBB-RV32-NEXT: vl1re16.v v10, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re16.v v11, (a6) +; ZVBB-RV32-NEXT: vl1re16.v v8, (a0) +; ZVBB-RV32-NEXT: vl1re16.v v16, (a4) +; ZVBB-RV32-NEXT: vl1re16.v v9, (a3) +; ZVBB-RV32-NEXT: vl1re16.v v17, (a7) +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: li a3, 14 +; ZVBB-RV32-NEXT: mul a0, a0, a3 +; ZVBB-RV32-NEXT: add a0, sp, a0 +; ZVBB-RV32-NEXT: addi a0, a0, 64 +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re16.v v12, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re16.v v13, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: slli a2, a2, 3 +; ZVBB-RV32-NEXT: add a2, a0, a2 +; ZVBB-RV32-NEXT: vl1re16.v v14, (a6) +; ZVBB-RV32-NEXT: vl1re16.v v15, (a1) +; ZVBB-RV32-NEXT: add a5, a0, a5 +; ZVBB-RV32-NEXT: vs2r.v v20, (a5) +; ZVBB-RV32-NEXT: vs4r.v v16, (a2) +; ZVBB-RV32-NEXT: vs8r.v v8, (a0) +; ZVBB-RV32-NEXT: vl8re16.v v16, (a2) +; ZVBB-RV32-NEXT: vl8re16.v v8, (a0) +; ZVBB-RV32-NEXT: addi sp, s0, -80 +; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; 
ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: addi sp, sp, 80 +; ZVBB-RV32-NEXT: ret +; +; ZVBB-RV64-LABEL: vector_interleave_nxv56i16_nxv8i16: +; ZVBB-RV64: # %bb.0: +; ZVBB-RV64-NEXT: addi sp, sp, -80 +; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: addi s0, sp, 80 +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: slli a0, a0, 5 +; ZVBB-RV64-NEXT: sub sp, sp, a0 +; ZVBB-RV64-NEXT: andi sp, sp, -64 +; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVBB-RV64-NEXT: vmv2r.v v26, v20 +; ZVBB-RV64-NEXT: addi a0, sp, 64 +; ZVBB-RV64-NEXT: vmv2r.v v24, v16 +; ZVBB-RV64-NEXT: csrr a1, vlenb +; ZVBB-RV64-NEXT: slli a2, a1, 3 +; ZVBB-RV64-NEXT: sub a1, a2, a1 +; ZVBB-RV64-NEXT: add a1, sp, a1 +; ZVBB-RV64-NEXT: addi a1, a1, 64 +; ZVBB-RV64-NEXT: vmv2r.v v22, v12 +; ZVBB-RV64-NEXT: csrr a2, vlenb +; ZVBB-RV64-NEXT: vmv2r.v v20, v8 +; ZVBB-RV64-NEXT: vmv1r.v v1, v20 +; ZVBB-RV64-NEXT: vmv1r.v v3, v22 +; ZVBB-RV64-NEXT: vmv1r.v v5, v24 +; ZVBB-RV64-NEXT: vmv1r.v v7, v26 +; ZVBB-RV64-NEXT: add a3, a0, a2 +; ZVBB-RV64-NEXT: vmv1r.v v2, v10 +; ZVBB-RV64-NEXT: add a4, a1, a2 +; ZVBB-RV64-NEXT: slli a5, a2, 2 +; ZVBB-RV64-NEXT: vmv1r.v v4, v14 +; ZVBB-RV64-NEXT: slli a6, a2, 4 +; ZVBB-RV64-NEXT: add a7, a4, a2 +; ZVBB-RV64-NEXT: vmv1r.v v6, v18 +; ZVBB-RV64-NEXT: sub a5, a6, a5 +; ZVBB-RV64-NEXT: vmv1r.v v22, v11 +; ZVBB-RV64-NEXT: add a6, a7, a2 +; ZVBB-RV64-NEXT: vmv1r.v v24, v15 +; ZVBB-RV64-NEXT: vsseg7e16.v v1, (a0) +; ZVBB-RV64-NEXT: vmv1r.v v26, v19 +; ZVBB-RV64-NEXT: vsseg7e16.v v21, (a1) +; ZVBB-RV64-NEXT: vl1re16.v v18, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re16.v v19, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re16.v v20, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re16.v v21, (a6) +; ZVBB-RV64-NEXT: add a6, a3, a2 +; ZVBB-RV64-NEXT: vl1re16.v v10, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re16.v v11, (a6) +; ZVBB-RV64-NEXT: vl1re16.v v8, (a0) +; ZVBB-RV64-NEXT: vl1re16.v v16, (a4) +; ZVBB-RV64-NEXT: vl1re16.v v9, (a3) +; ZVBB-RV64-NEXT: vl1re16.v v17, (a7) +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a3, 14 +; ZVBB-RV64-NEXT: mul a0, a0, a3 +; ZVBB-RV64-NEXT: add a0, sp, a0 +; ZVBB-RV64-NEXT: addi a0, a0, 64 +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re16.v v12, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re16.v v13, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: slli a2, a2, 3 +; ZVBB-RV64-NEXT: add a2, a0, a2 +; ZVBB-RV64-NEXT: vl1re16.v v14, (a6) +; ZVBB-RV64-NEXT: vl1re16.v v15, (a1) +; ZVBB-RV64-NEXT: add a5, a0, a5 +; ZVBB-RV64-NEXT: vs2r.v v20, (a5) +; ZVBB-RV64-NEXT: vs4r.v v16, (a2) +; ZVBB-RV64-NEXT: vs8r.v v8, (a0) +; ZVBB-RV64-NEXT: vl8re16.v v16, (a2) +; ZVBB-RV64-NEXT: vl8re16.v v8, (a0) +; ZVBB-RV64-NEXT: addi sp, s0, -80 +; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: addi sp, sp, 80 +; ZVBB-RV64-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv56i16_nxv8i16: +; ZIP: # %bb.0: +; ZIP-NEXT: addi sp, sp, -80 +; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZIP-NEXT: addi s0, sp, 80 +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: slli a0, a0, 5 +; ZIP-NEXT: sub sp, sp, a0 +; ZIP-NEXT: andi sp, sp, -64 +; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZIP-NEXT: vmv2r.v v26, v20 +; ZIP-NEXT: addi a0, sp, 64 +; ZIP-NEXT: 
vmv2r.v v24, v16 +; ZIP-NEXT: csrr a1, vlenb +; ZIP-NEXT: slli a2, a1, 3 +; ZIP-NEXT: sub a1, a2, a1 +; ZIP-NEXT: add a1, sp, a1 +; ZIP-NEXT: addi a1, a1, 64 +; ZIP-NEXT: vmv2r.v v22, v12 +; ZIP-NEXT: csrr a2, vlenb +; ZIP-NEXT: vmv2r.v v20, v8 +; ZIP-NEXT: vmv1r.v v1, v20 +; ZIP-NEXT: vmv1r.v v3, v22 +; ZIP-NEXT: vmv1r.v v5, v24 +; ZIP-NEXT: vmv1r.v v7, v26 +; ZIP-NEXT: add a3, a0, a2 +; ZIP-NEXT: vmv1r.v v2, v10 +; ZIP-NEXT: add a4, a1, a2 +; ZIP-NEXT: slli a5, a2, 2 +; ZIP-NEXT: vmv1r.v v4, v14 +; ZIP-NEXT: slli a6, a2, 4 +; ZIP-NEXT: add a7, a4, a2 +; ZIP-NEXT: vmv1r.v v6, v18 +; ZIP-NEXT: sub a5, a6, a5 +; ZIP-NEXT: vmv1r.v v22, v11 +; ZIP-NEXT: add a6, a7, a2 +; ZIP-NEXT: vmv1r.v v24, v15 +; ZIP-NEXT: vsseg7e16.v v1, (a0) +; ZIP-NEXT: vmv1r.v v26, v19 +; ZIP-NEXT: vsseg7e16.v v21, (a1) +; ZIP-NEXT: vl1re16.v v18, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re16.v v19, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re16.v v20, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re16.v v21, (a6) +; ZIP-NEXT: add a6, a3, a2 +; ZIP-NEXT: vl1re16.v v10, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re16.v v11, (a6) +; ZIP-NEXT: vl1re16.v v8, (a0) +; ZIP-NEXT: vl1re16.v v16, (a4) +; ZIP-NEXT: vl1re16.v v9, (a3) +; ZIP-NEXT: vl1re16.v v17, (a7) +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a3, 14 +; ZIP-NEXT: mul a0, a0, a3 +; ZIP-NEXT: add a0, sp, a0 +; ZIP-NEXT: addi a0, a0, 64 +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re16.v v12, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re16.v v13, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: slli a2, a2, 3 +; ZIP-NEXT: add a2, a0, a2 +; ZIP-NEXT: vl1re16.v v14, (a6) +; ZIP-NEXT: vl1re16.v v15, (a1) +; ZIP-NEXT: add a5, a0, a5 +; ZIP-NEXT: vs2r.v v20, (a5) +; ZIP-NEXT: vs4r.v v16, (a2) +; ZIP-NEXT: vs8r.v v8, (a0) +; ZIP-NEXT: vl8re16.v v16, (a2) +; ZIP-NEXT: vl8re16.v v8, (a0) +; ZIP-NEXT: addi sp, s0, -80 +; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZIP-NEXT: addi sp, sp, 80 +; ZIP-NEXT: ret + %res = call <vscale x 56 x i16> @llvm.vector.interleave7.nxv56i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g) + ret <vscale x 56 x i16> %res +} + + +define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g) nounwind { +; +; RV32-LABEL: vector_interleave_nxv28i32_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -80 +; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; RV32-NEXT: addi s0, sp, 80 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; RV32-NEXT: vmv2r.v v26, v20 +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vmv2r.v v24, v16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a2, a1, 3 +; RV32-NEXT: sub a1, a2, a1 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 64 +; RV32-NEXT: vmv2r.v v22, v12 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: vmv2r.v v20, v8 +; RV32-NEXT: vmv1r.v v1, v20 +; RV32-NEXT: vmv1r.v v3, v22 +; RV32-NEXT: vmv1r.v v5, v24 +; RV32-NEXT: vmv1r.v v7, v26 +; RV32-NEXT: add a3, a0, a2 +; RV32-NEXT: vmv1r.v v2, v10 +; RV32-NEXT: add a4, a1, a2 +; RV32-NEXT: slli a5, a2, 2 +; RV32-NEXT: vmv1r.v v4, v14 +; RV32-NEXT: slli a6, a2, 4 +; RV32-NEXT: add a7, a4, a2 +; RV32-NEXT: vmv1r.v v6, v18 +; RV32-NEXT: sub a5, a6, a5 +; RV32-NEXT: vmv1r.v v22, v11 +; RV32-NEXT: add a6, a7, a2 +; RV32-NEXT: vmv1r.v v24, v15 +; RV32-NEXT: vsseg7e32.v v1, (a0) +; RV32-NEXT: vmv1r.v v26, v19 +; RV32-NEXT: vsseg7e32.v v21, (a1) +; RV32-NEXT: vl1re32.v v18, (a6) 
+; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re32.v v19, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re32.v v20, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re32.v v21, (a6) +; RV32-NEXT: add a6, a3, a2 +; RV32-NEXT: vl1re32.v v10, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re32.v v11, (a6) +; RV32-NEXT: vl1re32.v v8, (a0) +; RV32-NEXT: vl1re32.v v16, (a4) +; RV32-NEXT: vl1re32.v v9, (a3) +; RV32-NEXT: vl1re32.v v17, (a7) +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 14 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 64 +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re32.v v12, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re32.v v13, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: add a2, a0, a2 +; RV32-NEXT: vl1re32.v v14, (a6) +; RV32-NEXT: vl1re32.v v15, (a1) +; RV32-NEXT: add a5, a0, a5 +; RV32-NEXT: vs2r.v v20, (a5) +; RV32-NEXT: vs4r.v v16, (a2) +; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: vl8re32.v v16, (a2) +; RV32-NEXT: vl8re32.v v8, (a0) +; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 80 +; RV32-NEXT: ret +; +; RV64-LABEL: vector_interleave_nxv28i32_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; RV64-NEXT: addi s0, sp, 80 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 5 +; RV64-NEXT: sub sp, sp, a0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; RV64-NEXT: vmv2r.v v26, v20 +; RV64-NEXT: addi a0, sp, 64 +; RV64-NEXT: vmv2r.v v24, v16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a2, a1, 3 +; RV64-NEXT: sub a1, a2, a1 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 64 +; RV64-NEXT: vmv2r.v v22, v12 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: vmv2r.v v20, v8 +; RV64-NEXT: vmv1r.v v1, v20 +; RV64-NEXT: vmv1r.v v3, v22 +; RV64-NEXT: vmv1r.v v5, v24 +; RV64-NEXT: vmv1r.v v7, v26 +; RV64-NEXT: add a3, a0, a2 +; RV64-NEXT: vmv1r.v v2, v10 +; RV64-NEXT: add a4, a1, a2 +; RV64-NEXT: slli a5, a2, 2 +; RV64-NEXT: vmv1r.v v4, v14 +; RV64-NEXT: slli a6, a2, 4 +; RV64-NEXT: add a7, a4, a2 +; RV64-NEXT: vmv1r.v v6, v18 +; RV64-NEXT: sub a5, a6, a5 +; RV64-NEXT: vmv1r.v v22, v11 +; RV64-NEXT: add a6, a7, a2 +; RV64-NEXT: vmv1r.v v24, v15 +; RV64-NEXT: vsseg7e32.v v1, (a0) +; RV64-NEXT: vmv1r.v v26, v19 +; RV64-NEXT: vsseg7e32.v v21, (a1) +; RV64-NEXT: vl1re32.v v18, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re32.v v19, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re32.v v20, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re32.v v21, (a6) +; RV64-NEXT: add a6, a3, a2 +; RV64-NEXT: vl1re32.v v10, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re32.v v11, (a6) +; RV64-NEXT: vl1re32.v v8, (a0) +; RV64-NEXT: vl1re32.v v16, (a4) +; RV64-NEXT: vl1re32.v v9, (a3) +; RV64-NEXT: vl1re32.v v17, (a7) +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a3, 14 +; RV64-NEXT: mul a0, a0, a3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 64 +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re32.v v12, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re32.v v13, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: slli a2, a2, 3 +; RV64-NEXT: add a2, a0, a2 +; RV64-NEXT: vl1re32.v v14, (a6) +; RV64-NEXT: vl1re32.v v15, (a1) +; RV64-NEXT: add a5, a0, a5 +; RV64-NEXT: vs2r.v v20, (a5) +; RV64-NEXT: vs4r.v v16, (a2) +; RV64-NEXT: 
vs8r.v v8, (a0) +; RV64-NEXT: vl8re32.v v16, (a2) +; RV64-NEXT: vl8re32.v v8, (a0) +; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; ZVBB-RV32-LABEL: vector_interleave_nxv28i32_nxv4i32: +; ZVBB-RV32: # %bb.0: +; ZVBB-RV32-NEXT: addi sp, sp, -80 +; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: addi s0, sp, 80 +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: slli a0, a0, 5 +; ZVBB-RV32-NEXT: sub sp, sp, a0 +; ZVBB-RV32-NEXT: andi sp, sp, -64 +; ZVBB-RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; ZVBB-RV32-NEXT: vmv2r.v v26, v20 +; ZVBB-RV32-NEXT: addi a0, sp, 64 +; ZVBB-RV32-NEXT: vmv2r.v v24, v16 +; ZVBB-RV32-NEXT: csrr a1, vlenb +; ZVBB-RV32-NEXT: slli a2, a1, 3 +; ZVBB-RV32-NEXT: sub a1, a2, a1 +; ZVBB-RV32-NEXT: add a1, sp, a1 +; ZVBB-RV32-NEXT: addi a1, a1, 64 +; ZVBB-RV32-NEXT: vmv2r.v v22, v12 +; ZVBB-RV32-NEXT: csrr a2, vlenb +; ZVBB-RV32-NEXT: vmv2r.v v20, v8 +; ZVBB-RV32-NEXT: vmv1r.v v1, v20 +; ZVBB-RV32-NEXT: vmv1r.v v3, v22 +; ZVBB-RV32-NEXT: vmv1r.v v5, v24 +; ZVBB-RV32-NEXT: vmv1r.v v7, v26 +; ZVBB-RV32-NEXT: add a3, a0, a2 +; ZVBB-RV32-NEXT: vmv1r.v v2, v10 +; ZVBB-RV32-NEXT: add a4, a1, a2 +; ZVBB-RV32-NEXT: slli a5, a2, 2 +; ZVBB-RV32-NEXT: vmv1r.v v4, v14 +; ZVBB-RV32-NEXT: slli a6, a2, 4 +; ZVBB-RV32-NEXT: add a7, a4, a2 +; ZVBB-RV32-NEXT: vmv1r.v v6, v18 +; ZVBB-RV32-NEXT: sub a5, a6, a5 +; ZVBB-RV32-NEXT: vmv1r.v v22, v11 +; ZVBB-RV32-NEXT: add a6, a7, a2 +; ZVBB-RV32-NEXT: vmv1r.v v24, v15 +; ZVBB-RV32-NEXT: vsseg7e32.v v1, (a0) +; ZVBB-RV32-NEXT: vmv1r.v v26, v19 +; ZVBB-RV32-NEXT: vsseg7e32.v v21, (a1) +; ZVBB-RV32-NEXT: vl1re32.v v18, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re32.v v19, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re32.v v20, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re32.v v21, (a6) +; ZVBB-RV32-NEXT: add a6, a3, a2 +; ZVBB-RV32-NEXT: vl1re32.v v10, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re32.v v11, (a6) +; ZVBB-RV32-NEXT: vl1re32.v v8, (a0) +; ZVBB-RV32-NEXT: vl1re32.v v16, (a4) +; ZVBB-RV32-NEXT: vl1re32.v v9, (a3) +; ZVBB-RV32-NEXT: vl1re32.v v17, (a7) +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: li a3, 14 +; ZVBB-RV32-NEXT: mul a0, a0, a3 +; ZVBB-RV32-NEXT: add a0, sp, a0 +; ZVBB-RV32-NEXT: addi a0, a0, 64 +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re32.v v12, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re32.v v13, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: slli a2, a2, 3 +; ZVBB-RV32-NEXT: add a2, a0, a2 +; ZVBB-RV32-NEXT: vl1re32.v v14, (a6) +; ZVBB-RV32-NEXT: vl1re32.v v15, (a1) +; ZVBB-RV32-NEXT: add a5, a0, a5 +; ZVBB-RV32-NEXT: vs2r.v v20, (a5) +; ZVBB-RV32-NEXT: vs4r.v v16, (a2) +; ZVBB-RV32-NEXT: vs8r.v v8, (a0) +; ZVBB-RV32-NEXT: vl8re32.v v16, (a2) +; ZVBB-RV32-NEXT: vl8re32.v v8, (a0) +; ZVBB-RV32-NEXT: addi sp, s0, -80 +; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: addi sp, sp, 80 +; ZVBB-RV32-NEXT: ret +; +; ZVBB-RV64-LABEL: vector_interleave_nxv28i32_nxv4i32: +; ZVBB-RV64: # %bb.0: +; ZVBB-RV64-NEXT: addi sp, sp, -80 +; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: addi s0, sp, 80 +; ZVBB-RV64-NEXT: csrr a0, vlenb +; 
ZVBB-RV64-NEXT: slli a0, a0, 5 +; ZVBB-RV64-NEXT: sub sp, sp, a0 +; ZVBB-RV64-NEXT: andi sp, sp, -64 +; ZVBB-RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; ZVBB-RV64-NEXT: vmv2r.v v26, v20 +; ZVBB-RV64-NEXT: addi a0, sp, 64 +; ZVBB-RV64-NEXT: vmv2r.v v24, v16 +; ZVBB-RV64-NEXT: csrr a1, vlenb +; ZVBB-RV64-NEXT: slli a2, a1, 3 +; ZVBB-RV64-NEXT: sub a1, a2, a1 +; ZVBB-RV64-NEXT: add a1, sp, a1 +; ZVBB-RV64-NEXT: addi a1, a1, 64 +; ZVBB-RV64-NEXT: vmv2r.v v22, v12 +; ZVBB-RV64-NEXT: csrr a2, vlenb +; ZVBB-RV64-NEXT: vmv2r.v v20, v8 +; ZVBB-RV64-NEXT: vmv1r.v v1, v20 +; ZVBB-RV64-NEXT: vmv1r.v v3, v22 +; ZVBB-RV64-NEXT: vmv1r.v v5, v24 +; ZVBB-RV64-NEXT: vmv1r.v v7, v26 +; ZVBB-RV64-NEXT: add a3, a0, a2 +; ZVBB-RV64-NEXT: vmv1r.v v2, v10 +; ZVBB-RV64-NEXT: add a4, a1, a2 +; ZVBB-RV64-NEXT: slli a5, a2, 2 +; ZVBB-RV64-NEXT: vmv1r.v v4, v14 +; ZVBB-RV64-NEXT: slli a6, a2, 4 +; ZVBB-RV64-NEXT: add a7, a4, a2 +; ZVBB-RV64-NEXT: vmv1r.v v6, v18 +; ZVBB-RV64-NEXT: sub a5, a6, a5 +; ZVBB-RV64-NEXT: vmv1r.v v22, v11 +; ZVBB-RV64-NEXT: add a6, a7, a2 +; ZVBB-RV64-NEXT: vmv1r.v v24, v15 +; ZVBB-RV64-NEXT: vsseg7e32.v v1, (a0) +; ZVBB-RV64-NEXT: vmv1r.v v26, v19 +; ZVBB-RV64-NEXT: vsseg7e32.v v21, (a1) +; ZVBB-RV64-NEXT: vl1re32.v v18, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re32.v v19, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re32.v v20, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re32.v v21, (a6) +; ZVBB-RV64-NEXT: add a6, a3, a2 +; ZVBB-RV64-NEXT: vl1re32.v v10, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re32.v v11, (a6) +; ZVBB-RV64-NEXT: vl1re32.v v8, (a0) +; ZVBB-RV64-NEXT: vl1re32.v v16, (a4) +; ZVBB-RV64-NEXT: vl1re32.v v9, (a3) +; ZVBB-RV64-NEXT: vl1re32.v v17, (a7) +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a3, 14 +; ZVBB-RV64-NEXT: mul a0, a0, a3 +; ZVBB-RV64-NEXT: add a0, sp, a0 +; ZVBB-RV64-NEXT: addi a0, a0, 64 +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re32.v v12, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re32.v v13, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: slli a2, a2, 3 +; ZVBB-RV64-NEXT: add a2, a0, a2 +; ZVBB-RV64-NEXT: vl1re32.v v14, (a6) +; ZVBB-RV64-NEXT: vl1re32.v v15, (a1) +; ZVBB-RV64-NEXT: add a5, a0, a5 +; ZVBB-RV64-NEXT: vs2r.v v20, (a5) +; ZVBB-RV64-NEXT: vs4r.v v16, (a2) +; ZVBB-RV64-NEXT: vs8r.v v8, (a0) +; ZVBB-RV64-NEXT: vl8re32.v v16, (a2) +; ZVBB-RV64-NEXT: vl8re32.v v8, (a0) +; ZVBB-RV64-NEXT: addi sp, s0, -80 +; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: addi sp, sp, 80 +; ZVBB-RV64-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv28i32_nxv4i32: +; ZIP: # %bb.0: +; ZIP-NEXT: addi sp, sp, -80 +; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZIP-NEXT: addi s0, sp, 80 +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: slli a0, a0, 5 +; ZIP-NEXT: sub sp, sp, a0 +; ZIP-NEXT: andi sp, sp, -64 +; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; ZIP-NEXT: vmv2r.v v26, v20 +; ZIP-NEXT: addi a0, sp, 64 +; ZIP-NEXT: vmv2r.v v24, v16 +; ZIP-NEXT: csrr a1, vlenb +; ZIP-NEXT: slli a2, a1, 3 +; ZIP-NEXT: sub a1, a2, a1 +; ZIP-NEXT: add a1, sp, a1 +; ZIP-NEXT: addi a1, a1, 64 +; ZIP-NEXT: vmv2r.v v22, v12 +; ZIP-NEXT: csrr a2, vlenb +; ZIP-NEXT: vmv2r.v v20, v8 +; ZIP-NEXT: vmv1r.v v1, v20 +; ZIP-NEXT: vmv1r.v v3, v22 +; ZIP-NEXT: vmv1r.v v5, v24 +; ZIP-NEXT: vmv1r.v v7, v26 +; ZIP-NEXT: add a3, a0, a2 +; ZIP-NEXT: vmv1r.v 
v2, v10 +; ZIP-NEXT: add a4, a1, a2 +; ZIP-NEXT: slli a5, a2, 2 +; ZIP-NEXT: vmv1r.v v4, v14 +; ZIP-NEXT: slli a6, a2, 4 +; ZIP-NEXT: add a7, a4, a2 +; ZIP-NEXT: vmv1r.v v6, v18 +; ZIP-NEXT: sub a5, a6, a5 +; ZIP-NEXT: vmv1r.v v22, v11 +; ZIP-NEXT: add a6, a7, a2 +; ZIP-NEXT: vmv1r.v v24, v15 +; ZIP-NEXT: vsseg7e32.v v1, (a0) +; ZIP-NEXT: vmv1r.v v26, v19 +; ZIP-NEXT: vsseg7e32.v v21, (a1) +; ZIP-NEXT: vl1re32.v v18, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re32.v v19, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re32.v v20, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re32.v v21, (a6) +; ZIP-NEXT: add a6, a3, a2 +; ZIP-NEXT: vl1re32.v v10, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re32.v v11, (a6) +; ZIP-NEXT: vl1re32.v v8, (a0) +; ZIP-NEXT: vl1re32.v v16, (a4) +; ZIP-NEXT: vl1re32.v v9, (a3) +; ZIP-NEXT: vl1re32.v v17, (a7) +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a3, 14 +; ZIP-NEXT: mul a0, a0, a3 +; ZIP-NEXT: add a0, sp, a0 +; ZIP-NEXT: addi a0, a0, 64 +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re32.v v12, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re32.v v13, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: slli a2, a2, 3 +; ZIP-NEXT: add a2, a0, a2 +; ZIP-NEXT: vl1re32.v v14, (a6) +; ZIP-NEXT: vl1re32.v v15, (a1) +; ZIP-NEXT: add a5, a0, a5 +; ZIP-NEXT: vs2r.v v20, (a5) +; ZIP-NEXT: vs4r.v v16, (a2) +; ZIP-NEXT: vs8r.v v8, (a0) +; ZIP-NEXT: vl8re32.v v16, (a2) +; ZIP-NEXT: vl8re32.v v8, (a0) +; ZIP-NEXT: addi sp, s0, -80 +; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZIP-NEXT: addi sp, sp, 80 +; ZIP-NEXT: ret + %res = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g) + ret <vscale x 28 x i32> %res +} + +define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g) nounwind { +; +; RV32-LABEL: vector_interleave_nxv14i64_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -80 +; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; RV32-NEXT: addi s0, sp, 80 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32-NEXT: vmv2r.v v26, v20 +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vmv2r.v v24, v16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a2, a1, 3 +; RV32-NEXT: sub a1, a2, a1 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 64 +; RV32-NEXT: vmv2r.v v22, v12 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: vmv2r.v v20, v8 +; RV32-NEXT: vmv1r.v v1, v20 +; RV32-NEXT: vmv1r.v v3, v22 +; RV32-NEXT: vmv1r.v v5, v24 +; RV32-NEXT: vmv1r.v v7, v26 +; RV32-NEXT: add a3, a0, a2 +; RV32-NEXT: vmv1r.v v2, v10 +; RV32-NEXT: add a4, a1, a2 +; RV32-NEXT: slli a5, a2, 2 +; RV32-NEXT: vmv1r.v v4, v14 +; RV32-NEXT: slli a6, a2, 4 +; RV32-NEXT: add a7, a4, a2 +; RV32-NEXT: vmv1r.v v6, v18 +; RV32-NEXT: sub a5, a6, a5 +; RV32-NEXT: vmv1r.v v22, v11 +; RV32-NEXT: add a6, a7, a2 +; RV32-NEXT: vmv1r.v v24, v15 +; RV32-NEXT: vsseg7e64.v v1, (a0) +; RV32-NEXT: vmv1r.v v26, v19 +; RV32-NEXT: vsseg7e64.v v21, (a1) +; RV32-NEXT: vl1re64.v v18, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re64.v v19, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re64.v v20, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re64.v v21, (a6) +; RV32-NEXT: add a6, a3, a2 +; RV32-NEXT: vl1re64.v v10, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re64.v v11, (a6) +; RV32-NEXT: vl1re64.v v8, (a0) +; RV32-NEXT: vl1re64.v v16, (a4) +; RV32-NEXT: vl1re64.v v9, 
(a3) +; RV32-NEXT: vl1re64.v v17, (a7) +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 14 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 64 +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re64.v v12, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re64.v v13, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: add a2, a0, a2 +; RV32-NEXT: vl1re64.v v14, (a6) +; RV32-NEXT: vl1re64.v v15, (a1) +; RV32-NEXT: add a5, a0, a5 +; RV32-NEXT: vs2r.v v20, (a5) +; RV32-NEXT: vs4r.v v16, (a2) +; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: vl8re64.v v16, (a2) +; RV32-NEXT: vl8re64.v v8, (a0) +; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 80 +; RV32-NEXT: ret +; +; RV64-LABEL: vector_interleave_nxv14i64_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; RV64-NEXT: addi s0, sp, 80 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 5 +; RV64-NEXT: sub sp, sp, a0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64-NEXT: vmv2r.v v26, v20 +; RV64-NEXT: addi a0, sp, 64 +; RV64-NEXT: vmv2r.v v24, v16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a2, a1, 3 +; RV64-NEXT: sub a1, a2, a1 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 64 +; RV64-NEXT: vmv2r.v v22, v12 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: vmv2r.v v20, v8 +; RV64-NEXT: vmv1r.v v1, v20 +; RV64-NEXT: vmv1r.v v3, v22 +; RV64-NEXT: vmv1r.v v5, v24 +; RV64-NEXT: vmv1r.v v7, v26 +; RV64-NEXT: add a3, a0, a2 +; RV64-NEXT: vmv1r.v v2, v10 +; RV64-NEXT: add a4, a1, a2 +; RV64-NEXT: slli a5, a2, 2 +; RV64-NEXT: vmv1r.v v4, v14 +; RV64-NEXT: slli a6, a2, 4 +; RV64-NEXT: add a7, a4, a2 +; RV64-NEXT: vmv1r.v v6, v18 +; RV64-NEXT: sub a5, a6, a5 +; RV64-NEXT: vmv1r.v v22, v11 +; RV64-NEXT: add a6, a7, a2 +; RV64-NEXT: vmv1r.v v24, v15 +; RV64-NEXT: vsseg7e64.v v1, (a0) +; RV64-NEXT: vmv1r.v v26, v19 +; RV64-NEXT: vsseg7e64.v v21, (a1) +; RV64-NEXT: vl1re64.v v18, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re64.v v19, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re64.v v20, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re64.v v21, (a6) +; RV64-NEXT: add a6, a3, a2 +; RV64-NEXT: vl1re64.v v10, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re64.v v11, (a6) +; RV64-NEXT: vl1re64.v v8, (a0) +; RV64-NEXT: vl1re64.v v16, (a4) +; RV64-NEXT: vl1re64.v v9, (a3) +; RV64-NEXT: vl1re64.v v17, (a7) +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a3, 14 +; RV64-NEXT: mul a0, a0, a3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 64 +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re64.v v12, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re64.v v13, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: slli a2, a2, 3 +; RV64-NEXT: add a2, a0, a2 +; RV64-NEXT: vl1re64.v v14, (a6) +; RV64-NEXT: vl1re64.v v15, (a1) +; RV64-NEXT: add a5, a0, a5 +; RV64-NEXT: vs2r.v v20, (a5) +; RV64-NEXT: vs4r.v v16, (a2) +; RV64-NEXT: vs8r.v v8, (a0) +; RV64-NEXT: vl8re64.v v16, (a2) +; RV64-NEXT: vl8re64.v v8, (a0) +; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; ZVBB-RV32-LABEL: vector_interleave_nxv14i64_nxv2i64: +; ZVBB-RV32: # %bb.0: +; ZVBB-RV32-NEXT: addi sp, sp, -80 +; ZVBB-RV32-NEXT: sw ra, 
76(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: addi s0, sp, 80 +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: slli a0, a0, 5 +; ZVBB-RV32-NEXT: sub sp, sp, a0 +; ZVBB-RV32-NEXT: andi sp, sp, -64 +; ZVBB-RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; ZVBB-RV32-NEXT: vmv2r.v v26, v20 +; ZVBB-RV32-NEXT: addi a0, sp, 64 +; ZVBB-RV32-NEXT: vmv2r.v v24, v16 +; ZVBB-RV32-NEXT: csrr a1, vlenb +; ZVBB-RV32-NEXT: slli a2, a1, 3 +; ZVBB-RV32-NEXT: sub a1, a2, a1 +; ZVBB-RV32-NEXT: add a1, sp, a1 +; ZVBB-RV32-NEXT: addi a1, a1, 64 +; ZVBB-RV32-NEXT: vmv2r.v v22, v12 +; ZVBB-RV32-NEXT: csrr a2, vlenb +; ZVBB-RV32-NEXT: vmv2r.v v20, v8 +; ZVBB-RV32-NEXT: vmv1r.v v1, v20 +; ZVBB-RV32-NEXT: vmv1r.v v3, v22 +; ZVBB-RV32-NEXT: vmv1r.v v5, v24 +; ZVBB-RV32-NEXT: vmv1r.v v7, v26 +; ZVBB-RV32-NEXT: add a3, a0, a2 +; ZVBB-RV32-NEXT: vmv1r.v v2, v10 +; ZVBB-RV32-NEXT: add a4, a1, a2 +; ZVBB-RV32-NEXT: slli a5, a2, 2 +; ZVBB-RV32-NEXT: vmv1r.v v4, v14 +; ZVBB-RV32-NEXT: slli a6, a2, 4 +; ZVBB-RV32-NEXT: add a7, a4, a2 +; ZVBB-RV32-NEXT: vmv1r.v v6, v18 +; ZVBB-RV32-NEXT: sub a5, a6, a5 +; ZVBB-RV32-NEXT: vmv1r.v v22, v11 +; ZVBB-RV32-NEXT: add a6, a7, a2 +; ZVBB-RV32-NEXT: vmv1r.v v24, v15 +; ZVBB-RV32-NEXT: vsseg7e64.v v1, (a0) +; ZVBB-RV32-NEXT: vmv1r.v v26, v19 +; ZVBB-RV32-NEXT: vsseg7e64.v v21, (a1) +; ZVBB-RV32-NEXT: vl1re64.v v18, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re64.v v19, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re64.v v20, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re64.v v21, (a6) +; ZVBB-RV32-NEXT: add a6, a3, a2 +; ZVBB-RV32-NEXT: vl1re64.v v10, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re64.v v11, (a6) +; ZVBB-RV32-NEXT: vl1re64.v v8, (a0) +; ZVBB-RV32-NEXT: vl1re64.v v16, (a4) +; ZVBB-RV32-NEXT: vl1re64.v v9, (a3) +; ZVBB-RV32-NEXT: vl1re64.v v17, (a7) +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: li a3, 14 +; ZVBB-RV32-NEXT: mul a0, a0, a3 +; ZVBB-RV32-NEXT: add a0, sp, a0 +; ZVBB-RV32-NEXT: addi a0, a0, 64 +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re64.v v12, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re64.v v13, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: slli a2, a2, 3 +; ZVBB-RV32-NEXT: add a2, a0, a2 +; ZVBB-RV32-NEXT: vl1re64.v v14, (a6) +; ZVBB-RV32-NEXT: vl1re64.v v15, (a1) +; ZVBB-RV32-NEXT: add a5, a0, a5 +; ZVBB-RV32-NEXT: vs2r.v v20, (a5) +; ZVBB-RV32-NEXT: vs4r.v v16, (a2) +; ZVBB-RV32-NEXT: vs8r.v v8, (a0) +; ZVBB-RV32-NEXT: vl8re64.v v16, (a2) +; ZVBB-RV32-NEXT: vl8re64.v v8, (a0) +; ZVBB-RV32-NEXT: addi sp, s0, -80 +; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: addi sp, sp, 80 +; ZVBB-RV32-NEXT: ret +; +; ZVBB-RV64-LABEL: vector_interleave_nxv14i64_nxv2i64: +; ZVBB-RV64: # %bb.0: +; ZVBB-RV64-NEXT: addi sp, sp, -80 +; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: addi s0, sp, 80 +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: slli a0, a0, 5 +; ZVBB-RV64-NEXT: sub sp, sp, a0 +; ZVBB-RV64-NEXT: andi sp, sp, -64 +; ZVBB-RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; ZVBB-RV64-NEXT: vmv2r.v v26, v20 +; ZVBB-RV64-NEXT: addi a0, sp, 64 +; ZVBB-RV64-NEXT: vmv2r.v v24, v16 +; ZVBB-RV64-NEXT: csrr a1, vlenb +; ZVBB-RV64-NEXT: slli a2, a1, 3 +; ZVBB-RV64-NEXT: sub a1, a2, a1 +; ZVBB-RV64-NEXT: add a1, sp, a1 +; ZVBB-RV64-NEXT: 
addi a1, a1, 64 +; ZVBB-RV64-NEXT: vmv2r.v v22, v12 +; ZVBB-RV64-NEXT: csrr a2, vlenb +; ZVBB-RV64-NEXT: vmv2r.v v20, v8 +; ZVBB-RV64-NEXT: vmv1r.v v1, v20 +; ZVBB-RV64-NEXT: vmv1r.v v3, v22 +; ZVBB-RV64-NEXT: vmv1r.v v5, v24 +; ZVBB-RV64-NEXT: vmv1r.v v7, v26 +; ZVBB-RV64-NEXT: add a3, a0, a2 +; ZVBB-RV64-NEXT: vmv1r.v v2, v10 +; ZVBB-RV64-NEXT: add a4, a1, a2 +; ZVBB-RV64-NEXT: slli a5, a2, 2 +; ZVBB-RV64-NEXT: vmv1r.v v4, v14 +; ZVBB-RV64-NEXT: slli a6, a2, 4 +; ZVBB-RV64-NEXT: add a7, a4, a2 +; ZVBB-RV64-NEXT: vmv1r.v v6, v18 +; ZVBB-RV64-NEXT: sub a5, a6, a5 +; ZVBB-RV64-NEXT: vmv1r.v v22, v11 +; ZVBB-RV64-NEXT: add a6, a7, a2 +; ZVBB-RV64-NEXT: vmv1r.v v24, v15 +; ZVBB-RV64-NEXT: vsseg7e64.v v1, (a0) +; ZVBB-RV64-NEXT: vmv1r.v v26, v19 +; ZVBB-RV64-NEXT: vsseg7e64.v v21, (a1) +; ZVBB-RV64-NEXT: vl1re64.v v18, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re64.v v19, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re64.v v20, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re64.v v21, (a6) +; ZVBB-RV64-NEXT: add a6, a3, a2 +; ZVBB-RV64-NEXT: vl1re64.v v10, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re64.v v11, (a6) +; ZVBB-RV64-NEXT: vl1re64.v v8, (a0) +; ZVBB-RV64-NEXT: vl1re64.v v16, (a4) +; ZVBB-RV64-NEXT: vl1re64.v v9, (a3) +; ZVBB-RV64-NEXT: vl1re64.v v17, (a7) +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a3, 14 +; ZVBB-RV64-NEXT: mul a0, a0, a3 +; ZVBB-RV64-NEXT: add a0, sp, a0 +; ZVBB-RV64-NEXT: addi a0, a0, 64 +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re64.v v12, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re64.v v13, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: slli a2, a2, 3 +; ZVBB-RV64-NEXT: add a2, a0, a2 +; ZVBB-RV64-NEXT: vl1re64.v v14, (a6) +; ZVBB-RV64-NEXT: vl1re64.v v15, (a1) +; ZVBB-RV64-NEXT: add a5, a0, a5 +; ZVBB-RV64-NEXT: vs2r.v v20, (a5) +; ZVBB-RV64-NEXT: vs4r.v v16, (a2) +; ZVBB-RV64-NEXT: vs8r.v v8, (a0) +; ZVBB-RV64-NEXT: vl8re64.v v16, (a2) +; ZVBB-RV64-NEXT: vl8re64.v v8, (a0) +; ZVBB-RV64-NEXT: addi sp, s0, -80 +; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: addi sp, sp, 80 +; ZVBB-RV64-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv14i64_nxv2i64: +; ZIP: # %bb.0: +; ZIP-NEXT: addi sp, sp, -80 +; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZIP-NEXT: addi s0, sp, 80 +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: slli a0, a0, 5 +; ZIP-NEXT: sub sp, sp, a0 +; ZIP-NEXT: andi sp, sp, -64 +; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; ZIP-NEXT: vmv2r.v v26, v20 +; ZIP-NEXT: addi a0, sp, 64 +; ZIP-NEXT: vmv2r.v v24, v16 +; ZIP-NEXT: csrr a1, vlenb +; ZIP-NEXT: slli a2, a1, 3 +; ZIP-NEXT: sub a1, a2, a1 +; ZIP-NEXT: add a1, sp, a1 +; ZIP-NEXT: addi a1, a1, 64 +; ZIP-NEXT: vmv2r.v v22, v12 +; ZIP-NEXT: csrr a2, vlenb +; ZIP-NEXT: vmv2r.v v20, v8 +; ZIP-NEXT: vmv1r.v v1, v20 +; ZIP-NEXT: vmv1r.v v3, v22 +; ZIP-NEXT: vmv1r.v v5, v24 +; ZIP-NEXT: vmv1r.v v7, v26 +; ZIP-NEXT: add a3, a0, a2 +; ZIP-NEXT: vmv1r.v v2, v10 +; ZIP-NEXT: add a4, a1, a2 +; ZIP-NEXT: slli a5, a2, 2 +; ZIP-NEXT: vmv1r.v v4, v14 +; ZIP-NEXT: slli a6, a2, 4 +; ZIP-NEXT: add a7, a4, a2 +; ZIP-NEXT: vmv1r.v v6, v18 +; ZIP-NEXT: sub a5, a6, a5 +; ZIP-NEXT: vmv1r.v v22, v11 +; ZIP-NEXT: add a6, a7, a2 +; ZIP-NEXT: vmv1r.v v24, v15 +; ZIP-NEXT: vsseg7e64.v v1, (a0) +; ZIP-NEXT: vmv1r.v v26, v19 +; ZIP-NEXT: vsseg7e64.v v21, (a1) +; ZIP-NEXT: vl1re64.v 
v18, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re64.v v19, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re64.v v20, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re64.v v21, (a6) +; ZIP-NEXT: add a6, a3, a2 +; ZIP-NEXT: vl1re64.v v10, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re64.v v11, (a6) +; ZIP-NEXT: vl1re64.v v8, (a0) +; ZIP-NEXT: vl1re64.v v16, (a4) +; ZIP-NEXT: vl1re64.v v9, (a3) +; ZIP-NEXT: vl1re64.v v17, (a7) +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a3, 14 +; ZIP-NEXT: mul a0, a0, a3 +; ZIP-NEXT: add a0, sp, a0 +; ZIP-NEXT: addi a0, a0, 64 +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re64.v v12, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re64.v v13, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: slli a2, a2, 3 +; ZIP-NEXT: add a2, a0, a2 +; ZIP-NEXT: vl1re64.v v14, (a6) +; ZIP-NEXT: vl1re64.v v15, (a1) +; ZIP-NEXT: add a5, a0, a5 +; ZIP-NEXT: vs2r.v v20, (a5) +; ZIP-NEXT: vs4r.v v16, (a2) +; ZIP-NEXT: vs8r.v v8, (a0) +; ZIP-NEXT: vl8re64.v v16, (a2) +; ZIP-NEXT: vl8re64.v v8, (a0) +; ZIP-NEXT: addi sp, s0, -80 +; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZIP-NEXT: addi sp, sp, 80 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave7.nxv14i64( %a, %b, %c, %d, %e, %f, %g) + ret %res +} + +; Floats + +define @vector_interleave_nxv4bf16_nxv2bf16( %a, %b) { +; V-LABEL: vector_interleave_nxv4bf16_nxv2bf16: +; V: # %bb.0: +; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; V-NEXT: vwaddu.vv v10, v8, v9 +; V-NEXT: li a0, -1 +; V-NEXT: csrr a1, vlenb +; V-NEXT: vwmaccu.vx v10, a0, v9 +; V-NEXT: srli a1, a1, 2 +; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; V-NEXT: vslidedown.vx v8, v10, a1 +; V-NEXT: add a0, a1, a1 +; V-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; V-NEXT: vslideup.vx v10, v8, a1 +; V-NEXT: vmv.v.v v8, v10 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv4bf16_nxv2bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vwsll.vi v10, v9, 16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: vwaddu.wv v10, v10, v8 +; ZVBB-NEXT: srli a0, a0, 2 +; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVBB-NEXT: vslidedown.vx v8, v10, a0 +; ZVBB-NEXT: add a1, a0, a0 +; ZVBB-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v10, v8, a0 +; ZVBB-NEXT: vmv.v.v v8, v10 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv4bf16_nxv2bf16: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9 +; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9 +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: srli a0, a0, 2 +; ZIP-NEXT: add a1, a0, a0 +; ZIP-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; ZIP-NEXT: vslideup.vx v10, v11, a0 +; ZIP-NEXT: vmv.v.v v8, v10 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv4bf16( %a, %b) + ret %res +} + +define @vector_interleave_nxv8bf16_nxv4bf16( %a, %b) { +; V-LABEL: vector_interleave_nxv8bf16_nxv4bf16: +; V: # %bb.0: +; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; V-NEXT: vmv1r.v v10, v9 +; V-NEXT: vmv1r.v v11, v8 +; V-NEXT: vwaddu.vv v8, v11, v10 +; V-NEXT: li a0, -1 +; V-NEXT: vwmaccu.vx v8, a0, v10 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv8bf16_nxv4bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVBB-NEXT: vmv1r.v v10, v9 +; ZVBB-NEXT: vmv1r.v v11, v8 +; ZVBB-NEXT: vwsll.vi v8, v10, 16 +; ZVBB-NEXT: vwaddu.wv v8, v8, v11 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv8bf16_nxv4bf16: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; 
ZIP-NEXT: vmv1r.v v10, v9 +; ZIP-NEXT: vmv1r.v v11, v8 +; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10 +; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv8bf16( %a, %b) + ret %res +} + +define @vector_interleave_nxv4f16_nxv2f16( %a, %b) { +; V-LABEL: vector_interleave_nxv4f16_nxv2f16: +; V: # %bb.0: +; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; V-NEXT: vwaddu.vv v10, v8, v9 +; V-NEXT: li a0, -1 +; V-NEXT: csrr a1, vlenb +; V-NEXT: vwmaccu.vx v10, a0, v9 +; V-NEXT: srli a1, a1, 2 +; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; V-NEXT: vslidedown.vx v8, v10, a1 +; V-NEXT: add a0, a1, a1 +; V-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; V-NEXT: vslideup.vx v10, v8, a1 +; V-NEXT: vmv.v.v v8, v10 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vwsll.vi v10, v9, 16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: vwaddu.wv v10, v10, v8 +; ZVBB-NEXT: srli a0, a0, 2 +; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVBB-NEXT: vslidedown.vx v8, v10, a0 +; ZVBB-NEXT: add a1, a0, a0 +; ZVBB-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v10, v8, a0 +; ZVBB-NEXT: vmv.v.v v8, v10 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv4f16_nxv2f16: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9 +; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9 +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: srli a0, a0, 2 +; ZIP-NEXT: add a1, a0, a0 +; ZIP-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; ZIP-NEXT: vslideup.vx v10, v11, a0 +; ZIP-NEXT: vmv.v.v v8, v10 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv4f16( %a, %b) + ret %res +} + +define @vector_interleave_nxv8f16_nxv4f16( %a, %b) { +; V-LABEL: vector_interleave_nxv8f16_nxv4f16: +; V: # %bb.0: +; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; V-NEXT: vmv1r.v v10, v9 +; V-NEXT: vmv1r.v v11, v8 +; V-NEXT: vwaddu.vv v8, v11, v10 +; V-NEXT: li a0, -1 +; V-NEXT: vwmaccu.vx v8, a0, v10 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv8f16_nxv4f16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVBB-NEXT: vmv1r.v v10, v9 +; ZVBB-NEXT: vmv1r.v v11, v8 +; ZVBB-NEXT: vwsll.vi v8, v10, 16 +; ZVBB-NEXT: vwaddu.wv v8, v8, v11 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv8f16_nxv4f16: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZIP-NEXT: vmv1r.v v10, v9 +; ZIP-NEXT: vmv1r.v v11, v8 +; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10 +; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv8f16( %a, %b) + ret %res +} + +define @vector_interleave_nxv4f32_nxv2f32( %a, %b) { +; V-LABEL: vector_interleave_nxv4f32_nxv2f32: +; V: # %bb.0: +; V-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; V-NEXT: vmv1r.v v10, v9 +; V-NEXT: vmv1r.v v11, v8 +; V-NEXT: vwaddu.vv v8, v11, v10 +; V-NEXT: li a0, -1 +; V-NEXT: vwmaccu.vx v8, a0, v10 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv4f32_nxv2f32: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; ZVBB-NEXT: vmv1r.v v10, v9 +; ZVBB-NEXT: vmv1r.v v11, v8 +; ZVBB-NEXT: li a0, 32 +; ZVBB-NEXT: vwsll.vx v8, v10, a0 +; ZVBB-NEXT: vwaddu.wv v8, v8, v11 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv4f32_nxv2f32: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; ZIP-NEXT: vmv1r.v v10, v9 +; ZIP-NEXT: vmv1r.v v11, v8 +; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10 +; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10 +; ZIP-NEXT: ret + %res = call 
@llvm.vector.interleave2.nxv4f32( %a, %b) + ret %res +} + +define @vector_interleave_nxv16bf16_nxv8bf16( %a, %b) { +; V-LABEL: vector_interleave_nxv16bf16_nxv8bf16: +; V: # %bb.0: +; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; V-NEXT: vmv2r.v v12, v10 +; V-NEXT: vmv2r.v v14, v8 +; V-NEXT: vwaddu.vv v8, v14, v12 +; V-NEXT: li a0, -1 +; V-NEXT: vwmaccu.vx v8, a0, v12 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv16bf16_nxv8bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVBB-NEXT: vmv2r.v v12, v10 +; ZVBB-NEXT: vmv2r.v v14, v8 +; ZVBB-NEXT: vwsll.vi v8, v12, 16 +; ZVBB-NEXT: vwaddu.wv v8, v8, v14 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv16bf16_nxv8bf16: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZIP-NEXT: vmv2r.v v12, v10 +; ZIP-NEXT: vmv2r.v v14, v8 +; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 +; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv16bf16( %a, %b) + ret %res +} + +define @vector_interleave_nxv16f16_nxv8f16( %a, %b) { +; V-LABEL: vector_interleave_nxv16f16_nxv8f16: +; V: # %bb.0: +; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; V-NEXT: vmv2r.v v12, v10 +; V-NEXT: vmv2r.v v14, v8 +; V-NEXT: vwaddu.vv v8, v14, v12 +; V-NEXT: li a0, -1 +; V-NEXT: vwmaccu.vx v8, a0, v12 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv16f16_nxv8f16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVBB-NEXT: vmv2r.v v12, v10 +; ZVBB-NEXT: vmv2r.v v14, v8 +; ZVBB-NEXT: vwsll.vi v8, v12, 16 +; ZVBB-NEXT: vwaddu.wv v8, v8, v14 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv16f16_nxv8f16: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZIP-NEXT: vmv2r.v v12, v10 +; ZIP-NEXT: vmv2r.v v14, v8 +; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 +; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv16f16( %a, %b) + ret %res +} + +define @vector_interleave_nxv8f32_nxv4f32( %a, %b) { +; V-LABEL: vector_interleave_nxv8f32_nxv4f32: +; V: # %bb.0: +; V-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; V-NEXT: vmv2r.v v12, v10 +; V-NEXT: vmv2r.v v14, v8 +; V-NEXT: vwaddu.vv v8, v14, v12 +; V-NEXT: li a0, -1 +; V-NEXT: vwmaccu.vx v8, a0, v12 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv8f32_nxv4f32: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; ZVBB-NEXT: vmv2r.v v12, v10 +; ZVBB-NEXT: vmv2r.v v14, v8 +; ZVBB-NEXT: li a0, 32 +; ZVBB-NEXT: vwsll.vx v8, v12, a0 +; ZVBB-NEXT: vwaddu.wv v8, v8, v14 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv8f32_nxv4f32: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; ZIP-NEXT: vmv2r.v v12, v10 +; ZIP-NEXT: vmv2r.v v14, v8 +; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 +; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv8f32( %a, %b) + ret %res +} + +define @vector_interleave_nxv4f64_nxv2f64( %a, %b) { +; V-LABEL: vector_interleave_nxv4f64_nxv2f64: +; V: # %bb.0: +; V-NEXT: csrr a0, vlenb +; V-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; V-NEXT: vid.v v12 +; V-NEXT: srli a0, a0, 2 +; V-NEXT: vand.vi v13, v12, 1 +; V-NEXT: vmsne.vi v0, v13, 0 +; V-NEXT: vsrl.vi v16, v12, 1 +; V-NEXT: vadd.vx v16, v16, a0, v0.t +; V-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; V-NEXT: vrgatherei16.vv v12, v8, v16 +; V-NEXT: vmv.v.v v8, v12 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv4f64_nxv2f64: +; ZVBB: # %bb.0: +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; ZVBB-NEXT: 
vid.v v12 +; ZVBB-NEXT: srli a0, a0, 2 +; ZVBB-NEXT: vand.vi v13, v12, 1 +; ZVBB-NEXT: vmsne.vi v0, v13, 0 +; ZVBB-NEXT: vsrl.vi v16, v12, 1 +; ZVBB-NEXT: vadd.vx v16, v16, a0, v0.t +; ZVBB-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; ZVBB-NEXT: vrgatherei16.vv v12, v8, v16 +; ZVBB-NEXT: vmv.v.v v8, v12 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv4f64_nxv2f64: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; ZIP-NEXT: vmv2r.v v12, v10 +; ZIP-NEXT: vmv2r.v v14, v8 +; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 +; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv4f64( %a, %b) + ret %res +} + + + +define @vector_interleave_nxv64bf16_nxv32bf16( %a, %b) { +; V-LABEL: vector_interleave_nxv64bf16_nxv32bf16: +; V: # %bb.0: +; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; V-NEXT: vmv8r.v v24, v8 +; V-NEXT: vwaddu.vv v8, v24, v16 +; V-NEXT: li a0, -1 +; V-NEXT: vwaddu.vv v0, v28, v20 +; V-NEXT: vwmaccu.vx v8, a0, v16 +; V-NEXT: vwmaccu.vx v0, a0, v20 +; V-NEXT: vmv8r.v v16, v0 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVBB-NEXT: vwsll.vi v24, v16, 16 +; ZVBB-NEXT: vwsll.vi v0, v20, 16 +; ZVBB-NEXT: vwaddu.wv v24, v24, v8 +; ZVBB-NEXT: vwaddu.wv v0, v0, v12 +; ZVBB-NEXT: vmv8r.v v8, v24 +; ZVBB-NEXT: vmv8r.v v16, v0 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv64bf16_nxv32bf16: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 +; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 +; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 +; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 +; ZIP-NEXT: vmv8r.v v8, v24 +; ZIP-NEXT: vmv8r.v v16, v0 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv64bf16( %a, %b) + ret %res +} + +define @vector_interleave_nxv64f16_nxv32f16( %a, %b) { +; V-LABEL: vector_interleave_nxv64f16_nxv32f16: +; V: # %bb.0: +; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; V-NEXT: vmv8r.v v24, v8 +; V-NEXT: vwaddu.vv v8, v24, v16 +; V-NEXT: li a0, -1 +; V-NEXT: vwaddu.vv v0, v28, v20 +; V-NEXT: vwmaccu.vx v8, a0, v16 +; V-NEXT: vwmaccu.vx v0, a0, v20 +; V-NEXT: vmv8r.v v16, v0 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVBB-NEXT: vwsll.vi v24, v16, 16 +; ZVBB-NEXT: vwsll.vi v0, v20, 16 +; ZVBB-NEXT: vwaddu.wv v24, v24, v8 +; ZVBB-NEXT: vwaddu.wv v0, v0, v12 +; ZVBB-NEXT: vmv8r.v v8, v24 +; ZVBB-NEXT: vmv8r.v v16, v0 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv64f16_nxv32f16: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 +; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 +; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 +; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 +; ZIP-NEXT: vmv8r.v v8, v24 +; ZIP-NEXT: vmv8r.v v16, v0 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv64f16( %a, %b) + ret %res +} + +define @vector_interleave_nxv32f32_nxv16f32( %a, %b) { +; V-LABEL: vector_interleave_nxv32f32_nxv16f32: +; V: # %bb.0: +; V-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; V-NEXT: vmv8r.v v24, v8 +; V-NEXT: vwaddu.vv v8, v24, v16 +; V-NEXT: li a0, -1 +; V-NEXT: vwaddu.vv v0, v28, v20 +; V-NEXT: vwmaccu.vx v8, a0, v16 +; V-NEXT: vwmaccu.vx v0, a0, v20 +; V-NEXT: vmv8r.v v16, v0 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32: +; ZVBB: # %bb.0: +; ZVBB-NEXT: li a0, 32 +; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; ZVBB-NEXT: vwsll.vx v24, 
v16, a0 +; ZVBB-NEXT: vwsll.vx v0, v20, a0 +; ZVBB-NEXT: vwaddu.wv v24, v24, v8 +; ZVBB-NEXT: vwaddu.wv v0, v0, v12 +; ZVBB-NEXT: vmv8r.v v8, v24 +; ZVBB-NEXT: vmv8r.v v16, v0 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv32f32_nxv16f32: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 +; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 +; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 +; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 +; ZIP-NEXT: vmv8r.v v8, v24 +; ZIP-NEXT: vmv8r.v v16, v0 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv32f32( %a, %b) + ret %res +} + +define @vector_interleave_nxv16f64_nxv8f64( %a, %b) { +; V-LABEL: vector_interleave_nxv16f64_nxv8f64: +; V: # %bb.0: +; V-NEXT: csrr a0, vlenb +; V-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; V-NEXT: vid.v v6 +; V-NEXT: vmv8r.v v24, v8 +; V-NEXT: srli a0, a0, 1 +; V-NEXT: vmv4r.v v28, v16 +; V-NEXT: vmv4r.v v16, v12 +; V-NEXT: vand.vi v8, v6, 1 +; V-NEXT: vmsne.vi v0, v8, 0 +; V-NEXT: vsrl.vi v6, v6, 1 +; V-NEXT: vadd.vx v6, v6, a0, v0.t +; V-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; V-NEXT: vrgatherei16.vv v8, v24, v6 +; V-NEXT: vrgatherei16.vv v24, v16, v6 +; V-NEXT: vmv.v.v v16, v24 +; V-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64: +; ZVBB: # %bb.0: +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; ZVBB-NEXT: vid.v v6 +; ZVBB-NEXT: vmv8r.v v24, v8 +; ZVBB-NEXT: srli a0, a0, 1 +; ZVBB-NEXT: vmv4r.v v28, v16 +; ZVBB-NEXT: vmv4r.v v16, v12 +; ZVBB-NEXT: vand.vi v8, v6, 1 +; ZVBB-NEXT: vmsne.vi v0, v8, 0 +; ZVBB-NEXT: vsrl.vi v6, v6, 1 +; ZVBB-NEXT: vadd.vx v6, v6, a0, v0.t +; ZVBB-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; ZVBB-NEXT: vrgatherei16.vv v8, v24, v6 +; ZVBB-NEXT: vrgatherei16.vv v24, v16, v6 +; ZVBB-NEXT: vmv.v.v v16, v24 +; ZVBB-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv16f64_nxv8f64: +; ZIP: # %bb.0: +; ZIP-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 +; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 +; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 +; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 +; ZIP-NEXT: vmv8r.v v8, v24 +; ZIP-NEXT: vmv8r.v v16, v0 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave2.nxv16f64( %a, %b) + ret %res +} + +define @vector_interleave_nxv6f16_nxv2f16( %v0, %v1, %v2) nounwind { +; CHECK-LABEL: vector_interleave_nxv6f16_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: add a3, a0, a2 +; CHECK-NEXT: vle16.v v9, (a3) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: srli a1, a1, 2 +; CHECK-NEXT: add a0, a1, a1 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: add a2, a3, a2 +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v9, (a2) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv6f16_nxv2f16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 1 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: srli a2, a1, 1 +; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vsseg3e16.v v8, (a0) 
+; ZVBB-NEXT: add a3, a0, a2 +; ZVBB-NEXT: vle16.v v9, (a3) +; ZVBB-NEXT: vle16.v v8, (a0) +; ZVBB-NEXT: srli a1, a1, 2 +; ZVBB-NEXT: add a0, a1, a1 +; ZVBB-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v8, v9, a1 +; ZVBB-NEXT: add a2, a3, a2 +; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vle16.v v9, (a2) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 1 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave3.nxv6f16( %v0, %v1, %v2) + ret %res +} + +define @vector_interleave_nxv12f16_nxv4f16( %v0, %v1, %v2) nounwind { +; CHECK-LABEL: vector_interleave_nxv12f16_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vl1re16.v v8, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re16.v v9, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re16.v v10, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv12f16_nxv4f16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; ZVBB-NEXT: vsseg3e16.v v8, (a0) +; ZVBB-NEXT: vl1re16.v v8, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re16.v v9, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re16.v v10, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave3.nxv12f16( %v0, %v1, %v2) + ret %res +} + +define @vector_interleave_nxv24f16_nxv8f16( %v0, %v1, %v2) nounwind { +; CHECK-LABEL: vector_interleave_nxv24f16_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 1 +; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vl2re16.v v8, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2re16.v v10, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2re16.v v12, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv24f16_nxv8f16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: slli a1, a1, 1 +; ZVBB-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; ZVBB-NEXT: vsseg3e16.v v8, (a0) +; ZVBB-NEXT: vl2re16.v v8, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re16.v v10, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re16.v v12, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: 
addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave3.nxv24f16( %v0, %v1, %v2) + ret %res +} + +define @vector_interleave_nxv6bf16_nxv2bf16( %v0, %v1, %v2) nounwind { +; CHECK-LABEL: vector_interleave_nxv6bf16_nxv2bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: add a3, a0, a2 +; CHECK-NEXT: vle16.v v9, (a3) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: srli a1, a1, 2 +; CHECK-NEXT: add a0, a1, a1 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: add a2, a3, a2 +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v9, (a2) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv6bf16_nxv2bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 1 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: srli a2, a1, 1 +; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vsseg3e16.v v8, (a0) +; ZVBB-NEXT: add a3, a0, a2 +; ZVBB-NEXT: vle16.v v9, (a3) +; ZVBB-NEXT: vle16.v v8, (a0) +; ZVBB-NEXT: srli a1, a1, 2 +; ZVBB-NEXT: add a0, a1, a1 +; ZVBB-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v8, v9, a1 +; ZVBB-NEXT: add a2, a3, a2 +; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vle16.v v9, (a2) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 1 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave3.nxv6bf16( %v0, %v1, %v2) + ret %res +} + +define @vector_interleave_nxv12bf16_nxv4bf16( %v0, %v1, %v2) nounwind { +; CHECK-LABEL: vector_interleave_nxv12bf16_nxv4bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vl1re16.v v8, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re16.v v9, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re16.v v10, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv12bf16_nxv4bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma +; ZVBB-NEXT: vsseg3e16.v v8, (a0) +; ZVBB-NEXT: vl1re16.v v8, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re16.v v9, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re16.v v10, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave3.nxv12bf16( %v0, %v1, %v2) + ret %res +} + +define 
@vector_interleave_nxv24bf16_nxv8bf16( %v0, %v1, %v2) nounwind { +; CHECK-LABEL: vector_interleave_nxv24bf16_nxv8bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 1 +; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: vl2re16.v v8, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2re16.v v10, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2re16.v v12, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv24bf16_nxv8bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: slli a1, a1, 1 +; ZVBB-NEXT: vsetvli a2, zero, e16, m2, ta, ma +; ZVBB-NEXT: vsseg3e16.v v8, (a0) +; ZVBB-NEXT: vl2re16.v v8, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re16.v v10, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re16.v v12, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave3.nxv24bf16( %v0, %v1, %v2) + ret %res +} + +define @vector_interleave_nxv3f32_nxv1f32( %v0, %v1, %v2) nounwind { +; CHECK-LABEL: vector_interleave_nxv3f32_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: add a3, a0, a2 +; CHECK-NEXT: vle32.v v9, (a3) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: srli a1, a1, 3 +; CHECK-NEXT: add a0, a1, a1 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: add a2, a3, a2 +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vle32.v v9, (a2) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv3f32_nxv1f32: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 1 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: srli a2, a1, 1 +; ZVBB-NEXT: vsetvli a3, zero, e32, mf2, ta, ma +; ZVBB-NEXT: vsseg3e32.v v8, (a0) +; ZVBB-NEXT: add a3, a0, a2 +; ZVBB-NEXT: vle32.v v9, (a3) +; ZVBB-NEXT: vle32.v v8, (a0) +; ZVBB-NEXT: srli a1, a1, 3 +; ZVBB-NEXT: add a0, a1, a1 +; ZVBB-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v8, v9, a1 +; ZVBB-NEXT: add a2, a3, a2 +; ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; ZVBB-NEXT: vle32.v v9, (a2) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 1 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave3.nxv3f32( %v0, %v1, %v2) + ret %res +} + +define @vector_interleave_nxv6f32_nxv2f32( %v0, %v1, %v2) nounwind { +; CHECK-LABEL: vector_interleave_nxv6f32_nxv2f32: +; CHECK: # 
%bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vl1re32.v v8, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re32.v v9, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re32.v v10, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv6f32_nxv2f32: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: vsetvli a2, zero, e32, m1, ta, ma +; ZVBB-NEXT: vsseg3e32.v v8, (a0) +; ZVBB-NEXT: vl1re32.v v8, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re32.v v9, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re32.v v10, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave3.nxv6f32( %v0, %v1, %v2) + ret %res +} + +define @vector_interleave_nxv12f32_nxv4f32( %v0, %v1, %v2) nounwind { +; CHECK-LABEL: vector_interleave_nxv12f32_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 1 +; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: vl2re32.v v8, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2re32.v v10, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl2re32.v v12, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv12f32_nxv4f32: +; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 @@ -1169,26 +4832,75 @@ define @vector_interleave_nxv24i16_nxv8i16( @llvm.vector.interleave3.nxv24i16( %a, %b, %c) - ret %res + %res = call @llvm.vector.interleave3.nxv12f32( %v0, %v1, %v2) + ret %res } +define @vector_interleave_nxv3f64_nxv1f64( %v0, %v1, %v2) nounwind { +; CHECK-LABEL: vector_interleave_nxv3f64_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: vl1re64.v v8, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re64.v v9, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re64.v v10, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv3f64_nxv1f64: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi 
a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; ZVBB-NEXT: vsseg3e64.v v8, (a0) +; ZVBB-NEXT: vl1re64.v v8, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re64.v v9, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re64.v v10, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave3.nxv3f64( %v0, %v1, %v2) + ret %res +} -define @vector_interleave_nxv12i32_nxv4i32( %a, %b, %c) nounwind { -; CHECK-LABEL: vector_interleave_nxv12i32_nxv4i32: +define @vector_interleave_nxv6f64_nxv2f64( %v0, %v1, %v2) nounwind { +; CHECK-LABEL: vector_interleave_nxv6f64_nxv2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -1198,13 +4910,13 @@ define @vector_interleave_nxv12i32_nxv4i32( @vector_interleave_nxv12i32_nxv4i32( @llvm.vector.interleave3.nxv12i32( %a, %b, %c) - ret %res +; ZVBB-LABEL: vector_interleave_nxv6f64_nxv2f64: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: slli a1, a1, 1 +; ZVBB-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; ZVBB-NEXT: vsseg3e64.v v8, (a0) +; ZVBB-NEXT: vl2re64.v v8, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re64.v v10, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl2re64.v v12, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: li a1, 6 +; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave3.nxv6f64( %v0, %v1, %v2) + ret %res +} + +define @vector_interleave_nxv10f16_nxv2f16( %v0, %v1, %v2, %v3, %v4) nounwind { +; CHECK-LABEL: vector_interleave_nxv10f16_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: add a3, a0, a2 +; CHECK-NEXT: add a4, a3, a2 +; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: add a5, a4, a2 +; CHECK-NEXT: vle16.v v8, (a5) +; CHECK-NEXT: vle16.v v9, (a4) +; CHECK-NEXT: srli a1, a1, 2 +; CHECK-NEXT: add a4, a1, a1 +; CHECK-NEXT: vle16.v v10, (a3) +; CHECK-NEXT: vsetvli zero, a4, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v9, v8, a1 +; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a4, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v8, v10, a1 +; CHECK-NEXT: add a2, a5, a2 +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v10, (a2) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv10f16_nxv2f16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: srli a2, a1, 1 +; ZVBB-NEXT: add a3, a0, a2 +; ZVBB-NEXT: add a4, a3, a2 +; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vsseg5e16.v v8, (a0) +; ZVBB-NEXT: add a5, a4, a2 +; ZVBB-NEXT: vle16.v v8, 
(a5) +; ZVBB-NEXT: vle16.v v9, (a4) +; ZVBB-NEXT: srli a1, a1, 2 +; ZVBB-NEXT: add a4, a1, a1 +; ZVBB-NEXT: vle16.v v10, (a3) +; ZVBB-NEXT: vsetvli zero, a4, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v9, v8, a1 +; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vle16.v v8, (a0) +; ZVBB-NEXT: vsetvli zero, a4, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v8, v10, a1 +; ZVBB-NEXT: add a2, a5, a2 +; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vle16.v v10, (a2) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave5.nxv10f16( %v0, %v1, %v2, %v3, %v4) + ret %res +} + +define @vector_interleave_nxv20f16_nxv4f16( %v0, %v1, %v2, %v3, %v4) nounwind { +; CHECK-LABEL: vector_interleave_nxv20f16_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 2 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: add a2, a0, a1 +; CHECK-NEXT: add a3, a2, a1 +; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vl1re16.v v10, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1re16.v v11, (a3) +; CHECK-NEXT: vl1re16.v v8, (a0) +; CHECK-NEXT: vl1re16.v v9, (a2) +; CHECK-NEXT: add a1, a3, a1 +; CHECK-NEXT: vl1re16.v v12, (a1) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 2 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv20f16_nxv4f16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 2 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: add a2, a0, a1 +; ZVBB-NEXT: add a3, a2, a1 +; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma +; ZVBB-NEXT: vsseg5e16.v v8, (a0) +; ZVBB-NEXT: vl1re16.v v10, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1re16.v v11, (a3) +; ZVBB-NEXT: vl1re16.v v8, (a0) +; ZVBB-NEXT: vl1re16.v v9, (a2) +; ZVBB-NEXT: add a1, a3, a1 +; ZVBB-NEXT: vl1re16.v v12, (a1) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 2 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave5.nxv20f16( %v0, %v1, %v2, %v3, %v4) + ret %res +} + +define @vector_interleave_nxv40f16_nxv8f16( %v0, %v1, %v2, %v3, %v4) nounwind { +; RV32-LABEL: vector_interleave_nxv40f16_nxv8f16: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -80 +; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; RV32-NEXT: addi s0, sp, 80 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 28 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: sub sp, sp, a0 +; RV32-NEXT: andi sp, sp, -64 +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; RV32-NEXT: vmv2r.v v20, v16 +; RV32-NEXT: addi a0, sp, 64 +; RV32-NEXT: vmv2r.v v18, v12 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a2, a1, 2 +; RV32-NEXT: add a1, a2, a1 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 64 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: vmv2r.v v16, v8 +; RV32-NEXT: vmv2r.v v22, v16 +; RV32-NEXT: vmv2r.v v24, v18 +; RV32-NEXT: vmv1r.v v26, v20 +; RV32-NEXT: add a3, a0, a2 +; RV32-NEXT: vmv1r.v v23, v10 +; RV32-NEXT: add a4, a1, a2 +; RV32-NEXT: add a5, 
a4, a2 +; RV32-NEXT: vmv1r.v v25, v14 +; RV32-NEXT: add a6, a5, a2 +; RV32-NEXT: vmv1r.v v18, v11 +; RV32-NEXT: vsseg5e16.v v22, (a0) +; RV32-NEXT: vmv1r.v v20, v15 +; RV32-NEXT: vsseg5e16.v v17, (a1) +; RV32-NEXT: vl1re16.v v16, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re16.v v17, (a6) +; RV32-NEXT: add a6, a3, a2 +; RV32-NEXT: vl1re16.v v10, (a6) +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re16.v v11, (a6) +; RV32-NEXT: vl1re16.v v8, (a0) +; RV32-NEXT: vl1re16.v v9, (a3) +; RV32-NEXT: vl1re16.v v14, (a4) +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 10 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 64 +; RV32-NEXT: add a6, a6, a2 +; RV32-NEXT: vl1re16.v v15, (a5) +; RV32-NEXT: vl1re16.v v12, (a6) +; RV32-NEXT: vl1re16.v v13, (a1) +; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: add a2, a0, a2 +; RV32-NEXT: vs2r.v v16, (a2) +; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: vl8re16.v v16, (a2) +; RV32-NEXT: vl8re16.v v8, (a0) +; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 80 +; RV32-NEXT: ret +; +; RV64-LABEL: vector_interleave_nxv40f16_nxv8f16: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; RV64-NEXT: addi s0, sp, 80 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a1, 28 +; RV64-NEXT: mul a0, a0, a1 +; RV64-NEXT: sub sp, sp, a0 +; RV64-NEXT: andi sp, sp, -64 +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; RV64-NEXT: vmv2r.v v20, v16 +; RV64-NEXT: addi a0, sp, 64 +; RV64-NEXT: vmv2r.v v18, v12 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a2, a1, 2 +; RV64-NEXT: add a1, a2, a1 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 64 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: vmv2r.v v16, v8 +; RV64-NEXT: vmv2r.v v22, v16 +; RV64-NEXT: vmv2r.v v24, v18 +; RV64-NEXT: vmv1r.v v26, v20 +; RV64-NEXT: add a3, a0, a2 +; RV64-NEXT: vmv1r.v v23, v10 +; RV64-NEXT: add a4, a1, a2 +; RV64-NEXT: add a5, a4, a2 +; RV64-NEXT: vmv1r.v v25, v14 +; RV64-NEXT: add a6, a5, a2 +; RV64-NEXT: vmv1r.v v18, v11 +; RV64-NEXT: vsseg5e16.v v22, (a0) +; RV64-NEXT: vmv1r.v v20, v15 +; RV64-NEXT: vsseg5e16.v v17, (a1) +; RV64-NEXT: vl1re16.v v16, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re16.v v17, (a6) +; RV64-NEXT: add a6, a3, a2 +; RV64-NEXT: vl1re16.v v10, (a6) +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re16.v v11, (a6) +; RV64-NEXT: vl1re16.v v8, (a0) +; RV64-NEXT: vl1re16.v v9, (a3) +; RV64-NEXT: vl1re16.v v14, (a4) +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: li a3, 10 +; RV64-NEXT: mul a0, a0, a3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 64 +; RV64-NEXT: add a6, a6, a2 +; RV64-NEXT: vl1re16.v v15, (a5) +; RV64-NEXT: vl1re16.v v12, (a6) +; RV64-NEXT: vl1re16.v v13, (a1) +; RV64-NEXT: slli a2, a2, 3 +; RV64-NEXT: add a2, a0, a2 +; RV64-NEXT: vs2r.v v16, (a2) +; RV64-NEXT: vs8r.v v8, (a0) +; RV64-NEXT: vl8re16.v v16, (a2) +; RV64-NEXT: vl8re16.v v8, (a0) +; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; ZVBB-RV32-LABEL: vector_interleave_nxv40f16_nxv8f16: +; ZVBB-RV32: # %bb.0: +; ZVBB-RV32-NEXT: addi sp, sp, -80 +; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill +; ZVBB-RV32-NEXT: addi s0, sp, 80 +; ZVBB-RV32-NEXT: csrr a0, vlenb +; 
ZVBB-RV32-NEXT: li a1, 28 +; ZVBB-RV32-NEXT: mul a0, a0, a1 +; ZVBB-RV32-NEXT: sub sp, sp, a0 +; ZVBB-RV32-NEXT: andi sp, sp, -64 +; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVBB-RV32-NEXT: vmv2r.v v20, v16 +; ZVBB-RV32-NEXT: addi a0, sp, 64 +; ZVBB-RV32-NEXT: vmv2r.v v18, v12 +; ZVBB-RV32-NEXT: csrr a1, vlenb +; ZVBB-RV32-NEXT: slli a2, a1, 2 +; ZVBB-RV32-NEXT: add a1, a2, a1 +; ZVBB-RV32-NEXT: add a1, sp, a1 +; ZVBB-RV32-NEXT: addi a1, a1, 64 +; ZVBB-RV32-NEXT: csrr a2, vlenb +; ZVBB-RV32-NEXT: vmv2r.v v16, v8 +; ZVBB-RV32-NEXT: vmv2r.v v22, v16 +; ZVBB-RV32-NEXT: vmv2r.v v24, v18 +; ZVBB-RV32-NEXT: vmv1r.v v26, v20 +; ZVBB-RV32-NEXT: add a3, a0, a2 +; ZVBB-RV32-NEXT: vmv1r.v v23, v10 +; ZVBB-RV32-NEXT: add a4, a1, a2 +; ZVBB-RV32-NEXT: add a5, a4, a2 +; ZVBB-RV32-NEXT: vmv1r.v v25, v14 +; ZVBB-RV32-NEXT: add a6, a5, a2 +; ZVBB-RV32-NEXT: vmv1r.v v18, v11 +; ZVBB-RV32-NEXT: vsseg5e16.v v22, (a0) +; ZVBB-RV32-NEXT: vmv1r.v v20, v15 +; ZVBB-RV32-NEXT: vsseg5e16.v v17, (a1) +; ZVBB-RV32-NEXT: vl1re16.v v16, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re16.v v17, (a6) +; ZVBB-RV32-NEXT: add a6, a3, a2 +; ZVBB-RV32-NEXT: vl1re16.v v10, (a6) +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re16.v v11, (a6) +; ZVBB-RV32-NEXT: vl1re16.v v8, (a0) +; ZVBB-RV32-NEXT: vl1re16.v v9, (a3) +; ZVBB-RV32-NEXT: vl1re16.v v14, (a4) +; ZVBB-RV32-NEXT: csrr a0, vlenb +; ZVBB-RV32-NEXT: li a3, 10 +; ZVBB-RV32-NEXT: mul a0, a0, a3 +; ZVBB-RV32-NEXT: add a0, sp, a0 +; ZVBB-RV32-NEXT: addi a0, a0, 64 +; ZVBB-RV32-NEXT: add a6, a6, a2 +; ZVBB-RV32-NEXT: vl1re16.v v15, (a5) +; ZVBB-RV32-NEXT: vl1re16.v v12, (a6) +; ZVBB-RV32-NEXT: vl1re16.v v13, (a1) +; ZVBB-RV32-NEXT: slli a2, a2, 3 +; ZVBB-RV32-NEXT: add a2, a0, a2 +; ZVBB-RV32-NEXT: vs2r.v v16, (a2) +; ZVBB-RV32-NEXT: vs8r.v v8, (a0) +; ZVBB-RV32-NEXT: vl8re16.v v16, (a2) +; ZVBB-RV32-NEXT: vl8re16.v v8, (a0) +; ZVBB-RV32-NEXT: addi sp, s0, -80 +; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload +; ZVBB-RV32-NEXT: addi sp, sp, 80 +; ZVBB-RV32-NEXT: ret +; +; ZVBB-RV64-LABEL: vector_interleave_nxv40f16_nxv8f16: +; ZVBB-RV64: # %bb.0: +; ZVBB-RV64-NEXT: addi sp, sp, -80 +; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZVBB-RV64-NEXT: addi s0, sp, 80 +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a1, 28 +; ZVBB-RV64-NEXT: mul a0, a0, a1 +; ZVBB-RV64-NEXT: sub sp, sp, a0 +; ZVBB-RV64-NEXT: andi sp, sp, -64 +; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVBB-RV64-NEXT: vmv2r.v v20, v16 +; ZVBB-RV64-NEXT: addi a0, sp, 64 +; ZVBB-RV64-NEXT: vmv2r.v v18, v12 +; ZVBB-RV64-NEXT: csrr a1, vlenb +; ZVBB-RV64-NEXT: slli a2, a1, 2 +; ZVBB-RV64-NEXT: add a1, a2, a1 +; ZVBB-RV64-NEXT: add a1, sp, a1 +; ZVBB-RV64-NEXT: addi a1, a1, 64 +; ZVBB-RV64-NEXT: csrr a2, vlenb +; ZVBB-RV64-NEXT: vmv2r.v v16, v8 +; ZVBB-RV64-NEXT: vmv2r.v v22, v16 +; ZVBB-RV64-NEXT: vmv2r.v v24, v18 +; ZVBB-RV64-NEXT: vmv1r.v v26, v20 +; ZVBB-RV64-NEXT: add a3, a0, a2 +; ZVBB-RV64-NEXT: vmv1r.v v23, v10 +; ZVBB-RV64-NEXT: add a4, a1, a2 +; ZVBB-RV64-NEXT: add a5, a4, a2 +; ZVBB-RV64-NEXT: vmv1r.v v25, v14 +; ZVBB-RV64-NEXT: add a6, a5, a2 +; ZVBB-RV64-NEXT: vmv1r.v v18, v11 +; ZVBB-RV64-NEXT: vsseg5e16.v v22, (a0) +; ZVBB-RV64-NEXT: vmv1r.v v20, v15 +; ZVBB-RV64-NEXT: vsseg5e16.v v17, (a1) +; ZVBB-RV64-NEXT: vl1re16.v v16, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re16.v v17, (a6) +; ZVBB-RV64-NEXT: add a6, a3, 
a2 +; ZVBB-RV64-NEXT: vl1re16.v v10, (a6) +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re16.v v11, (a6) +; ZVBB-RV64-NEXT: vl1re16.v v8, (a0) +; ZVBB-RV64-NEXT: vl1re16.v v9, (a3) +; ZVBB-RV64-NEXT: vl1re16.v v14, (a4) +; ZVBB-RV64-NEXT: csrr a0, vlenb +; ZVBB-RV64-NEXT: li a3, 10 +; ZVBB-RV64-NEXT: mul a0, a0, a3 +; ZVBB-RV64-NEXT: add a0, sp, a0 +; ZVBB-RV64-NEXT: addi a0, a0, 64 +; ZVBB-RV64-NEXT: add a6, a6, a2 +; ZVBB-RV64-NEXT: vl1re16.v v15, (a5) +; ZVBB-RV64-NEXT: vl1re16.v v12, (a6) +; ZVBB-RV64-NEXT: vl1re16.v v13, (a1) +; ZVBB-RV64-NEXT: slli a2, a2, 3 +; ZVBB-RV64-NEXT: add a2, a0, a2 +; ZVBB-RV64-NEXT: vs2r.v v16, (a2) +; ZVBB-RV64-NEXT: vs8r.v v8, (a0) +; ZVBB-RV64-NEXT: vl8re16.v v16, (a2) +; ZVBB-RV64-NEXT: vl8re16.v v8, (a0) +; ZVBB-RV64-NEXT: addi sp, s0, -80 +; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZVBB-RV64-NEXT: addi sp, sp, 80 +; ZVBB-RV64-NEXT: ret +; +; ZIP-LABEL: vector_interleave_nxv40f16_nxv8f16: +; ZIP: # %bb.0: +; ZIP-NEXT: addi sp, sp, -80 +; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill +; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill +; ZIP-NEXT: addi s0, sp, 80 +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a1, 28 +; ZIP-NEXT: mul a0, a0, a1 +; ZIP-NEXT: sub sp, sp, a0 +; ZIP-NEXT: andi sp, sp, -64 +; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZIP-NEXT: vmv2r.v v20, v16 +; ZIP-NEXT: addi a0, sp, 64 +; ZIP-NEXT: vmv2r.v v18, v12 +; ZIP-NEXT: csrr a1, vlenb +; ZIP-NEXT: slli a2, a1, 2 +; ZIP-NEXT: add a1, a2, a1 +; ZIP-NEXT: add a1, sp, a1 +; ZIP-NEXT: addi a1, a1, 64 +; ZIP-NEXT: csrr a2, vlenb +; ZIP-NEXT: vmv2r.v v16, v8 +; ZIP-NEXT: vmv2r.v v22, v16 +; ZIP-NEXT: vmv2r.v v24, v18 +; ZIP-NEXT: vmv1r.v v26, v20 +; ZIP-NEXT: add a3, a0, a2 +; ZIP-NEXT: vmv1r.v v23, v10 +; ZIP-NEXT: add a4, a1, a2 +; ZIP-NEXT: add a5, a4, a2 +; ZIP-NEXT: vmv1r.v v25, v14 +; ZIP-NEXT: add a6, a5, a2 +; ZIP-NEXT: vmv1r.v v18, v11 +; ZIP-NEXT: vsseg5e16.v v22, (a0) +; ZIP-NEXT: vmv1r.v v20, v15 +; ZIP-NEXT: vsseg5e16.v v17, (a1) +; ZIP-NEXT: vl1re16.v v16, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re16.v v17, (a6) +; ZIP-NEXT: add a6, a3, a2 +; ZIP-NEXT: vl1re16.v v10, (a6) +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re16.v v11, (a6) +; ZIP-NEXT: vl1re16.v v8, (a0) +; ZIP-NEXT: vl1re16.v v9, (a3) +; ZIP-NEXT: vl1re16.v v14, (a4) +; ZIP-NEXT: csrr a0, vlenb +; ZIP-NEXT: li a3, 10 +; ZIP-NEXT: mul a0, a0, a3 +; ZIP-NEXT: add a0, sp, a0 +; ZIP-NEXT: addi a0, a0, 64 +; ZIP-NEXT: add a6, a6, a2 +; ZIP-NEXT: vl1re16.v v15, (a5) +; ZIP-NEXT: vl1re16.v v12, (a6) +; ZIP-NEXT: vl1re16.v v13, (a1) +; ZIP-NEXT: slli a2, a2, 3 +; ZIP-NEXT: add a2, a0, a2 +; ZIP-NEXT: vs2r.v v16, (a2) +; ZIP-NEXT: vs8r.v v8, (a0) +; ZIP-NEXT: vl8re16.v v16, (a2) +; ZIP-NEXT: vl8re16.v v8, (a0) +; ZIP-NEXT: addi sp, s0, -80 +; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload +; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload +; ZIP-NEXT: addi sp, sp, 80 +; ZIP-NEXT: ret + %res = call @llvm.vector.interleave5.nxv40f16( %v0, %v1, %v2, %v3, %v4) + ret %res } - -define @vector_interleave_nxv6i64_nxv2i64( %a, %b, %c) nounwind { -; CHECK-LABEL: vector_interleave_nxv6i64_nxv2i64: +define @vector_interleave_nxv10bf16_nxv2bf16( %v0, %v1, %v2, %v3, %v4) nounwind { +; CHECK-LABEL: vector_interleave_nxv10bf16_nxv2bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 6 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, 
a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 1 -; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma -; CHECK-NEXT: vsseg3e64.v v8, (a0) -; CHECK-NEXT: vl2re64.v v8, (a0) -; CHECK-NEXT: add a0, a0, a1 -; CHECK-NEXT: vl2re64.v v10, (a0) -; CHECK-NEXT: add a0, a0, a1 -; CHECK-NEXT: vl2re64.v v12, (a0) +; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: add a3, a0, a2 +; CHECK-NEXT: add a4, a3, a2 +; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: add a5, a4, a2 +; CHECK-NEXT: vle16.v v8, (a5) +; CHECK-NEXT: vle16.v v9, (a4) +; CHECK-NEXT: srli a1, a1, 2 +; CHECK-NEXT: add a4, a1, a1 +; CHECK-NEXT: vle16.v v10, (a3) +; CHECK-NEXT: vsetvli zero, a4, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v9, v8, a1 +; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a4, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v8, v10, a1 +; CHECK-NEXT: add a2, a5, a2 +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v10, (a2) ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 6 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv6i64_nxv2i64: +; ZVBB-LABEL: vector_interleave_nxv10bf16_nxv2bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: li a1, 6 -; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb -; ZVBB-NEXT: slli a1, a1, 1 -; ZVBB-NEXT: vsetvli a2, zero, e64, m2, ta, ma -; ZVBB-NEXT: vsseg3e64.v v8, (a0) -; ZVBB-NEXT: vl2re64.v v8, (a0) -; ZVBB-NEXT: add a0, a0, a1 -; ZVBB-NEXT: vl2re64.v v10, (a0) -; ZVBB-NEXT: add a0, a0, a1 -; ZVBB-NEXT: vl2re64.v v12, (a0) +; ZVBB-NEXT: srli a2, a1, 1 +; ZVBB-NEXT: add a3, a0, a2 +; ZVBB-NEXT: add a4, a3, a2 +; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vsseg5e16.v v8, (a0) +; ZVBB-NEXT: add a5, a4, a2 +; ZVBB-NEXT: vle16.v v8, (a5) +; ZVBB-NEXT: vle16.v v9, (a4) +; ZVBB-NEXT: srli a1, a1, 2 +; ZVBB-NEXT: add a4, a1, a1 +; ZVBB-NEXT: vle16.v v10, (a3) +; ZVBB-NEXT: vsetvli zero, a4, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v9, v8, a1 +; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vle16.v v8, (a0) +; ZVBB-NEXT: vsetvli zero, a4, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v8, v10, a1 +; ZVBB-NEXT: add a2, a5, a2 +; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vle16.v v10, (a2) ; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: li a1, 6 -; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret - %res = call @llvm.vector.interleave3.nxv6i64( %a, %b, %c) - ret %res + %res = call @llvm.vector.interleave5.nxv10bf16( %v0, %v1, %v2, %v3, %v4) + ret %res } -define @vector_interleave_nxv80i1_nxv16i1( %a, %b, %c, %d, %e) nounwind { -; CHECK-LABEL: vector_interleave_nxv80i1_nxv16i1: +define @vector_interleave_nxv20bf16_nxv4bf16( %v0, %v1, %v2, %v3, %v4) nounwind { +; CHECK-LABEL: vector_interleave_nxv20bf16_nxv4bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 10 -; CHECK-NEXT: mul a0, a0, a1 -; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmv.v.i v12, 0 -; CHECK-NEXT: addi a4, 
sp, 16 -; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 2 ; CHECK-NEXT: add a0, a1, a0 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: vmerge.vim v14, v12, 1, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmerge.vim v18, v12, 1, v0 -; CHECK-NEXT: add a2, a4, a1 -; CHECK-NEXT: srli a3, a1, 2 -; CHECK-NEXT: vmv2r.v v20, v14 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmerge.vim v16, v12, 1, v0 -; CHECK-NEXT: vmv1r.v v21, v18 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmerge.vim v8, v12, 1, v0 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v16, v19 -; CHECK-NEXT: add a5, a2, a1 -; CHECK-NEXT: vmv1r.v v23, v8 -; CHECK-NEXT: vmv1r.v v18, v9 -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: vmerge.vim v24, v12, 1, v0 -; CHECK-NEXT: vsetvli a6, zero, e8, m1, ta, ma -; CHECK-NEXT: vsseg5e8.v v20, (a4) -; CHECK-NEXT: vmv1r.v v19, v25 -; CHECK-NEXT: vsseg5e8.v v15, (a0) -; CHECK-NEXT: vl1r.v v8, (a5) -; CHECK-NEXT: add a5, a5, a1 -; CHECK-NEXT: vl1r.v v10, (a4) -; CHECK-NEXT: add a4, a5, a1 -; CHECK-NEXT: vl1r.v v12, (a4) -; CHECK-NEXT: add a4, a0, a1 -; CHECK-NEXT: vl1r.v v14, (a4) -; CHECK-NEXT: add a4, a4, a1 -; CHECK-NEXT: vl1r.v v9, (a5) -; CHECK-NEXT: add a5, a4, a1 -; CHECK-NEXT: vl1r.v v16, (a5) -; CHECK-NEXT: add a5, a5, a1 -; CHECK-NEXT: srli a1, a1, 1 -; CHECK-NEXT: vl1r.v v11, (a2) -; CHECK-NEXT: add a2, a3, a3 -; CHECK-NEXT: vl1r.v v15, (a4) -; CHECK-NEXT: add a4, a1, a1 -; CHECK-NEXT: vl1r.v v13, (a0) -; CHECK-NEXT: vl1r.v v17, (a5) -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmsne.vi v18, v8, 0 -; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vmsne.vi v8, v14, 0 -; CHECK-NEXT: vmsne.vi v9, v12, 0 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; CHECK-NEXT: vslideup.vx v0, v18, a3 -; CHECK-NEXT: vslideup.vx v9, v8, a3 -; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma -; CHECK-NEXT: vslideup.vx v0, v9, a1 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmsne.vi v8, v16, 0 +; CHECK-NEXT: add a2, a0, a1 +; CHECK-NEXT: add a3, a2, a1 +; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma +; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: vl1re16.v v10, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1re16.v v11, (a3) +; CHECK-NEXT: vl1re16.v v8, (a0) +; CHECK-NEXT: vl1re16.v v9, (a2) +; CHECK-NEXT: add a1, a3, a1 +; CHECK-NEXT: vl1re16.v v12, (a1) ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 10 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a1, a0, 2 +; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv80i1_nxv16i1: +; ZVBB-LABEL: vector_interleave_nxv20bf16_nxv4bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: li a1, 10 -; ZVBB-NEXT: mul a0, a0, a1 -; ZVBB-NEXT: sub sp, sp, a0 -; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; ZVBB-NEXT: vmv.v.i v12, 0 -; ZVBB-NEXT: addi a4, sp, 16 -; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 2 ; ZVBB-NEXT: add a0, a1, a0 -; ZVBB-NEXT: add a0, sp, a0 -; ZVBB-NEXT: addi a0, a0, 16 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb -; ZVBB-NEXT: vmerge.vim v14, v12, 1, v0 -; ZVBB-NEXT: vmv1r.v v0, v8 -; ZVBB-NEXT: vmerge.vim v18, v12, 1, v0 -; ZVBB-NEXT: add a2, a4, a1 -; ZVBB-NEXT: srli a3, a1, 2 -; ZVBB-NEXT: vmv2r.v v20, v14 -; ZVBB-NEXT: vmv1r.v v0, v9 -; ZVBB-NEXT: vmerge.vim v16, v12, 1, v0 -; 
ZVBB-NEXT: vmv1r.v v21, v18 -; ZVBB-NEXT: vmv1r.v v0, v10 -; ZVBB-NEXT: vmerge.vim v8, v12, 1, v0 -; ZVBB-NEXT: vmv1r.v v22, v16 -; ZVBB-NEXT: vmv1r.v v16, v19 -; ZVBB-NEXT: add a5, a2, a1 -; ZVBB-NEXT: vmv1r.v v23, v8 -; ZVBB-NEXT: vmv1r.v v18, v9 -; ZVBB-NEXT: vmv1r.v v0, v11 -; ZVBB-NEXT: vmerge.vim v24, v12, 1, v0 -; ZVBB-NEXT: vsetvli a6, zero, e8, m1, ta, ma -; ZVBB-NEXT: vsseg5e8.v v20, (a4) -; ZVBB-NEXT: vmv1r.v v19, v25 -; ZVBB-NEXT: vsseg5e8.v v15, (a0) -; ZVBB-NEXT: vl1r.v v8, (a5) -; ZVBB-NEXT: add a5, a5, a1 -; ZVBB-NEXT: vl1r.v v10, (a4) -; ZVBB-NEXT: add a4, a5, a1 -; ZVBB-NEXT: vl1r.v v12, (a4) -; ZVBB-NEXT: add a4, a0, a1 -; ZVBB-NEXT: vl1r.v v14, (a4) -; ZVBB-NEXT: add a4, a4, a1 -; ZVBB-NEXT: vl1r.v v9, (a5) -; ZVBB-NEXT: add a5, a4, a1 -; ZVBB-NEXT: vl1r.v v16, (a5) -; ZVBB-NEXT: add a5, a5, a1 -; ZVBB-NEXT: srli a1, a1, 1 -; ZVBB-NEXT: vl1r.v v11, (a2) -; ZVBB-NEXT: add a2, a3, a3 -; ZVBB-NEXT: vl1r.v v15, (a4) -; ZVBB-NEXT: add a4, a1, a1 -; ZVBB-NEXT: vl1r.v v13, (a0) -; ZVBB-NEXT: vl1r.v v17, (a5) -; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; ZVBB-NEXT: vmsne.vi v18, v8, 0 -; ZVBB-NEXT: vmsne.vi v0, v10, 0 -; ZVBB-NEXT: vmsne.vi v8, v14, 0 -; ZVBB-NEXT: vmsne.vi v9, v12, 0 -; ZVBB-NEXT: vsetvli zero, a2, e8, mf2, ta, ma -; ZVBB-NEXT: vslideup.vx v0, v18, a3 -; ZVBB-NEXT: vslideup.vx v9, v8, a3 -; ZVBB-NEXT: vsetvli zero, a4, e8, m1, ta, ma -; ZVBB-NEXT: vslideup.vx v0, v9, a1 -; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; ZVBB-NEXT: vmsne.vi v8, v16, 0 +; ZVBB-NEXT: add a2, a0, a1 +; ZVBB-NEXT: add a3, a2, a1 +; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma +; ZVBB-NEXT: vsseg5e16.v v8, (a0) +; ZVBB-NEXT: vl1re16.v v10, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1re16.v v11, (a3) +; ZVBB-NEXT: vl1re16.v v8, (a0) +; ZVBB-NEXT: vl1re16.v v9, (a2) +; ZVBB-NEXT: add a1, a3, a1 +; ZVBB-NEXT: vl1re16.v v12, (a1) ; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: li a1, 10 -; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: slli a1, a0, 2 +; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret - %res = call @llvm.vector.interleave5.nxv80i1( %a, %b, %c, %d, %e) - ret %res + %res = call @llvm.vector.interleave5.nxv20bf16( %v0, %v1, %v2, %v3, %v4) + ret %res } - -define @vector_interleave_nxv80i8_nxv16i8( %a, %b, %c, %d, %e) nounwind { -; -; RV32-LABEL: vector_interleave_nxv80i8_nxv16i8: +define @vector_interleave_nxv40bf16_nxv8bf16( %v0, %v1, %v2, %v3, %v4) nounwind { +; RV32-LABEL: vector_interleave_nxv40bf16_nxv8bf16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill @@ -1462,7 +5565,7 @@ define @vector_interleave_nxv80i8_nxv16i8( ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 -; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vmv2r.v v20, v16 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v18, v12 @@ -1483,41 +5586,41 @@ define @vector_interleave_nxv80i8_nxv16i8( ; RV32-NEXT: vmv1r.v v25, v14 ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vmv1r.v v18, v11 -; RV32-NEXT: vsseg5e8.v v22, (a0) +; RV32-NEXT: vsseg5e16.v v22, (a0) ; RV32-NEXT: vmv1r.v v20, v15 -; RV32-NEXT: vsseg5e8.v v17, (a1) -; RV32-NEXT: vl1r.v v16, (a6) +; RV32-NEXT: vsseg5e16.v v17, (a1) +; RV32-NEXT: vl1re16.v v16, (a6) ; RV32-NEXT: add a6, a6, a2 -; RV32-NEXT: vl1r.v v17, (a6) +; RV32-NEXT: vl1re16.v v17, (a6) ; RV32-NEXT: add a6, a3, a2 -; RV32-NEXT: vl1r.v v10, (a6) +; RV32-NEXT: vl1re16.v v10, (a6) ; RV32-NEXT: add a6, a6, 
a2 -; RV32-NEXT: vl1r.v v11, (a6) -; RV32-NEXT: vl1r.v v8, (a0) -; RV32-NEXT: vl1r.v v9, (a3) -; RV32-NEXT: vl1r.v v14, (a4) +; RV32-NEXT: vl1re16.v v11, (a6) +; RV32-NEXT: vl1re16.v v8, (a0) +; RV32-NEXT: vl1re16.v v9, (a3) +; RV32-NEXT: vl1re16.v v14, (a4) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 10 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 -; RV32-NEXT: vl1r.v v15, (a5) -; RV32-NEXT: vl1r.v v12, (a6) -; RV32-NEXT: vl1r.v v13, (a1) +; RV32-NEXT: vl1re16.v v15, (a5) +; RV32-NEXT: vl1re16.v v12, (a6) +; RV32-NEXT: vl1re16.v v13, (a1) ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vs2r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) -; RV32-NEXT: vl8r.v v16, (a2) -; RV32-NEXT: vl8r.v v8, (a0) +; RV32-NEXT: vl8re16.v v16, (a2) +; RV32-NEXT: vl8re16.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; -; RV64-LABEL: vector_interleave_nxv80i8_nxv16i8: +; RV64-LABEL: vector_interleave_nxv40bf16_nxv8bf16: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill @@ -1528,7 +5631,7 @@ define @vector_interleave_nxv80i8_nxv16i8( ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 -; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vmv2r.v v20, v16 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v18, v12 @@ -1549,41 +5652,41 @@ define @vector_interleave_nxv80i8_nxv16i8( ; RV64-NEXT: vmv1r.v v25, v14 ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vmv1r.v v18, v11 -; RV64-NEXT: vsseg5e8.v v22, (a0) +; RV64-NEXT: vsseg5e16.v v22, (a0) ; RV64-NEXT: vmv1r.v v20, v15 -; RV64-NEXT: vsseg5e8.v v17, (a1) -; RV64-NEXT: vl1r.v v16, (a6) +; RV64-NEXT: vsseg5e16.v v17, (a1) +; RV64-NEXT: vl1re16.v v16, (a6) ; RV64-NEXT: add a6, a6, a2 -; RV64-NEXT: vl1r.v v17, (a6) +; RV64-NEXT: vl1re16.v v17, (a6) ; RV64-NEXT: add a6, a3, a2 -; RV64-NEXT: vl1r.v v10, (a6) +; RV64-NEXT: vl1re16.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 -; RV64-NEXT: vl1r.v v11, (a6) -; RV64-NEXT: vl1r.v v8, (a0) -; RV64-NEXT: vl1r.v v9, (a3) -; RV64-NEXT: vl1r.v v14, (a4) +; RV64-NEXT: vl1re16.v v11, (a6) +; RV64-NEXT: vl1re16.v v8, (a0) +; RV64-NEXT: vl1re16.v v9, (a3) +; RV64-NEXT: vl1re16.v v14, (a4) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 10 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 -; RV64-NEXT: vl1r.v v15, (a5) -; RV64-NEXT: vl1r.v v12, (a6) -; RV64-NEXT: vl1r.v v13, (a1) +; RV64-NEXT: vl1re16.v v15, (a5) +; RV64-NEXT: vl1re16.v v12, (a6) +; RV64-NEXT: vl1re16.v v13, (a1) ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vs2r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) -; RV64-NEXT: vl8r.v v16, (a2) -; RV64-NEXT: vl8r.v v8, (a0) +; RV64-NEXT: vl8re16.v v16, (a2) +; RV64-NEXT: vl8re16.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; -; ZVBB-RV32-LABEL: vector_interleave_nxv80i8_nxv16i8: +; ZVBB-RV32-LABEL: vector_interleave_nxv40bf16_nxv8bf16: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill @@ -1594,7 +5697,7 @@ define @vector_interleave_nxv80i8_nxv16i8( ; ZVBB-RV32-NEXT: mul a0, a0, a1 ; ZVBB-RV32-NEXT: 
sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 -; ZVBB-RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v20, v16 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v18, v12 @@ -1615,41 +5718,41 @@ define @vector_interleave_nxv80i8_nxv16i8( ; ZVBB-RV32-NEXT: vmv1r.v v25, v14 ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vmv1r.v v18, v11 -; ZVBB-RV32-NEXT: vsseg5e8.v v22, (a0) +; ZVBB-RV32-NEXT: vsseg5e16.v v22, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v20, v15 -; ZVBB-RV32-NEXT: vsseg5e8.v v17, (a1) -; ZVBB-RV32-NEXT: vl1r.v v16, (a6) +; ZVBB-RV32-NEXT: vsseg5e16.v v17, (a1) +; ZVBB-RV32-NEXT: vl1re16.v v16, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 -; ZVBB-RV32-NEXT: vl1r.v v17, (a6) +; ZVBB-RV32-NEXT: vl1re16.v v17, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 -; ZVBB-RV32-NEXT: vl1r.v v10, (a6) +; ZVBB-RV32-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 -; ZVBB-RV32-NEXT: vl1r.v v11, (a6) -; ZVBB-RV32-NEXT: vl1r.v v8, (a0) -; ZVBB-RV32-NEXT: vl1r.v v9, (a3) -; ZVBB-RV32-NEXT: vl1r.v v14, (a4) +; ZVBB-RV32-NEXT: vl1re16.v v11, (a6) +; ZVBB-RV32-NEXT: vl1re16.v v8, (a0) +; ZVBB-RV32-NEXT: vl1re16.v v9, (a3) +; ZVBB-RV32-NEXT: vl1re16.v v14, (a4) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 10 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 -; ZVBB-RV32-NEXT: vl1r.v v15, (a5) -; ZVBB-RV32-NEXT: vl1r.v v12, (a6) -; ZVBB-RV32-NEXT: vl1r.v v13, (a1) +; ZVBB-RV32-NEXT: vl1re16.v v15, (a5) +; ZVBB-RV32-NEXT: vl1re16.v v12, (a6) +; ZVBB-RV32-NEXT: vl1re16.v v13, (a1) ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vs2r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) -; ZVBB-RV32-NEXT: vl8r.v v16, (a2) -; ZVBB-RV32-NEXT: vl8r.v v8, (a0) +; ZVBB-RV32-NEXT: vl8re16.v v16, (a2) +; ZVBB-RV32-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; -; ZVBB-RV64-LABEL: vector_interleave_nxv80i8_nxv16i8: +; ZVBB-RV64-LABEL: vector_interleave_nxv40bf16_nxv8bf16: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill @@ -1660,7 +5763,7 @@ define @vector_interleave_nxv80i8_nxv16i8( ; ZVBB-RV64-NEXT: mul a0, a0, a1 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 -; ZVBB-RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v20, v16 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v18, v12 @@ -1681,41 +5784,41 @@ define @vector_interleave_nxv80i8_nxv16i8( ; ZVBB-RV64-NEXT: vmv1r.v v25, v14 ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vmv1r.v v18, v11 -; ZVBB-RV64-NEXT: vsseg5e8.v v22, (a0) +; ZVBB-RV64-NEXT: vsseg5e16.v v22, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v20, v15 -; ZVBB-RV64-NEXT: vsseg5e8.v v17, (a1) -; ZVBB-RV64-NEXT: vl1r.v v16, (a6) +; ZVBB-RV64-NEXT: vsseg5e16.v v17, (a1) +; ZVBB-RV64-NEXT: vl1re16.v v16, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 -; ZVBB-RV64-NEXT: vl1r.v v17, (a6) +; ZVBB-RV64-NEXT: vl1re16.v v17, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 -; ZVBB-RV64-NEXT: vl1r.v v10, (a6) +; ZVBB-RV64-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 -; ZVBB-RV64-NEXT: vl1r.v v11, (a6) -; ZVBB-RV64-NEXT: vl1r.v v8, (a0) -; ZVBB-RV64-NEXT: vl1r.v v9, (a3) -; 
ZVBB-RV64-NEXT: vl1r.v v14, (a4) +; ZVBB-RV64-NEXT: vl1re16.v v11, (a6) +; ZVBB-RV64-NEXT: vl1re16.v v8, (a0) +; ZVBB-RV64-NEXT: vl1re16.v v9, (a3) +; ZVBB-RV64-NEXT: vl1re16.v v14, (a4) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 10 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 -; ZVBB-RV64-NEXT: vl1r.v v15, (a5) -; ZVBB-RV64-NEXT: vl1r.v v12, (a6) -; ZVBB-RV64-NEXT: vl1r.v v13, (a1) +; ZVBB-RV64-NEXT: vl1re16.v v15, (a5) +; ZVBB-RV64-NEXT: vl1re16.v v12, (a6) +; ZVBB-RV64-NEXT: vl1re16.v v13, (a1) ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vs2r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) -; ZVBB-RV64-NEXT: vl8r.v v16, (a2) -; ZVBB-RV64-NEXT: vl8r.v v8, (a0) +; ZVBB-RV64-NEXT: vl8re16.v v16, (a2) +; ZVBB-RV64-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; -; ZIP-LABEL: vector_interleave_nxv80i8_nxv16i8: +; ZIP-LABEL: vector_interleave_nxv40bf16_nxv8bf16: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill @@ -1726,7 +5829,7 @@ define @vector_interleave_nxv80i8_nxv16i8( ; ZIP-NEXT: mul a0, a0, a1 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 -; ZIP-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZIP-NEXT: vmv2r.v v20, v16 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v18, v12 @@ -1747,46 +5850,121 @@ define @vector_interleave_nxv80i8_nxv16i8( ; ZIP-NEXT: vmv1r.v v25, v14 ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vmv1r.v v18, v11 -; ZIP-NEXT: vsseg5e8.v v22, (a0) +; ZIP-NEXT: vsseg5e16.v v22, (a0) ; ZIP-NEXT: vmv1r.v v20, v15 -; ZIP-NEXT: vsseg5e8.v v17, (a1) -; ZIP-NEXT: vl1r.v v16, (a6) +; ZIP-NEXT: vsseg5e16.v v17, (a1) +; ZIP-NEXT: vl1re16.v v16, (a6) ; ZIP-NEXT: add a6, a6, a2 -; ZIP-NEXT: vl1r.v v17, (a6) +; ZIP-NEXT: vl1re16.v v17, (a6) ; ZIP-NEXT: add a6, a3, a2 -; ZIP-NEXT: vl1r.v v10, (a6) +; ZIP-NEXT: vl1re16.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 -; ZIP-NEXT: vl1r.v v11, (a6) -; ZIP-NEXT: vl1r.v v8, (a0) -; ZIP-NEXT: vl1r.v v9, (a3) -; ZIP-NEXT: vl1r.v v14, (a4) +; ZIP-NEXT: vl1re16.v v11, (a6) +; ZIP-NEXT: vl1re16.v v8, (a0) +; ZIP-NEXT: vl1re16.v v9, (a3) +; ZIP-NEXT: vl1re16.v v14, (a4) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 10 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 -; ZIP-NEXT: vl1r.v v15, (a5) -; ZIP-NEXT: vl1r.v v12, (a6) -; ZIP-NEXT: vl1r.v v13, (a1) +; ZIP-NEXT: vl1re16.v v15, (a5) +; ZIP-NEXT: vl1re16.v v12, (a6) +; ZIP-NEXT: vl1re16.v v13, (a1) ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; ZIP-NEXT: vs2r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) -; ZIP-NEXT: vl8r.v v16, (a2) -; ZIP-NEXT: vl8r.v v8, (a0) +; ZIP-NEXT: vl8re16.v v16, (a2) +; ZIP-NEXT: vl8re16.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret - %res = call @llvm.vector.interleave5.nxv80i8( %a, %b, %c, %d, %e) - ret %res + %res = call @llvm.vector.interleave5.nxv40bf16( %v0, %v1, %v2, %v3, %v4) + ret %res } +define @vector_interleave_nxv5f32_nxv1f32( %v0, %v1, %v2, %v3, %v4) nounwind { +; CHECK-LABEL: vector_interleave_nxv5f32_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 
+; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: add a3, a0, a2 +; CHECK-NEXT: add a4, a3, a2 +; CHECK-NEXT: vsetvli a5, zero, e32, mf2, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: add a5, a4, a2 +; CHECK-NEXT: vle32.v v8, (a5) +; CHECK-NEXT: vle32.v v9, (a4) +; CHECK-NEXT: srli a1, a1, 3 +; CHECK-NEXT: add a4, a1, a1 +; CHECK-NEXT: vle32.v v10, (a3) +; CHECK-NEXT: vsetvli zero, a4, e32, m1, ta, ma +; CHECK-NEXT: vslideup.vx v9, v8, a1 +; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a4, e32, m1, ta, ma +; CHECK-NEXT: vslideup.vx v8, v10, a1 +; CHECK-NEXT: add a2, a5, a2 +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vle32.v v10, (a2) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 1 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv5f32_nxv1f32: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: srli a2, a1, 1 +; ZVBB-NEXT: add a3, a0, a2 +; ZVBB-NEXT: add a4, a3, a2 +; ZVBB-NEXT: vsetvli a5, zero, e32, mf2, ta, ma +; ZVBB-NEXT: vsseg5e32.v v8, (a0) +; ZVBB-NEXT: add a5, a4, a2 +; ZVBB-NEXT: vle32.v v8, (a5) +; ZVBB-NEXT: vle32.v v9, (a4) +; ZVBB-NEXT: srli a1, a1, 3 +; ZVBB-NEXT: add a4, a1, a1 +; ZVBB-NEXT: vle32.v v10, (a3) +; ZVBB-NEXT: vsetvli zero, a4, e32, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v9, v8, a1 +; ZVBB-NEXT: vsetvli a3, zero, e32, mf2, ta, ma +; ZVBB-NEXT: vle32.v v8, (a0) +; ZVBB-NEXT: vsetvli zero, a4, e32, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v8, v10, a1 +; ZVBB-NEXT: add a2, a5, a2 +; ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; ZVBB-NEXT: vle32.v v10, (a2) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 1 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave5.nxv5f32( %v0, %v1, %v2, %v3, %v4) + ret %res +} -define @vector_interleave_nxv40i8_nxv8i8( %a, %b, %c, %d, %e) nounwind { -; CHECK-LABEL: vector_interleave_nxv40i8_nxv8i8: +define @vector_interleave_nxv10f32_nxv2f32( %v0, %v1, %v2, %v3, %v4) nounwind { +; CHECK-LABEL: vector_interleave_nxv10f32_nxv2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb @@ -1797,15 +5975,15 @@ define @vector_interleave_nxv40i8_nxv8i8( %a ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0) -; CHECK-NEXT: vl1r.v v10, (a3) +; CHECK-NEXT: vsetvli a4, zero, e32, m1, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: vl1re32.v v10, (a3) ; CHECK-NEXT: add a3, a3, a1 -; CHECK-NEXT: vl1r.v v11, (a3) -; CHECK-NEXT: vl1r.v v8, (a0) -; CHECK-NEXT: vl1r.v v9, (a2) +; CHECK-NEXT: vl1re32.v v11, (a3) +; CHECK-NEXT: vl1re32.v v8, (a0) +; CHECK-NEXT: vl1re32.v v9, (a2) ; CHECK-NEXT: add a1, a3, a1 -; CHECK-NEXT: vl1r.v v12, (a1) +; CHECK-NEXT: vl1re32.v v12, (a1) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 2 ; CHECK-NEXT: add a0, a1, a0 @@ -1813,7 +5991,7 @@ define @vector_interleave_nxv40i8_nxv8i8( %a ; CHECK-NEXT: addi sp, sp, 
16 ; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv40i8_nxv8i8: +; ZVBB-LABEL: vector_interleave_nxv10f32_nxv2f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb @@ -1824,29 +6002,27 @@ define @vector_interleave_nxv40i8_nxv8i8( %a ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 -; ZVBB-NEXT: vsetvli a4, zero, e8, m1, ta, ma -; ZVBB-NEXT: vsseg5e8.v v8, (a0) -; ZVBB-NEXT: vl1r.v v10, (a3) +; ZVBB-NEXT: vsetvli a4, zero, e32, m1, ta, ma +; ZVBB-NEXT: vsseg5e32.v v8, (a0) +; ZVBB-NEXT: vl1re32.v v10, (a3) ; ZVBB-NEXT: add a3, a3, a1 -; ZVBB-NEXT: vl1r.v v11, (a3) -; ZVBB-NEXT: vl1r.v v8, (a0) -; ZVBB-NEXT: vl1r.v v9, (a2) +; ZVBB-NEXT: vl1re32.v v11, (a3) +; ZVBB-NEXT: vl1re32.v v8, (a0) +; ZVBB-NEXT: vl1re32.v v9, (a2) ; ZVBB-NEXT: add a1, a3, a1 -; ZVBB-NEXT: vl1r.v v12, (a1) +; ZVBB-NEXT: vl1re32.v v12, (a1) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 2 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret - %res = call @llvm.vector.interleave5.nxv40i8( %a, %b, %c, %d, %e) - ret %res + %res = call @llvm.vector.interleave5.nxv10f32( %v0, %v1, %v2, %v3, %v4) + ret %res } - -define @vector_interleave_nxv20i32_nxv4i32( %a, %b, %c, %d, %e) nounwind { -; -; RV32-LABEL: vector_interleave_nxv20i32_nxv4i32: +define @vector_interleave_nxv20f32_nxv4f32( %v0, %v1, %v2, %v3, %v4) nounwind { +; RV32-LABEL: vector_interleave_nxv20f32_nxv4f32: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill @@ -1912,7 +6088,7 @@ define @vector_interleave_nxv20i32_nxv4i32( @vector_interleave_nxv20i32_nxv4i32( @vector_interleave_nxv20i32_nxv4i32( @vector_interleave_nxv20i32_nxv4i32( @vector_interleave_nxv20i32_nxv4i32( @llvm.vector.interleave5.nxv20i32( %a, %b, %c, %d, %e) - ret %res + %res = call @llvm.vector.interleave5.nxv20f32( %v0, %v1, %v2, %v3, %v4) + ret %res } - -define @vector_interleave_nxv10i64_nxv2i64( %a, %b, %c, %d, %e) nounwind { +define @vector_interleave_nxv5f64_nxv1f64( %v0, %v1, %v2, %v3, %v4) nounwind { +; CHECK-LABEL: vector_interleave_nxv5f64_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 2 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: add a2, a0, a1 +; CHECK-NEXT: add a3, a2, a1 +; CHECK-NEXT: vsetvli a4, zero, e64, m1, ta, ma +; CHECK-NEXT: vsseg5e64.v v8, (a0) +; CHECK-NEXT: vl1re64.v v10, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1re64.v v11, (a3) +; CHECK-NEXT: vl1re64.v v8, (a0) +; CHECK-NEXT: vl1re64.v v9, (a2) +; CHECK-NEXT: add a1, a3, a1 +; CHECK-NEXT: vl1re64.v v12, (a1) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 2 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret ; -; RV32-LABEL: vector_interleave_nxv10i64_nxv2i64: +; ZVBB-LABEL: vector_interleave_nxv5f64_nxv1f64: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 2 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: add a2, a0, a1 +; ZVBB-NEXT: add a3, a2, a1 +; ZVBB-NEXT: vsetvli a4, zero, e64, m1, ta, ma +; ZVBB-NEXT: vsseg5e64.v v8, (a0) +; ZVBB-NEXT: vl1re64.v v10, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1re64.v v11, (a3) +; ZVBB-NEXT: vl1re64.v v8, (a0) +; ZVBB-NEXT: vl1re64.v 
v9, (a2) +; ZVBB-NEXT: add a1, a3, a1 +; ZVBB-NEXT: vl1re64.v v12, (a1) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 2 +; ZVBB-NEXT: add a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave5.nxv5f64( %v0, %v1, %v2, %v3, %v4) + ret %res +} + +define @vector_interleave_nxv10f64_nxv2f64( %v0, %v1, %v2, %v3, %v4) nounwind { +; RV32-LABEL: vector_interleave_nxv10f64_nxv2f64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill @@ -2248,7 +6480,7 @@ define @vector_interleave_nxv10i64_nxv2i64( @vector_interleave_nxv10i64_nxv2i64( @vector_interleave_nxv10i64_nxv2i64( @vector_interleave_nxv10i64_nxv2i64( @vector_interleave_nxv10i64_nxv2i64( @llvm.vector.interleave5.nxv10i64( %a, %b, %c, %d, %e) - ret %res + %res = call @llvm.vector.interleave5.nxv10f64( %v0, %v1, %v2, %v3, %v4) + ret %res } -define @vector_interleave_nxv112i1_nxv16i1( %a, %b, %c, %d, %e, %f, %g) nounwind { -; CHECK-LABEL: vector_interleave_nxv112i1_nxv16i1: +define @vector_interleave_nxv14f16_nxv2f16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { +; CHECK-LABEL: vector_interleave_nxv14f16_nxv2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 14 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmv.v.i v14, 0 -; CHECK-NEXT: addi a4, sp, 16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a1, a0, 3 -; CHECK-NEXT: sub a0, a1, a0 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: vmerge.vim v16, v14, 1, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmerge.vim v22, v14, 1, v0 -; CHECK-NEXT: add a3, a4, a2 -; CHECK-NEXT: srli a1, a2, 2 -; CHECK-NEXT: add a5, a0, a2 -; CHECK-NEXT: vmv4r.v v24, v16 -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmerge.vim v18, v14, 1, v0 -; CHECK-NEXT: add a6, a3, a2 -; CHECK-NEXT: vmv1r.v v25, v22 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vmerge.vim v8, v14, 1, v0 -; CHECK-NEXT: vmv1r.v v26, v18 -; CHECK-NEXT: vmv1r.v v0, v11 -; CHECK-NEXT: vmerge.vim v20, v14, 1, v0 -; CHECK-NEXT: vmv1r.v v27, v8 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vmerge.vim v10, v14, 1, v0 -; CHECK-NEXT: vmv1r.v v28, v20 -; CHECK-NEXT: vmv1r.v v18, v23 -; CHECK-NEXT: add a7, a6, a2 -; CHECK-NEXT: vmv1r.v v29, v10 -; CHECK-NEXT: vmv1r.v v20, v9 -; CHECK-NEXT: vmv1r.v v0, v13 -; CHECK-NEXT: vmerge.vim v30, v14, 1, v0 -; CHECK-NEXT: vmv1r.v v22, v11 -; CHECK-NEXT: vsetvli t0, zero, e8, m1, ta, ma -; CHECK-NEXT: vsseg7e8.v v24, (a4) -; CHECK-NEXT: vmv1r.v v23, v31 -; CHECK-NEXT: vsseg7e8.v v17, (a0) -; CHECK-NEXT: vl1r.v v8, (a6) -; CHECK-NEXT: add a6, a7, a2 -; CHECK-NEXT: vl1r.v v10, (a4) -; CHECK-NEXT: add a4, a6, a2 -; CHECK-NEXT: vl1r.v v12, (a6) -; CHECK-NEXT: add a6, a4, a2 -; CHECK-NEXT: vl1r.v v14, (a6) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: srli a1, a1, 2 +; CHECK-NEXT: add a3, a0, a2 +; CHECK-NEXT: add a4, a3, a2 +; CHECK-NEXT: add a5, a4, a2 ; CHECK-NEXT: add a6, a5, a2 -; CHECK-NEXT: vl1r.v v16, (a5) -; CHECK-NEXT: add a5, a6, a2 -; CHECK-NEXT: vl1r.v v18, (a5) -; CHECK-NEXT: add a5, a5, a2 -; CHECK-NEXT: vl1r.v v9, (a7) -; CHECK-NEXT: add a7, a5, a2 -; CHECK-NEXT: vl1r.v v20, (a7) -; CHECK-NEXT: add a7, a7, a2 -; CHECK-NEXT: srli a2, a2, 1 -; CHECK-NEXT: vl1r.v v11, (a3) -; CHECK-NEXT: add a3, a1, a1 -; 
CHECK-NEXT: vl1r.v v13, (a4) -; CHECK-NEXT: add a4, a2, a2 -; CHECK-NEXT: vl1r.v v15, (a0) -; CHECK-NEXT: vl1r.v v19, (a5) -; CHECK-NEXT: vl1r.v v17, (a6) -; CHECK-NEXT: vl1r.v v21, (a7) -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; CHECK-NEXT: vmsne.vi v22, v8, 0 -; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vmsne.vi v9, v12, 0 -; CHECK-NEXT: vmsne.vi v10, v14, 0 -; CHECK-NEXT: vmsne.vi v11, v18, 0 -; CHECK-NEXT: vmsne.vi v8, v16, 0 -; CHECK-NEXT: vmsne.vi v12, v20, 0 -; CHECK-NEXT: vsetvli zero, a3, e8, mf2, ta, ma -; CHECK-NEXT: vslideup.vx v0, v22, a1 -; CHECK-NEXT: vslideup.vx v9, v10, a1 -; CHECK-NEXT: vslideup.vx v8, v11, a1 -; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma -; CHECK-NEXT: vslideup.vx v0, v9, a2 -; CHECK-NEXT: vslideup.vx v8, v12, a2 +; CHECK-NEXT: vsetvli a7, zero, e16, mf2, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: add a7, a6, a2 +; CHECK-NEXT: vle16.v v8, (a7) +; CHECK-NEXT: vle16.v v10, (a6) +; CHECK-NEXT: add a6, a1, a1 +; CHECK-NEXT: add a2, a7, a2 +; CHECK-NEXT: vle16.v v12, (a5) +; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v10, v8, a1 +; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v11, (a2) +; CHECK-NEXT: vle16.v v9, (a4) +; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v9, v12, a1 +; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v12, (a3) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v8, v12, a1 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv14f16_nxv2f16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 2 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: srli a2, a1, 1 +; ZVBB-NEXT: srli a1, a1, 2 +; ZVBB-NEXT: add a3, a0, a2 +; ZVBB-NEXT: add a4, a3, a2 +; ZVBB-NEXT: add a5, a4, a2 +; ZVBB-NEXT: add a6, a5, a2 +; ZVBB-NEXT: vsetvli a7, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vsseg7e16.v v8, (a0) +; ZVBB-NEXT: add a7, a6, a2 +; ZVBB-NEXT: vle16.v v8, (a7) +; ZVBB-NEXT: vle16.v v10, (a6) +; ZVBB-NEXT: add a6, a1, a1 +; ZVBB-NEXT: add a2, a7, a2 +; ZVBB-NEXT: vle16.v v12, (a5) +; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v10, v8, a1 +; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vle16.v v11, (a2) +; ZVBB-NEXT: vle16.v v9, (a4) +; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v9, v12, a1 +; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vle16.v v12, (a3) +; ZVBB-NEXT: vle16.v v8, (a0) +; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v8, v12, a1 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 2 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave7.nxv14f16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) + ret %res +} + +define @vector_interleave_nxv28f16_nxv4f16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { +; CHECK-LABEL: vector_interleave_nxv28f16_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 14 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a1, a0, 3 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: add a2, a0, a1 +; 
CHECK-NEXT: add a3, a2, a1 +; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: vl1re16.v v10, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1re16.v v11, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1re16.v v8, (a0) +; CHECK-NEXT: add a0, a3, a1 +; CHECK-NEXT: vl1re16.v v9, (a2) +; CHECK-NEXT: vl1re16.v v12, (a3) +; CHECK-NEXT: vl1re16.v v13, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re16.v v14, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 3 +; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; -; ZVBB-LABEL: vector_interleave_nxv112i1_nxv16i1: +; ZVBB-LABEL: vector_interleave_nxv28f16_nxv4f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: li a1, 14 -; ZVBB-NEXT: mul a0, a0, a1 +; ZVBB-NEXT: slli a1, a0, 3 +; ZVBB-NEXT: sub a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 -; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; ZVBB-NEXT: vmv.v.i v14, 0 -; ZVBB-NEXT: addi a4, sp, 16 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: add a2, a0, a1 +; ZVBB-NEXT: add a3, a2, a1 +; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma +; ZVBB-NEXT: vsseg7e16.v v8, (a0) +; ZVBB-NEXT: vl1re16.v v10, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1re16.v v11, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1re16.v v8, (a0) +; ZVBB-NEXT: add a0, a3, a1 +; ZVBB-NEXT: vl1re16.v v9, (a2) +; ZVBB-NEXT: vl1re16.v v12, (a3) +; ZVBB-NEXT: vl1re16.v v13, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re16.v v14, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 3 ; ZVBB-NEXT: sub a0, a1, a0 -; ZVBB-NEXT: add a0, sp, a0 -; ZVBB-NEXT: addi a0, a0, 16 -; ZVBB-NEXT: csrr a2, vlenb -; ZVBB-NEXT: vmerge.vim v16, v14, 1, v0 -; ZVBB-NEXT: vmv1r.v v0, v8 -; ZVBB-NEXT: vmerge.vim v22, v14, 1, v0 -; ZVBB-NEXT: add a3, a4, a2 -; ZVBB-NEXT: srli a1, a2, 2 -; ZVBB-NEXT: add a5, a0, a2 -; ZVBB-NEXT: vmv4r.v v24, v16 -; ZVBB-NEXT: vmv1r.v v0, v9 -; ZVBB-NEXT: vmerge.vim v18, v14, 1, v0 -; ZVBB-NEXT: add a6, a3, a2 -; ZVBB-NEXT: vmv1r.v v25, v22 -; ZVBB-NEXT: vmv1r.v v0, v10 -; ZVBB-NEXT: vmerge.vim v8, v14, 1, v0 -; ZVBB-NEXT: vmv1r.v v26, v18 -; ZVBB-NEXT: vmv1r.v v0, v11 -; ZVBB-NEXT: vmerge.vim v20, v14, 1, v0 -; ZVBB-NEXT: vmv1r.v v27, v8 -; ZVBB-NEXT: vmv1r.v v0, v12 -; ZVBB-NEXT: vmerge.vim v10, v14, 1, v0 -; ZVBB-NEXT: vmv1r.v v28, v20 -; ZVBB-NEXT: vmv1r.v v18, v23 -; ZVBB-NEXT: add a7, a6, a2 -; ZVBB-NEXT: vmv1r.v v29, v10 -; ZVBB-NEXT: vmv1r.v v20, v9 -; ZVBB-NEXT: vmv1r.v v0, v13 -; ZVBB-NEXT: vmerge.vim v30, v14, 1, v0 -; ZVBB-NEXT: vmv1r.v v22, v11 -; ZVBB-NEXT: vsetvli t0, zero, e8, m1, ta, ma -; ZVBB-NEXT: vsseg7e8.v v24, (a4) -; ZVBB-NEXT: vmv1r.v v23, v31 -; ZVBB-NEXT: vsseg7e8.v v17, (a0) -; ZVBB-NEXT: vl1r.v v8, (a6) -; ZVBB-NEXT: add a6, a7, a2 -; ZVBB-NEXT: vl1r.v v10, (a4) -; ZVBB-NEXT: add a4, a6, a2 -; ZVBB-NEXT: vl1r.v v12, (a6) -; ZVBB-NEXT: add a6, a4, a2 -; ZVBB-NEXT: vl1r.v v14, (a6) -; ZVBB-NEXT: add a6, a5, a2 -; ZVBB-NEXT: vl1r.v v16, (a5) -; ZVBB-NEXT: add a5, a6, a2 -; ZVBB-NEXT: vl1r.v v18, (a5) -; ZVBB-NEXT: add a5, a5, a2 -; ZVBB-NEXT: vl1r.v v9, (a7) -; ZVBB-NEXT: add a7, a5, a2 -; ZVBB-NEXT: vl1r.v v20, (a7) -; ZVBB-NEXT: add a7, a7, a2 -; ZVBB-NEXT: srli a2, a2, 1 -; ZVBB-NEXT: vl1r.v v11, (a3) -; ZVBB-NEXT: add a3, a1, a1 -; ZVBB-NEXT: vl1r.v v13, (a4) -; ZVBB-NEXT: add a4, a2, a2 -; ZVBB-NEXT: vl1r.v v15, (a0) -; ZVBB-NEXT: vl1r.v v19, (a5) -; ZVBB-NEXT: vl1r.v v17, (a6) -; 
ZVBB-NEXT: vl1r.v v21, (a7) -; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma -; ZVBB-NEXT: vmsne.vi v22, v8, 0 -; ZVBB-NEXT: vmsne.vi v0, v10, 0 -; ZVBB-NEXT: vmsne.vi v9, v12, 0 -; ZVBB-NEXT: vmsne.vi v10, v14, 0 -; ZVBB-NEXT: vmsne.vi v11, v18, 0 -; ZVBB-NEXT: vmsne.vi v8, v16, 0 -; ZVBB-NEXT: vmsne.vi v12, v20, 0 -; ZVBB-NEXT: vsetvli zero, a3, e8, mf2, ta, ma -; ZVBB-NEXT: vslideup.vx v0, v22, a1 -; ZVBB-NEXT: vslideup.vx v9, v10, a1 -; ZVBB-NEXT: vslideup.vx v8, v11, a1 -; ZVBB-NEXT: vsetvli zero, a4, e8, m1, ta, ma -; ZVBB-NEXT: vslideup.vx v0, v9, a2 -; ZVBB-NEXT: vslideup.vx v8, v12, a2 -; ZVBB-NEXT: csrr a0, vlenb -; ZVBB-NEXT: li a1, 14 -; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret - %res = call @llvm.vector.interleave7.nxv112i1( %a, %b, %c, %d, %e, %f, %g) - ret %res + %res = call @llvm.vector.interleave7.nxv28f16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) + ret %res } - -define @vector_interleave_nxv112i8_nxv16i8( %a, %b, %c, %d, %e, %f, %g) nounwind { -; -; RV32-LABEL: vector_interleave_nxv112i8_nxv16i8: +define @vector_interleave_nxv56f16_nxv8f16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { +; RV32-LABEL: vector_interleave_nxv56f16_nxv8f16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill @@ -2720,7 +6908,7 @@ define @vector_interleave_nxv112i8_nxv16i8( @vector_interleave_nxv112i8_nxv16i8( @vector_interleave_nxv112i8_nxv16i8( @vector_interleave_nxv112i8_nxv16i8( @vector_interleave_nxv112i8_nxv16i8( @vector_interleave_nxv112i8_nxv16i8( @vector_interleave_nxv112i8_nxv16i8( @vector_interleave_nxv112i8_nxv16i8( @vector_interleave_nxv112i8_nxv16i8( @vector_interleave_nxv112i8_nxv16i8( @llvm.vector.interleave7.nxv112i8( %a, %b, %c, %d, %e, %f, %g) - ret %res + %res = call @llvm.vector.interleave7.nxv56f16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) + ret %res } +define @vector_interleave_nxv14bf16_nxv2bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { +; CHECK-LABEL: vector_interleave_nxv14bf16_nxv2bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: srli a1, a1, 2 +; CHECK-NEXT: add a3, a0, a2 +; CHECK-NEXT: add a4, a3, a2 +; CHECK-NEXT: add a5, a4, a2 +; CHECK-NEXT: add a6, a5, a2 +; CHECK-NEXT: vsetvli a7, zero, e16, mf2, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: add a7, a6, a2 +; CHECK-NEXT: vle16.v v8, (a7) +; CHECK-NEXT: vle16.v v10, (a6) +; CHECK-NEXT: add a6, a1, a1 +; CHECK-NEXT: add a2, a7, a2 +; CHECK-NEXT: vle16.v v12, (a5) +; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v10, v8, a1 +; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v11, (a2) +; CHECK-NEXT: vle16.v v9, (a4) +; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v9, v12, a1 +; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v12, (a3) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v8, v12, a1 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv14bf16_nxv2bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 2 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; 
ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: srli a2, a1, 1 +; ZVBB-NEXT: srli a1, a1, 2 +; ZVBB-NEXT: add a3, a0, a2 +; ZVBB-NEXT: add a4, a3, a2 +; ZVBB-NEXT: add a5, a4, a2 +; ZVBB-NEXT: add a6, a5, a2 +; ZVBB-NEXT: vsetvli a7, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vsseg7e16.v v8, (a0) +; ZVBB-NEXT: add a7, a6, a2 +; ZVBB-NEXT: vle16.v v8, (a7) +; ZVBB-NEXT: vle16.v v10, (a6) +; ZVBB-NEXT: add a6, a1, a1 +; ZVBB-NEXT: add a2, a7, a2 +; ZVBB-NEXT: vle16.v v12, (a5) +; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v10, v8, a1 +; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vle16.v v11, (a2) +; ZVBB-NEXT: vle16.v v9, (a4) +; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v9, v12, a1 +; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vle16.v v12, (a3) +; ZVBB-NEXT: vle16.v v8, (a0) +; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v8, v12, a1 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 2 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave7.nxv14bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) + ret %res +} -define @vector_interleave_nxv56i16_nxv8i16( %a, %b, %c, %d, %e, %f, %g) nounwind { +define @vector_interleave_nxv28bf16_nxv4bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { +; CHECK-LABEL: vector_interleave_nxv28bf16_nxv4bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 3 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: add a2, a0, a1 +; CHECK-NEXT: add a3, a2, a1 +; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma +; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: vl1re16.v v10, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1re16.v v11, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1re16.v v8, (a0) +; CHECK-NEXT: add a0, a3, a1 +; CHECK-NEXT: vl1re16.v v9, (a2) +; CHECK-NEXT: vl1re16.v v12, (a3) +; CHECK-NEXT: vl1re16.v v13, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re16.v v14, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 3 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret ; -; RV32-LABEL: vector_interleave_nxv56i16_nxv8i16: +; ZVBB-LABEL: vector_interleave_nxv28bf16_nxv4bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 3 +; ZVBB-NEXT: sub a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: add a2, a0, a1 +; ZVBB-NEXT: add a3, a2, a1 +; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma +; ZVBB-NEXT: vsseg7e16.v v8, (a0) +; ZVBB-NEXT: vl1re16.v v10, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1re16.v v11, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1re16.v v8, (a0) +; ZVBB-NEXT: add a0, a3, a1 +; ZVBB-NEXT: vl1re16.v v9, (a2) +; ZVBB-NEXT: vl1re16.v v12, (a3) +; ZVBB-NEXT: vl1re16.v v13, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re16.v v14, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 3 +; ZVBB-NEXT: sub a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave7.nxv28bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) + ret %res +} + +define @vector_interleave_nxv56bf16_nxv8bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { +; RV32-LABEL: 
vector_interleave_nxv56bf16_nxv8bf16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill @@ -3208,7 +7544,7 @@ define @vector_interleave_nxv56i16_nxv8i16( @vector_interleave_nxv56i16_nxv8i16( @vector_interleave_nxv56i16_nxv8i16( @vector_interleave_nxv56i16_nxv8i16( @vector_interleave_nxv56i16_nxv8i16( @llvm.vector.interleave7.nxv56i16( %a, %b, %c, %d, %e, %f, %g) - ret %res + %res = call @llvm.vector.interleave7.nxv56bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6) + ret %res } +define @vector_interleave_nxv7f32_nxv1f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { +; CHECK-LABEL: vector_interleave_nxv7f32_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: srli a1, a1, 3 +; CHECK-NEXT: add a3, a0, a2 +; CHECK-NEXT: add a4, a3, a2 +; CHECK-NEXT: add a5, a4, a2 +; CHECK-NEXT: add a6, a5, a2 +; CHECK-NEXT: vsetvli a7, zero, e32, mf2, ta, ma +; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: add a7, a6, a2 +; CHECK-NEXT: vle32.v v8, (a7) +; CHECK-NEXT: vle32.v v10, (a6) +; CHECK-NEXT: add a6, a1, a1 +; CHECK-NEXT: add a2, a7, a2 +; CHECK-NEXT: vle32.v v12, (a5) +; CHECK-NEXT: vsetvli zero, a6, e32, m1, ta, ma +; CHECK-NEXT: vslideup.vx v10, v8, a1 +; CHECK-NEXT: vsetvli a5, zero, e32, mf2, ta, ma +; CHECK-NEXT: vle32.v v11, (a2) +; CHECK-NEXT: vle32.v v9, (a4) +; CHECK-NEXT: vsetvli zero, a6, e32, m1, ta, ma +; CHECK-NEXT: vslideup.vx v9, v12, a1 +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma +; CHECK-NEXT: vle32.v v12, (a3) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli zero, a6, e32, m1, ta, ma +; CHECK-NEXT: vslideup.vx v8, v12, a1 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv7f32_nxv1f32: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 2 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: srli a2, a1, 1 +; ZVBB-NEXT: srli a1, a1, 3 +; ZVBB-NEXT: add a3, a0, a2 +; ZVBB-NEXT: add a4, a3, a2 +; ZVBB-NEXT: add a5, a4, a2 +; ZVBB-NEXT: add a6, a5, a2 +; ZVBB-NEXT: vsetvli a7, zero, e32, mf2, ta, ma +; ZVBB-NEXT: vsseg7e32.v v8, (a0) +; ZVBB-NEXT: add a7, a6, a2 +; ZVBB-NEXT: vle32.v v8, (a7) +; ZVBB-NEXT: vle32.v v10, (a6) +; ZVBB-NEXT: add a6, a1, a1 +; ZVBB-NEXT: add a2, a7, a2 +; ZVBB-NEXT: vle32.v v12, (a5) +; ZVBB-NEXT: vsetvli zero, a6, e32, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v10, v8, a1 +; ZVBB-NEXT: vsetvli a5, zero, e32, mf2, ta, ma +; ZVBB-NEXT: vle32.v v11, (a2) +; ZVBB-NEXT: vle32.v v9, (a4) +; ZVBB-NEXT: vsetvli zero, a6, e32, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v9, v12, a1 +; ZVBB-NEXT: vsetvli a2, zero, e32, mf2, ta, ma +; ZVBB-NEXT: vle32.v v12, (a3) +; ZVBB-NEXT: vle32.v v8, (a0) +; ZVBB-NEXT: vsetvli zero, a6, e32, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v8, v12, a1 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a0, a0, 2 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave7.nxv7f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6) + ret %res +} -define @vector_interleave_nxv28i32_nxv4i32( %a, %b, %c, %d, %e, %f, %g) nounwind { +define @vector_interleave_nxv14f32_nxv2f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { +; CHECK-LABEL: 
vector_interleave_nxv14f32_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 3 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: add a2, a0, a1 +; CHECK-NEXT: add a3, a2, a1 +; CHECK-NEXT: vsetvli a4, zero, e32, m1, ta, ma +; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: vl1re32.v v10, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1re32.v v11, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1re32.v v8, (a0) +; CHECK-NEXT: add a0, a3, a1 +; CHECK-NEXT: vl1re32.v v9, (a2) +; CHECK-NEXT: vl1re32.v v12, (a3) +; CHECK-NEXT: vl1re32.v v13, (a0) +; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re32.v v14, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 3 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret ; -; RV32-LABEL: vector_interleave_nxv28i32_nxv4i32: +; ZVBB-LABEL: vector_interleave_nxv14f32_nxv2f32: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 3 +; ZVBB-NEXT: sub a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: add a2, a0, a1 +; ZVBB-NEXT: add a3, a2, a1 +; ZVBB-NEXT: vsetvli a4, zero, e32, m1, ta, ma +; ZVBB-NEXT: vsseg7e32.v v8, (a0) +; ZVBB-NEXT: vl1re32.v v10, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1re32.v v11, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1re32.v v8, (a0) +; ZVBB-NEXT: add a0, a3, a1 +; ZVBB-NEXT: vl1re32.v v9, (a2) +; ZVBB-NEXT: vl1re32.v v12, (a3) +; ZVBB-NEXT: vl1re32.v v13, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re32.v v14, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 3 +; ZVBB-NEXT: sub a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave7.nxv14f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6) + ret %res +} + +define @vector_interleave_nxv28f32_nxv4f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { +; RV32-LABEL: vector_interleave_nxv28f32_nxv4f32: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill @@ -3624,7 +8108,7 @@ define @vector_interleave_nxv28i32_nxv4i32( @vector_interleave_nxv28i32_nxv4i32( @vector_interleave_nxv28i32_nxv4i32( @vector_interleave_nxv28i32_nxv4i32( @vector_interleave_nxv28i32_nxv4i32( @llvm.vector.interleave7.nxv28i32( %a, %b, %c, %d, %e, %f, %g) - ret %res + %res = call @llvm.vector.interleave7.nxv28f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6) + ret %res } -define @vector_interleave_nxv14i64_nxv2i64( %a, %b, %c, %d, %e, %f, %g) nounwind { +define @vector_interleave_nxv7f64_nxv1f64( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { +; CHECK-LABEL: vector_interleave_nxv7f64_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 3 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: add a2, a0, a1 +; CHECK-NEXT: add a3, a2, a1 +; CHECK-NEXT: vsetvli a4, zero, e64, m1, ta, ma +; CHECK-NEXT: vsseg7e64.v v8, (a0) +; CHECK-NEXT: vl1re64.v v10, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1re64.v v11, (a3) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: vl1re64.v v8, (a0) +; CHECK-NEXT: add a0, a3, a1 +; CHECK-NEXT: vl1re64.v v9, (a2) +; CHECK-NEXT: vl1re64.v v12, (a3) +; CHECK-NEXT: vl1re64.v v13, (a0) 
+; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: vl1re64.v v14, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a1, a0, 3 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret ; -; RV32-LABEL: vector_interleave_nxv14i64_nxv2i64: +; ZVBB-LABEL: vector_interleave_nxv7f64_nxv1f64: +; ZVBB: # %bb.0: +; ZVBB-NEXT: addi sp, sp, -16 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 3 +; ZVBB-NEXT: sub a0, a1, a0 +; ZVBB-NEXT: sub sp, sp, a0 +; ZVBB-NEXT: addi a0, sp, 16 +; ZVBB-NEXT: csrr a1, vlenb +; ZVBB-NEXT: add a2, a0, a1 +; ZVBB-NEXT: add a3, a2, a1 +; ZVBB-NEXT: vsetvli a4, zero, e64, m1, ta, ma +; ZVBB-NEXT: vsseg7e64.v v8, (a0) +; ZVBB-NEXT: vl1re64.v v10, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1re64.v v11, (a3) +; ZVBB-NEXT: add a3, a3, a1 +; ZVBB-NEXT: vl1re64.v v8, (a0) +; ZVBB-NEXT: add a0, a3, a1 +; ZVBB-NEXT: vl1re64.v v9, (a2) +; ZVBB-NEXT: vl1re64.v v12, (a3) +; ZVBB-NEXT: vl1re64.v v13, (a0) +; ZVBB-NEXT: add a0, a0, a1 +; ZVBB-NEXT: vl1re64.v v14, (a0) +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: slli a1, a0, 3 +; ZVBB-NEXT: sub a0, a1, a0 +; ZVBB-NEXT: add sp, sp, a0 +; ZVBB-NEXT: addi sp, sp, 16 +; ZVBB-NEXT: ret + %res = call @llvm.vector.interleave7.nxv7f64( %v0, %v1, %v2, %v3, %v4, %v5, %v6) + ret %res +} + +define @vector_interleave_nxv14f64_nxv2f64( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { +; RV32-LABEL: vector_interleave_nxv14f64_nxv2f64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill @@ -4039,7 +8588,7 @@ define @vector_interleave_nxv14i64_nxv2i64( @vector_interleave_nxv14i64_nxv2i64( @vector_interleave_nxv14i64_nxv2i64( @vector_interleave_nxv14i64_nxv2i64( @vector_interleave_nxv14i64_nxv2i64( @llvm.vector.interleave7.nxv14i64( %a, %b, %c, %d, %e, %f, %g) - ret %res + %res = call @llvm.vector.interleave7.nxv14f64( %v0, %v1, %v2, %v3, %v4, %v5, %v6) + ret %res }
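; ---------------------------------------------------------------------------
; Editor's sketch, not part of the autogenerated checks above: the scalable
; vector types in this extracted diff were lost (every "<vscale x N x ty>"
; token was stripped), so the IR above reads as "call @llvm.vector..." with
; empty struct and argument types. A minimal fully-typed example of the
; intrinsic shape these tests exercise is reconstructed below. The function
; name and the nxv6f32/nxv2f32 type pairing are hypothetical; only the
; llvm.vector.interleave3 intrinsic itself is taken from the tests above.

define <vscale x 6 x float> @interleave3_typed_sketch(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2) nounwind {
  ; Element-wise interleave of three scalable vectors. The checks in this
  ; file show RISC-V lowering such calls through a stack slot: a segment
  ; store (here it would be vsseg3e32.v) followed by whole-register
  ; vl1re32.v loads of the interleaved result.
  %res = call <vscale x 6 x float> @llvm.vector.interleave3.nxv6f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2)
  ret <vscale x 6 x float> %res
}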