From 6395f6c2cdc80e08e9d6a65fe80cb601e5ebccfc Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Mon, 13 Jan 2025 13:31:13 -0800
Subject: [PATCH] [RISCV] Remove loads from fixed-vectors-extract.ll. NFC

These test cases weren't trying to test load+extract. I believe they
only used loads because fixed vector arguments weren't supported when
they were written, or they were copied from the structure of other
tests that pre-date fixed vector argument support.

Reduces diff from #122671.
---
 .../RISCV/rvv/fixed-vectors-extract.ll | 425 ++++++------------
 1 file changed, 148 insertions(+), 277 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index a193d4e4e689f..7e45136372b6c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -7,50 +7,42 @@
 ; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+f,+d,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32,RV32M
 ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+f,+d,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64,RV64M
 
-define i8 @extractelt_v16i8(ptr %x) nounwind {
+define i8 @extractelt_v16i8(<16 x i8> %a) nounwind {
 ; CHECK-LABEL: extractelt_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <16 x i8>, ptr %x
   %b = extractelement <16 x i8> %a, i32 7
   ret i8 %b
 }
 
-define i16 @extractelt_v8i16(ptr %x) nounwind {
+define i16 @extractelt_v8i16(<8 x i16> %a) nounwind {
 ; CHECK-LABEL: extractelt_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <8 x i16>, ptr %x
   %b = extractelement <8 x i16> %a, i32 7
   ret i16 %b
 }
 
-define i32 @extractelt_v4i32(ptr %x) nounwind {
+define i32 @extractelt_v4i32(<4 x i32> %a) nounwind {
 ; CHECK-LABEL: extractelt_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <4 x i32>, ptr %x
   %b = extractelement <4 x i32> %a, i32 2
   ret i32 %b
 }
 
-define i64 @extractelt_v2i64(ptr %x) nounwind {
+define i64 @extractelt_v2i64(<2 x i64> %a) nounwind {
 ; RV32-LABEL: extractelt_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vle64.v v8, (a0)
 ; RV32-NEXT: li a0, 32
 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT: vsrl.vx v9, v8, a0
@@ -60,126 +52,104 @@ define i64 @extractelt_v2i64(ptr %x) nounwind {
 ;
 ; RV64-LABEL: extractelt_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT: vmv.x.s a0, v8
 ; RV64-NEXT: ret
-  %a = load <2 x i64>, ptr %x
   %b = extractelement <2 x i64> %a, i32 0
   ret i64 %b
 }
 
-define bfloat @extractelt_v8bf16(ptr %x) nounwind {
+define bfloat @extractelt_v8bf16(<8 x bfloat> %a) nounwind {
 ; CHECK-LABEL: extractelt_v8bf16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: fmv.h.x fa0, a0
 ; CHECK-NEXT: ret
-  %a = load <8 x bfloat>, ptr %x
   %b = extractelement <8 x bfloat> %a, i32 7
   ret bfloat %b
 }
 
-define half @extractelt_v8f16(ptr %x) nounwind {
+define half @extractelt_v8f16(<8 x half> %a) nounwind {
 ; ZVFH-LABEL: extractelt_v8f16:
 ; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; ZVFH-NEXT: vle16.v v8, (a0)
+; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
 ; ZVFH-NEXT: vslidedown.vi v8, v8, 7
 ; ZVFH-NEXT: vfmv.f.s fa0, v8
 ; ZVFH-NEXT: ret
 ;
 ; ZVFHMIN-LABEL: extractelt_v8f16:
 ; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; ZVFHMIN-NEXT: vle16.v v8, (a0)
+; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
 ; ZVFHMIN-NEXT: vslidedown.vi v8, v8, 7
 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
 ; ZVFHMIN-NEXT: fmv.h.x fa0, a0
 ; ZVFHMIN-NEXT: ret
-  %a = load <8 x half>, ptr %x
   %b = extractelement <8 x half> %a, i32 7
   ret half %b
 }
 
-define float @extractelt_v4f32(ptr %x) nounwind {
+define float @extractelt_v4f32(<4 x float> %a) nounwind {
 ; CHECK-LABEL: extractelt_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
-  %a = load <4 x float>, ptr %x
   %b = extractelement <4 x float> %a, i32 2
   ret float %b
 }
 
-define double @extractelt_v2f64(ptr %x) nounwind {
+define double @extractelt_v2f64(<2 x double> %a) nounwind {
 ; CHECK-LABEL: extractelt_v2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
-  %a = load <2 x double>, ptr %x
   %b = extractelement <2 x double> %a, i32 0
   ret double %b
 }
 
-define i8 @extractelt_v32i8(ptr %x) nounwind {
+define i8 @extractelt_v32i8(<32 x i8> %a) nounwind {
 ; CHECK-LABEL: extractelt_v32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <32 x i8>, ptr %x
   %b = extractelement <32 x i8> %a, i32 7
   ret i8 %b
 }
 
-define i16 @extractelt_v16i16(ptr %x) nounwind {
+define i16 @extractelt_v16i16(<16 x i16> %a) nounwind {
 ; CHECK-LABEL: extractelt_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <16 x i16>, ptr %x
   %b = extractelement <16 x i16> %a, i32 7
   ret i16 %b
 }
 
-define i32 @extractelt_v8i32(ptr %x) nounwind {
+define i32 @extractelt_v8i32(<8 x i32> %a) nounwind {
 ; CHECK-LABEL: extractelt_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 6
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <8 x i32>, ptr %x
   %b = extractelement <8 x i32> %a, i32 6
   ret i32 %b
 }
 
-define i64 @extractelt_v4i64(ptr %x) nounwind {
+define i64 @extractelt_v4i64(<4 x i64> %a) nounwind {
 ; RV32-LABEL: extractelt_v4i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: li a0, 32
-; RV32-NEXT: vslidedown.vi v8, v8, 3
 ; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: li a0, 32
 ; RV32-NEXT: vsrl.vx v10, v8, a0
 ; RV32-NEXT: vmv.x.s a1, v10
 ; RV32-NEXT: vmv.x.s a0, v8
@@ -187,36 +157,29 @@ define i64 @extractelt_v4i64(ptr %x) nounwind {
 ;
 ; RV64-LABEL: extractelt_v4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
 ; RV64-NEXT: vslidedown.vi v8, v8, 3
 ; RV64-NEXT: vmv.x.s a0, v8
 ; RV64-NEXT: ret
-  %a = load <4 x i64>, ptr %x
   %b = extractelement <4 x i64> %a, i32 3
   ret i64 %b
 }
 
-define bfloat @extractelt_v16bf16(ptr %x) nounwind {
+define bfloat @extractelt_v16bf16(<16 x bfloat> %a) nounwind {
 ; CHECK-LABEL: extractelt_v16bf16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 7
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: fmv.h.x fa0, a0
 ; CHECK-NEXT: ret
-  %a = load <16 x bfloat>, ptr %x
   %b = extractelement <16 x bfloat> %a, i32 7
   ret bfloat %b
 }
 
-define half @extractelt_v16f16(ptr %x) nounwind {
+define half @extractelt_v16f16(<16 x half> %a) nounwind {
 ; ZVFH-LABEL: extractelt_v16f16:
 ; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; ZVFH-NEXT: vle16.v v8, (a0)
 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
 ; ZVFH-NEXT: vslidedown.vi v8, v8, 7
 ; ZVFH-NEXT: vfmv.f.s fa0, v8
@@ -224,40 +187,32 @@ define half @extractelt_v16f16(ptr %x) nounwind {
 ;
 ; ZVFHMIN-LABEL: extractelt_v16f16:
 ; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; ZVFHMIN-NEXT: vle16.v v8, (a0)
 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
 ; ZVFHMIN-NEXT: vslidedown.vi v8, v8, 7
 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
 ; ZVFHMIN-NEXT: fmv.h.x fa0, a0
 ; ZVFHMIN-NEXT: ret
-  %a = load <16 x half>, ptr %x
   %b = extractelement <16 x half> %a, i32 7
   ret half %b
 }
 
-define float @extractelt_v8f32(ptr %x) nounwind {
+define float @extractelt_v8f32(<8 x float> %a) nounwind {
 ; CHECK-LABEL: extractelt_v8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
-  %a = load <8 x float>, ptr %x
   %b = extractelement <8 x float> %a, i32 2
   ret float %b
 }
 
-define double @extractelt_v4f64(ptr %x) nounwind {
+define double @extractelt_v4f64(<4 x double> %a) nounwind {
 ; CHECK-LABEL: extractelt_v4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
-  %a = load <4 x double>, ptr %x
   %b = extractelement <4 x double> %a, i32 0
   ret double %b
 }
@@ -266,11 +221,9 @@ define double @extractelt_v4f64(ptr %x) nounwind {
 ; incorrect use of getSimpleValueType().
 ; NOTE: Type legalization is bitcasting to vXi32 and doing 2 independent
 ; slidedowns and extracts.
-define i64 @extractelt_v3i64(ptr %x) nounwind {
+define i64 @extractelt_v3i64(<3 x i64> %a) nounwind {
 ; RV32-LABEL: extractelt_v3i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 3, e64, m2, ta, ma
-; RV32-NEXT: vle64.v v8, (a0)
 ; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
 ; RV32-NEXT: vslidedown.vi v10, v8, 4
 ; RV32-NEXT: vslidedown.vi v8, v8, 5
@@ -280,18 +233,16 @@ define i64 @extractelt_v3i64(ptr %x) nounwind {
 ;
 ; RV64-LABEL: extractelt_v3i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 3, e64, m2, ta, ma
-; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
 ; RV64-NEXT: vslidedown.vi v8, v8, 2
 ; RV64-NEXT: vmv.x.s a0, v8
 ; RV64-NEXT: ret
-  %a = load <3 x i64>, ptr %x
   %b = extractelement <3 x i64> %a, i32 2
   ret i64 %b
 }
 
 ; A LMUL8 type
-define i32 @extractelt_v32i32(ptr %x) nounwind {
+define i32 @extractelt_v32i32(<32 x i32> %a) nounwind {
 ; RV32-LABEL: extractelt_v32i32:
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -256
@@ -299,11 +250,10 @@ define i32 @extractelt_v32i32(ptr %x) nounwind {
 ; RV32-NEXT: sw s0, 248(sp) # 4-byte Folded Spill
 ; RV32-NEXT: addi s0, sp, 256
 ; RV32-NEXT: andi sp, sp, -128
-; RV32-NEXT: li a1, 32
-; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: mv a0, sp
-; RV32-NEXT: vse32.v v8, (a0)
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: mv a1, sp
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vse32.v v8, (a1)
 ; RV32-NEXT: lw a0, 124(sp)
 ; RV32-NEXT: addi sp, s0, -256
 ; RV32-NEXT: lw ra, 252(sp) # 4-byte Folded Reload
@@ -318,24 +268,22 @@ define i32 @extractelt_v32i32(ptr %x) nounwind {
 ; RV64-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
 ; RV64-NEXT: addi s0, sp, 256
 ; RV64-NEXT: andi sp, sp, -128
-; RV64-NEXT: li a1, 32
-; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: mv a0, sp
-; RV64-NEXT: vse32.v v8, (a0)
+; RV64-NEXT: li a0, 32
+; RV64-NEXT: mv a1, sp
+; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV64-NEXT: vse32.v v8, (a1)
 ; RV64-NEXT: lw a0, 124(sp)
 ; RV64-NEXT: addi sp, s0, -256
 ; RV64-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
 ; RV64-NEXT: addi sp, sp, 256
 ; RV64-NEXT: ret
-  %a = load <32 x i32>, ptr %x
   %b = extractelement <32 x i32> %a, i32 31
   ret i32 %b
 }
 
 ; Exercise type legalization for type beyond LMUL8
-define i32 @extractelt_v64i32(ptr %x) nounwind {
+define i32 @extractelt_v64i32(<64 x i32> %a) nounwind {
 ; RV32-LABEL: extractelt_v64i32:
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -256
@@ -343,12 +291,10 @@ define i32 @extractelt_v64i32(ptr %x) nounwind {
 ; RV32-NEXT: sw s0, 248(sp) # 4-byte Folded Spill
 ; RV32-NEXT: addi s0, sp, 256
 ; RV32-NEXT: andi sp, sp, -128
-; RV32-NEXT: addi a0, a0, 128
-; RV32-NEXT: li a1, 32
-; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: mv a0, sp
-; RV32-NEXT: vse32.v v8, (a0)
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: mv a1, sp
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vse32.v v16, (a1)
 ; RV32-NEXT: lw a0, 124(sp)
 ; RV32-NEXT: addi sp, s0, -256
 ; RV32-NEXT: lw ra, 252(sp) # 4-byte Folded Reload
@@ -363,315 +309,275 @@ define i32 @extractelt_v64i32(ptr %x) nounwind {
 ; RV64-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
 ; RV64-NEXT: addi s0, sp, 256
 ; RV64-NEXT: andi sp, sp, -128
-; RV64-NEXT: addi a0, a0, 128
-; RV64-NEXT: li a1, 32
-; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: mv a0, sp
-; RV64-NEXT: vse32.v v8, (a0)
+; RV64-NEXT: li a0, 32
+; RV64-NEXT: mv a1, sp
+; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV64-NEXT: vse32.v v16, (a1)
 ; RV64-NEXT: lw a0, 124(sp)
 ; RV64-NEXT: addi sp, s0, -256
 ; RV64-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
 ; RV64-NEXT: addi sp, sp, 256
 ; RV64-NEXT: ret
-  %a = load <64 x i32>, ptr %x
   %b = extractelement <64 x i32> %a, i32 63
   ret i32 %b
 }
 
-define i8 @extractelt_v16i8_idx(ptr %x, i32 zeroext %idx) nounwind {
+define i8 @extractelt_v16i8_idx(<16 x i8> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v16i8_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vslidedown.vx v8, v8, a1
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <16 x i8>, ptr %x
   %b = extractelement <16 x i8> %a, i32 %idx
   ret i8 %b
 }
 
-define i16 @extractelt_v8i16_idx(ptr %x, i32 zeroext %idx) nounwind {
+define i16 @extractelt_v8i16_idx(<8 x i16> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v8i16_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vslidedown.vx v8, v8, a1
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <8 x i16>, ptr %x
   %b = extractelement <8 x i16> %a, i32 %idx
   ret i16 %b
 }
 
-define i32 @extractelt_v4i32_idx(ptr %x, i32 zeroext %idx) nounwind {
+define i32 @extractelt_v4i32_idx(<4 x i32> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v4i32_idx:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vadd.vv v8, v8, v8
-; CHECK-NEXT: vslidedown.vx v8, v8, a1
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <4 x i32>, ptr %x
   %b = add <4 x i32> %a, %a
   %c = extractelement <4 x i32> %b, i32 %idx
   ret i32 %c
 }
 
-define i64 @extractelt_v2i64_idx(ptr %x, i32 zeroext %idx) nounwind {
+define i64 @extractelt_v2i64_idx(<2 x i64> %a, i32 zeroext %idx) nounwind {
 ; RV32-LABEL: extractelt_v2i64_idx:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: li a2, 32
 ; RV32-NEXT: vadd.vv v8, v8, v8
-; RV32-NEXT: vslidedown.vx v8, v8, a1
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vslidedown.vx v8, v8, a0
 ; RV32-NEXT: vmv.x.s a0, v8
 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vsrl.vx v8, v8, a2
+; RV32-NEXT: vsrl.vx v8, v8, a1
 ; RV32-NEXT: vmv.x.s a1, v8
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: extractelt_v2i64_idx:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vadd.vv v8, v8, v8
-; RV64-NEXT: vslidedown.vx v8, v8, a1
+; RV64-NEXT: vslidedown.vx v8, v8, a0
 ; RV64-NEXT: vmv.x.s a0, v8
 ; RV64-NEXT: ret
-  %a = load <2 x i64>, ptr %x
   %b = add <2 x i64> %a, %a
   %c = extractelement <2 x i64> %b, i32 %idx
   ret i64 %c
 }
 
-define bfloat @extractelt_v8bf16_idx(ptr %x, i32 zeroext %idx) nounwind {
+define bfloat @extractelt_v8bf16_idx(<8 x bfloat> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v8bf16_idx:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v10, v10
 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
-; CHECK-NEXT: vslidedown.vx v8, v10, a1
+; CHECK-NEXT: vslidedown.vx v8, v10, a0
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: fmv.h.x fa0, a0
 ; CHECK-NEXT: ret
-  %a = load <8 x bfloat>, ptr %x
   %b = fadd <8 x bfloat> %a, %a
   %c = extractelement <8 x bfloat> %b, i32 %idx
   ret bfloat %c
 }
 
-define half @extractelt_v8f16_idx(ptr %x, i32 zeroext %idx) nounwind {
+define half @extractelt_v8f16_idx(<8 x half> %a, i32 zeroext %idx) nounwind {
 ; ZVFH-LABEL: extractelt_v8f16_idx:
 ; ZVFH: # %bb.0:
 ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; ZVFH-NEXT: vle16.v v8, (a0)
 ; ZVFH-NEXT: vfadd.vv v8, v8, v8
-; ZVFH-NEXT: vslidedown.vx v8, v8, a1
+; ZVFH-NEXT: vslidedown.vx v8, v8, a0
 ; ZVFH-NEXT: vfmv.f.s fa0, v8
 ; ZVFH-NEXT: ret
 ;
 ; ZVFHMIN-LABEL: extractelt_v8f16_idx:
 ; ZVFHMIN: # %bb.0:
 ; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; ZVFHMIN-NEXT: vle16.v v8, (a0)
 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT: vfadd.vv v8, v10, v10
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v8
-; ZVFHMIN-NEXT: vslidedown.vx v8, v10, a1
+; ZVFHMIN-NEXT: vslidedown.vx v8, v10, a0
 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
 ; ZVFHMIN-NEXT: fmv.h.x fa0, a0
 ; ZVFHMIN-NEXT: ret
-  %a = load <8 x half>, ptr %x
   %b = fadd <8 x half> %a, %a
   %c = extractelement <8 x half> %b, i32 %idx
   ret half %c
 }
 
-define float @extractelt_v4f32_idx(ptr %x, i32 zeroext %idx) nounwind {
+define float @extractelt_v4f32_idx(<4 x float> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v4f32_idx:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vfadd.vv v8, v8, v8
-; CHECK-NEXT: vslidedown.vx v8, v8, a1
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
-  %a = load <4 x float>, ptr %x
   %b = fadd <4 x float> %a, %a
   %c = extractelement <4 x float> %b, i32 %idx
   ret float %c
 }
 
-define double @extractelt_v2f64_idx(ptr %x, i32 zeroext %idx) nounwind {
+define double @extractelt_v2f64_idx(<2 x double> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v2f64_idx:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
 ; CHECK-NEXT: vfadd.vv v8, v8, v8
-; CHECK-NEXT: vslidedown.vx v8, v8, a1
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
-  %a = load <2 x double>, ptr %x
   %b = fadd <2 x double> %a, %a
   %c = extractelement <2 x double> %b, i32 %idx
   ret double %c
 }
 
-define i8 @extractelt_v32i8_idx(ptr %x, i32 zeroext %idx) nounwind {
+define i8 @extractelt_v32i8_idx(<32 x i8> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v32i8_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vslidedown.vx v8, v8, a1
+; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <32 x i8>, ptr %x
   %b = extractelement <32 x i8> %a, i32 %idx
   ret i8 %b
 }
 
-define i16 @extractelt_v16i16_idx(ptr %x, i32 zeroext %idx) nounwind {
+define i16 @extractelt_v16i16_idx(<16 x i16> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v16i16_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vslidedown.vx v8, v8, a1
+; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <16 x i16>, ptr %x
   %b = extractelement <16 x i16> %a, i32 %idx
   ret i16 %b
 }
 
-define i32 @extractelt_v8i32_idx(ptr %x, i32 zeroext %idx) nounwind {
+define i32 @extractelt_v8i32_idx(<8 x i32> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v8i32_idx:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vadd.vv v8, v8, v8
-; CHECK-NEXT: vslidedown.vx v8, v8, a1
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <8 x i32>, ptr %x
   %b = add <8 x i32> %a, %a
   %c = extractelement <8 x i32> %b, i32 %idx
   ret i32 %c
 }
 
-define i64 @extractelt_v4i64_idx(ptr %x, i32 zeroext %idx) nounwind {
+define i64 @extractelt_v4i64_idx(<4 x i64> %a, i32 zeroext %idx) nounwind {
 ; RV32-LABEL: extractelt_v4i64_idx:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: li a2, 32
 ; RV32-NEXT: vadd.vv v8, v8, v8
-; RV32-NEXT: vslidedown.vx v8, v8, a1
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vslidedown.vx v8, v8, a0
 ; RV32-NEXT: vmv.x.s a0, v8
 ; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vsrl.vx v8, v8, a2
+; RV32-NEXT: vsrl.vx v8, v8, a1
 ; RV32-NEXT: vmv.x.s a1, v8
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: extractelt_v4i64_idx:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vadd.vv v8, v8, v8
-; RV64-NEXT: vslidedown.vx v8, v8, a1
+; RV64-NEXT: vslidedown.vx v8, v8, a0
 ; RV64-NEXT: vmv.x.s a0, v8
 ; RV64-NEXT: ret
-  %a = load <4 x i64>, ptr %x
   %b = add <4 x i64> %a, %a
   %c = extractelement <4 x i64> %b, i32 %idx
   ret i64 %c
 }
 
-define bfloat @extractelt_v16bf16_idx(ptr %x, i32 zeroext %idx) nounwind {
+define bfloat @extractelt_v16bf16_idx(<16 x bfloat> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v16bf16_idx:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v12, v12
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT: vfncvtbf16.f.f.w v12, v8
-; CHECK-NEXT: vslidedown.vx v8, v12, a1
+; CHECK-NEXT: vslidedown.vx v8, v12, a0
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: fmv.h.x fa0, a0
 ; CHECK-NEXT: ret
-  %a = load <16 x bfloat>, ptr %x
   %b = fadd <16 x bfloat> %a, %a
   %c = extractelement <16 x bfloat> %b, i32 %idx
   ret bfloat %c
 }
 
-define half @extractelt_v16f16_idx(ptr %x, i32 zeroext %idx) nounwind {
+define half @extractelt_v16f16_idx(<16 x half> %a, i32 zeroext %idx) nounwind {
 ; ZVFH-LABEL: extractelt_v16f16_idx:
 ; ZVFH: # %bb.0:
 ; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; ZVFH-NEXT: vle16.v v8, (a0)
 ; ZVFH-NEXT: vfadd.vv v8, v8, v8
-; ZVFH-NEXT: vslidedown.vx v8, v8, a1
+; ZVFH-NEXT: vslidedown.vx v8, v8, a0
 ; ZVFH-NEXT: vfmv.f.s fa0, v8
 ; ZVFH-NEXT: ret
 ;
 ; ZVFHMIN-LABEL: extractelt_v16f16_idx:
 ; ZVFHMIN: # %bb.0:
 ; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; ZVFHMIN-NEXT: vle16.v v8, (a0)
 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT: vfadd.vv v8, v12, v12
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v8
-; ZVFHMIN-NEXT: vslidedown.vx v8, v12, a1
+; ZVFHMIN-NEXT: vslidedown.vx v8, v12, a0
 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
 ; ZVFHMIN-NEXT: fmv.h.x fa0, a0
 ; ZVFHMIN-NEXT: ret
-  %a = load <16 x half>, ptr %x
   %b = fadd <16 x half> %a, %a
   %c = extractelement <16 x half> %b, i32 %idx
   ret half %c
 }
 
-define float @extractelt_v8f32_idx(ptr %x, i32 zeroext %idx) nounwind {
+define float @extractelt_v8f32_idx(<8 x float> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v8f32_idx:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vfadd.vv v8, v8, v8
-; CHECK-NEXT: vslidedown.vx v8, v8, a1
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
-  %a = load <8 x float>, ptr %x
   %b = fadd <8 x float> %a, %a
   %c = extractelement <8 x float> %b, i32 %idx
   ret float %c
 }
 
-define double @extractelt_v4f64_idx(ptr %x, i32 zeroext %idx) nounwind {
+define double @extractelt_v4f64_idx(<4 x double> %a, i32 zeroext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v4f64_idx:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
 ; CHECK-NEXT: vfadd.vv v8, v8, v8
-; CHECK-NEXT: vslidedown.vx v8, v8, a1
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
-  %a = load <4 x double>, ptr %x
   %b = fadd <4 x double> %a, %a
   %c = extractelement <4 x double> %b, i32 %idx
   ret double %c
@@ -681,32 +587,27 @@ define double @extractelt_v4f64_idx(ptr %x, i32 zeroext %idx) nounwind {
 ; incorrect use of getSimpleValueType_idx(, i32 zeroext %idx).
 ; NOTE: Type legalization is bitcasting to vXi32 and doing 2 independent
 ; slidedowns and extracts.
-define i64 @extractelt_v3i64_idx(ptr %x, i32 zeroext %idx) nounwind {
+define i64 @extractelt_v3i64_idx(<3 x i64> %a, i32 zeroext %idx) nounwind {
 ; RV32-LABEL: extractelt_v3i64_idx:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 3, e64, m2, ta, ma
-; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: add a1, a1, a1
-; RV32-NEXT: addi a0, a1, 1
 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT: vadd.vv v8, v8, v8
+; RV32-NEXT: add a0, a0, a0
 ; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT: vslidedown.vx v10, v8, a1
-; RV32-NEXT: vslidedown.vx v8, v8, a0
+; RV32-NEXT: vslidedown.vx v10, v8, a0
+; RV32-NEXT: addi a1, a0, 1
 ; RV32-NEXT: vmv.x.s a0, v10
+; RV32-NEXT: vslidedown.vx v8, v8, a1
 ; RV32-NEXT: vmv.x.s a1, v8
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: extractelt_v3i64_idx:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 3, e64, m2, ta, ma
-; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT: vadd.vv v8, v8, v8
-; RV64-NEXT: vslidedown.vx v8, v8, a1
+; RV64-NEXT: vslidedown.vx v8, v8, a0
 ; RV64-NEXT: vmv.x.s a0, v8
 ; RV64-NEXT: ret
-  %a = load <3 x i64>, ptr %x
   %b = add <3 x i64> %a, %a
   %c = extractelement <3 x i64> %b, i32 %idx
   ret i64 %c
@@ -818,7 +719,7 @@ define i32 @extractelt_v32i32_idx(ptr %x, i32 zeroext %idx) nounwind {
   ret i32 %c
 }
 
-define i32 @extractelt_v64i32_idx(ptr %x, i32 zeroext %idx) nounwind {
+define i32 @extractelt_v64i32_idx(<64 x i32> %a, i32 zeroext %idx) nounwind {
 ; RV32-LABEL: extractelt_v64i32_idx:
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -384
@@ -826,21 +727,18 @@ define i32 @extractelt_v64i32_idx(ptr %x, i32 zeroext %idx) nounwind {
 ; RV32-NEXT: sw s0, 376(sp) # 4-byte Folded Spill
 ; RV32-NEXT: addi s0, sp, 384
 ; RV32-NEXT: andi sp, sp, -128
-; RV32-NEXT: andi a1, a1, 63
-; RV32-NEXT: mv a2, sp
-; RV32-NEXT: li a3, 32
-; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: addi a0, a0, 128
-; RV32-NEXT: vle32.v v16, (a0)
-; RV32-NEXT: addi a0, sp, 128
-; RV32-NEXT: slli a1, a1, 2
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: vadd.vv v16, v16, v16
+; RV32-NEXT: andi a0, a0, 63
+; RV32-NEXT: mv a1, sp
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: addi a3, sp, 128
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; RV32-NEXT: vadd.vv v8, v8, v8
-; RV32-NEXT: vse32.v v8, (a2)
-; RV32-NEXT: vse32.v v16, (a0)
-; RV32-NEXT: lw a0, 0(a1)
+; RV32-NEXT: vadd.vv v16, v16, v16
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: vse32.v v16, (a3)
+; RV32-NEXT: vse32.v v8, (a1)
+; RV32-NEXT: lw a0, 0(a0)
 ; RV32-NEXT: addi sp, s0, -384
 ; RV32-NEXT: lw ra, 380(sp) # 4-byte Folded Reload
 ; RV32-NEXT: lw s0, 376(sp) # 4-byte Folded Reload
@@ -854,117 +752,96 @@ define i32 @extractelt_v64i32_idx(ptr %x, i32 zeroext %idx) nounwind {
 ; RV64-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
 ; RV64-NEXT: addi s0, sp, 384
 ; RV64-NEXT: andi sp, sp, -128
-; RV64-NEXT: andi a1, a1, 63
-; RV64-NEXT: mv a2, sp
-; RV64-NEXT: li a3, 32
-; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: addi a0, a0, 128
-; RV64-NEXT: vle32.v v16, (a0)
-; RV64-NEXT: addi a0, sp, 128
-; RV64-NEXT: slli a1, a1, 2
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: vadd.vv v16, v16, v16
+; RV64-NEXT: andi a0, a0, 63
+; RV64-NEXT: mv a1, sp
+; RV64-NEXT: li a2, 32
+; RV64-NEXT: addi a3, sp, 128
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; RV64-NEXT: vadd.vv v8, v8, v8
-; RV64-NEXT: vse32.v v8, (a2)
-; RV64-NEXT: vse32.v v16, (a0)
-; RV64-NEXT: lw a0, 0(a1)
+; RV64-NEXT: vadd.vv v16, v16, v16
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: vse32.v v16, (a3)
+; RV64-NEXT: vse32.v v8, (a1)
+; RV64-NEXT: lw a0, 0(a0)
 ; RV64-NEXT: addi sp, s0, -384
 ; RV64-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
 ; RV64-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
 ; RV64-NEXT: addi sp, sp, 384
 ; RV64-NEXT: ret
-  %a = load <64 x i32>, ptr %x
   %b = add <64 x i32> %a, %a
   %c = extractelement <64 x i32> %b, i32 %idx
   ret i32 %c
 }
 
-define void @store_extractelt_v16i8(ptr %x, ptr %p) nounwind {
+define void @store_extractelt_v16i8(<16 x i8> %a, ptr %p) nounwind {
 ; CHECK-LABEL: store_extractelt_v16i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vslidedown.vi v8, v8, 7
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vse8.v v8, (a1)
+; CHECK-NEXT: vslidedown.vi v8, v8, 7
+; CHECK-NEXT: vse8.v v8, (a0)
 ; CHECK-NEXT: ret
-  %a = load <16 x i8>, ptr %x
   %b = extractelement <16 x i8> %a, i32 7
   store i8 %b, ptr %p
   ret void
 }
 
-define void @store_extractelt_v8i16(ptr %x, ptr %p) nounwind {
+define void @store_extractelt_v8i16(<8 x i16> %a, ptr %p) nounwind {
 ; CHECK-LABEL: store_extractelt_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vslidedown.vi v8, v8, 7
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vse16.v v8, (a1)
+; CHECK-NEXT: vslidedown.vi v8, v8, 7
+; CHECK-NEXT: vse16.v v8, (a0)
 ; CHECK-NEXT: ret
-  %a = load <8 x i16>, ptr %x
   %b = extractelement <8 x i16> %a, i32 7
   store i16 %b, ptr %p
   ret void
 }
 
-define void @store_extractelt_v4i32(ptr %x, ptr %p) nounwind {
+define void @store_extractelt_v4i32(<4 x i32> %a, ptr %p) nounwind {
 ; CHECK-LABEL: store_extractelt_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vslidedown.vi v8, v8, 2
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v8, (a1)
+; CHECK-NEXT: vslidedown.vi v8, v8, 2
+; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: ret
-  %a = load <4 x i32>, ptr %x
   %b = extractelement <4 x i32> %a, i32 2
   store i32 %b, ptr %p
   ret void
 }
 
 ; FIXME: Use vse64.v on RV32 to avoid two scalar extracts and two scalar stores.
-define void @store_extractelt_v2i64(ptr %x, ptr %p) nounwind {
+define void @store_extractelt_v2i64(<2 x i64> %a, ptr %p) nounwind {
 ; RV32-LABEL: store_extractelt_v2i64:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: li a0, 32
-; RV32-NEXT: vslidedown.vi v8, v8, 1
 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vsrl.vx v9, v8, a0
-; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsrl.vx v9, v8, a1
+; RV32-NEXT: vmv.x.s a1, v8
 ; RV32-NEXT: vmv.x.s a2, v9
-; RV32-NEXT: sw a0, 0(a1)
-; RV32-NEXT: sw a2, 4(a1)
+; RV32-NEXT: sw a1, 0(a0)
+; RV32-NEXT: sw a2, 4(a0)
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: store_extractelt_v2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vslidedown.vi v8, v8, 1
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT: vse64.v v8, (a1)
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vse64.v v8, (a0)
 ; RV64-NEXT: ret
-  %a = load <2 x i64>, ptr %x
   %b = extractelement <2 x i64> %a, i64 1
   store i64 %b, ptr %p
   ret void
 }
 
-define void @store_extractelt_v2f64(ptr %x, ptr %p) nounwind {
+define void @store_extractelt_v2f64(<2 x double> %a, ptr %p) nounwind {
 ; CHECK-LABEL: store_extractelt_v2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vse64.v v8, (a1)
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
+; CHECK-NEXT: vse64.v v8, (a0)
 ; CHECK-NEXT: ret
-  %a = load <2 x double>, ptr %x
   %b = extractelement <2 x double> %a, i64 1
   store double %b, ptr %p
   ret void
 }
@@ -1246,30 +1123,24 @@ define float @extractelt_fdiv_v4f32(<4 x float> %x) {
   ret float %ext
 }
 
-define i32 @extractelt_v16i32_idx7_exact_vlen(ptr %x) nounwind vscale_range(2,2) {
+define i32 @extractelt_v16i32_idx7_exact_vlen(<16 x i32> %a) nounwind vscale_range(2,2) {
 ; CHECK-LABEL: extractelt_v16i32_idx7_exact_vlen:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl1re32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
+; CHECK-NEXT: vslidedown.vi v8, v9, 3
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <16 x i32>, ptr %x
   %b = extractelement <16 x i32> %a, i32 7
   ret i32 %b
 }
 
-define i32 @extractelt_v16i32_idx15_exact_vlen(ptr %x) nounwind vscale_range(2,2) {
+define i32 @extractelt_v16i32_idx15_exact_vlen(<16 x i32> %a) nounwind vscale_range(2,2) {
 ; CHECK-LABEL: extractelt_v16i32_idx15_exact_vlen:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, 48
-; CHECK-NEXT: vl1re32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
+; CHECK-NEXT: vslidedown.vi v8, v11, 3
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
-  %a = load <16 x i32>, ptr %x
   %b = extractelement <16 x i32> %a, i32 15
   ret i32 %b
 }