; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+sve2,+f8f32mm < %s | FileCheck %s --check-prefixes=CHECK
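
; This checks lowering of the FP8 -> single-precision matrix multiply:
; @llvm.aarch64.set.fpmr should become a write to the FPMR system register,
; and @llvm.aarch64.sve.fmmla.mf8f32 a single FMMLA (FEAT_F8F32MM) on the
; .b source operands.
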
define dso_local <vscale x 4 x float> @_Z5t_varu13__SVFloat32_tu13__SVMfloat8_tS0_m(<vscale x 4 x float> %acc, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i64 noundef %fpmr) {
; CHECK-LABEL: _Z5t_varu13__SVFloat32_tu13__SVMfloat8_tS0_m:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    addvl x8, sp, #3
; CHECK-NEXT:    str z1, [sp, #1, mul vl]
; CHECK-NEXT:    str z0, [sp, #2, mul vl]
; CHECK-NEXT:    str z2, [sp]
; CHECK-NEXT:    str x0, [x8, #8]
; CHECK-NEXT:    msr FPMR, x0
; CHECK-NEXT:    fmmla z0.s, z1.b, z2.b
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %acc.addr = alloca <vscale x 4 x float>, align 16
  %a.addr = alloca <vscale x 16 x i8>, align 16
  %b.addr = alloca <vscale x 16 x i8>, align 16
  %fpmr.addr = alloca i64, align 8
  store <vscale x 4 x float> %acc, ptr %acc.addr, align 16
  store <vscale x 16 x i8> %a, ptr %a.addr, align 16
  store <vscale x 16 x i8> %b, ptr %b.addr, align 16
  store i64 %fpmr, ptr %fpmr.addr, align 8
  %0 = load <vscale x 4 x float>, ptr %acc.addr, align 16
  %1 = load <vscale x 16 x i8>, ptr %a.addr, align 16
  %2 = load <vscale x 16 x i8>, ptr %b.addr, align 16
  %3 = load i64, ptr %fpmr.addr, align 8
  call void @llvm.aarch64.set.fpmr(i64 %3)
  %4 = call <vscale x 4 x float> @llvm.aarch64.sve.fmmla.mf8f32(<vscale x 4 x float> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2)
  ret <vscale x 4 x float> %4
}

declare void @llvm.aarch64.set.fpmr(i64)

declare <vscale x 4 x float> @llvm.aarch64.sve.fmmla.mf8f32(<vscale x 4 x float>, <vscale x 16 x i8>, <vscale x 16 x i8>)
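
; For reference, a hypothetical C-level sketch (reconstructed from the mangled
; function name, not taken from this test) of ACLE source that could produce
; this IR. The intrinsic name svmmla_f32_mf8_fpm is an assumption:
;
;   #include <arm_sve.h>
;
;   svfloat32_t t_var(svfloat32_t acc, svmfloat8_t a, svmfloat8_t b,
;                     fpm_t fpmr) {
;     // Write the FP8 mode word to FPMR, then perform the widening
;     // mf8 -> f32 matrix multiply-accumulate.
;     return svmmla_f32_mf8_fpm(acc, a, b, fpmr);
;   }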