|
| 1 | +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| 2 | +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=+real-true16 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX12-TRUE16 %s |
| 3 | +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=-real-true16 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX12-FAKE16 %s |
| 4 | + |
| 5 | +; FIXME: t16 doesn't work at the moment because the store of s16 under t16 mode fails to select. |
| 6 | + |
| 7 | +declare bfloat @llvm.sqrt.bf16(bfloat %a) |
| 8 | +declare <2 x bfloat> @llvm.sqrt.v2bf16(<2 x bfloat> %a) |
| 9 | + |
| 10 | +define amdgpu_kernel void @sqrt_bf16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
| 11 | +; GFX12-TRUE16-LABEL: sqrt_bf16:
| 12 | +; GFX12-TRUE16:       ; %bb.0: ; %entry
| 13 | +; GFX12-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x0
| 14 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s6, -1
| 15 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s7, 0x31016000
| 16 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s10, s6
| 17 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s11, s7
| 18 | +; GFX12-TRUE16-NEXT:    s_wait_kmcnt 0x0
| 19 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s8, s2
| 20 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s9, s3
| 21 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s4, s0
| 22 | +; GFX12-TRUE16-NEXT:    buffer_load_u16 v0, off, s[8:11], null
| 23 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s5, s1
| 24 | +; GFX12-TRUE16-NEXT:    s_wait_loadcnt 0x0
| 25 | +; GFX12-TRUE16-NEXT:    v_sqrt_bf16_e32 v0.l, v0.l
| 26 | +; GFX12-TRUE16-NEXT:    buffer_store_b16 v0, off, s[4:7], null
| 27 | +; GFX12-TRUE16-NEXT:    s_endpgm
| 28 | +;
| 29 | +; GFX12-FAKE16-LABEL: sqrt_bf16:
| 30 | +; GFX12-FAKE16:       ; %bb.0: ; %entry
| 31 | +; GFX12-FAKE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x0
| 32 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s6, -1
| 33 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s7, 0x31016000
| 34 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s10, s6
| 35 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s11, s7
| 36 | +; GFX12-FAKE16-NEXT:    s_wait_kmcnt 0x0
| 37 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s8, s2
| 38 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s9, s3
| 39 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s4, s0
| 40 | +; GFX12-FAKE16-NEXT:    buffer_load_u16 v0, off, s[8:11], null
| 41 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s5, s1
| 42 | +; GFX12-FAKE16-NEXT:    s_wait_loadcnt 0x0
| 43 | +; GFX12-FAKE16-NEXT:    v_sqrt_bf16_e32 v0, v0
| 44 | +; GFX12-FAKE16-NEXT:    buffer_store_b16 v0, off, s[4:7], null
| 45 | +; GFX12-FAKE16-NEXT:    s_endpgm
; Scalar case: load one bfloat from %a, take llvm.sqrt.bf16 of it, store the
; result to %r. The CHECK lines above are autogenerated (see the UTC note at
; the top of the file) and verify that both true16 (v0.l operands) and fake16
; (full-v0 operands) lowerings select the single v_sqrt_bf16_e32 instruction.
| 46 | +entry:
| 47 | +  %a.val = load bfloat, ptr addrspace(1) %a
| 48 | +  %r.val = call bfloat @llvm.sqrt.bf16(bfloat %a.val)
| 49 | +  store bfloat %r.val, ptr addrspace(1) %r
| 50 | +  ret void
| 51 | +}
| 52 | + |
| 53 | +define amdgpu_kernel void @sqrt_v2bf16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
| 54 | +; GFX12-TRUE16-LABEL: sqrt_v2bf16:
| 55 | +; GFX12-TRUE16:       ; %bb.0: ; %entry
| 56 | +; GFX12-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x0
| 57 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s6, -1
| 58 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s7, 0x31016000
| 59 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s10, s6
| 60 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s11, s7
| 61 | +; GFX12-TRUE16-NEXT:    s_wait_kmcnt 0x0
| 62 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s8, s2
| 63 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s9, s3
| 64 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s4, s0
| 65 | +; GFX12-TRUE16-NEXT:    buffer_load_b32 v0, off, s[8:11], null
| 66 | +; GFX12-TRUE16-NEXT:    s_mov_b32 s5, s1
| 67 | +; GFX12-TRUE16-NEXT:    s_wait_loadcnt 0x0
| 68 | +; GFX12-TRUE16-NEXT:    v_sqrt_bf16_e32 v1.l, v0.l
| 69 | +; GFX12-TRUE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
| 70 | +; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(TRANS32_DEP_2)
| 71 | +; GFX12-TRUE16-NEXT:    v_sqrt_bf16_e32 v0.l, v0.l
| 72 | +; GFX12-TRUE16-NEXT:    v_and_b32_e32 v1, 0xffff, v1
| 73 | +; GFX12-TRUE16-NEXT:    s_delay_alu instid0(TRANS32_DEP_1) | instid1(VALU_DEP_1)
| 74 | +; GFX12-TRUE16-NEXT:    v_lshl_or_b32 v0, v0, 16, v1
| 75 | +; GFX12-TRUE16-NEXT:    buffer_store_b32 v0, off, s[4:7], null
| 76 | +; GFX12-TRUE16-NEXT:    s_endpgm
| 77 | +;
| 78 | +; GFX12-FAKE16-LABEL: sqrt_v2bf16:
| 79 | +; GFX12-FAKE16:       ; %bb.0: ; %entry
| 80 | +; GFX12-FAKE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x0
| 81 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s6, -1
| 82 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s7, 0x31016000
| 83 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s10, s6
| 84 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s11, s7
| 85 | +; GFX12-FAKE16-NEXT:    s_wait_kmcnt 0x0
| 86 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s8, s2
| 87 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s9, s3
| 88 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s4, s0
| 89 | +; GFX12-FAKE16-NEXT:    buffer_load_b32 v0, off, s[8:11], null
| 90 | +; GFX12-FAKE16-NEXT:    s_mov_b32 s5, s1
| 91 | +; GFX12-FAKE16-NEXT:    s_wait_loadcnt 0x0
| 92 | +; GFX12-FAKE16-NEXT:    v_sqrt_bf16_e32 v1, v0
| 93 | +; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
| 94 | +; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(TRANS32_DEP_2)
| 95 | +; GFX12-FAKE16-NEXT:    v_sqrt_bf16_e32 v0, v0
| 96 | +; GFX12-FAKE16-NEXT:    v_and_b32_e32 v1, 0xffff, v1
| 97 | +; GFX12-FAKE16-NEXT:    s_delay_alu instid0(TRANS32_DEP_1) | instid1(VALU_DEP_1)
| 98 | +; GFX12-FAKE16-NEXT:    v_lshl_or_b32 v0, v0, 16, v1
| 99 | +; GFX12-FAKE16-NEXT:    buffer_store_b32 v0, off, s[4:7], null
| 100 | +; GFX12-FAKE16-NEXT:    s_endpgm
; Vector case: load a <2 x bfloat> (one 32-bit word) from %a, take
; llvm.sqrt.v2bf16 of it, store to %r. The autogenerated CHECK lines above
; verify the vector op is scalarized: two v_sqrt_bf16_e32 instructions (low
; half directly, high half via a 16-bit shift), then the halves are repacked
; with v_and_b32 + v_lshl_or_b32 before the single b32 store.
| 101 | +entry:
| 102 | +  %a.val = load <2 x bfloat>, ptr addrspace(1) %a
| 103 | +  %r.val = call <2 x bfloat> @llvm.sqrt.v2bf16(<2 x bfloat> %a.val)
| 104 | +  store <2 x bfloat> %r.val, ptr addrspace(1) %r
| 105 | +  ret void
| 106 | +}
0 commit comments