|
| 1 | +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| 2 | +; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX12 %s |
| 3 | + |
| 4 | +; Check that the scheduler does not hoist the s_wait_event above the |
| 5 | +; interpolation calculations. |
| 6 | +define amdgpu_ps void @test_wait_event(i32 inreg %arg, float %arg1, float %arg2, <8 x i32> inreg %arg3) { |
| 7 | +; GFX12-LABEL: test_wait_event: |
| 8 | +; GFX12: ; %bb.0: ; %bb |
| 9 | +; GFX12-NEXT: s_mov_b32 s11, s8 |
| 10 | +; GFX12-NEXT: s_mov_b32 m0, s0 |
| 11 | +; GFX12-NEXT: s_mov_b32 s0, exec_lo |
| 12 | +; GFX12-NEXT: s_wqm_b32 exec_lo, exec_lo |
| 13 | +; GFX12-NEXT: ds_param_load v2, attr1.x wait_va_vdst:15 wait_vm_vsrc:1 |
| 14 | +; GFX12-NEXT: ds_param_load v3, attr1.y wait_va_vdst:15 wait_vm_vsrc:1 |
| 15 | +; GFX12-NEXT: s_mov_b32 s10, s7 |
| 16 | +; GFX12-NEXT: s_mov_b32 s9, s6 |
| 17 | +; GFX12-NEXT: s_mov_b32 s8, s5 |
| 18 | +; GFX12-NEXT: s_mov_b32 s7, s4 |
| 19 | +; GFX12-NEXT: s_mov_b32 s6, s3 |
| 20 | +; GFX12-NEXT: s_mov_b32 s5, s2 |
| 21 | +; GFX12-NEXT: s_mov_b32 s4, s1 |
| 22 | +; GFX12-NEXT: s_mov_b32 exec_lo, s0 |
| 23 | +; GFX12-NEXT: v_interp_p10_f32 v4, v2, v1, v2 wait_exp:1 |
| 24 | +; GFX12-NEXT: v_interp_p10_f32 v1, v3, v1, v3 wait_exp:0 |
| 25 | +; GFX12-NEXT: s_wait_event 0x2 |
| 26 | +; GFX12-NEXT: v_mov_b32_e32 v8, 0 |
| 27 | +; GFX12-NEXT: v_interp_p2_f32 v4, v2, v0, v4 wait_exp:7 |
| 28 | +; GFX12-NEXT: v_interp_p2_f32 v0, v3, v0, v1 wait_exp:7 |
| 29 | +; GFX12-NEXT: v_mul_f32_e32 v1, 0x44800000, v4 |
| 30 | +; GFX12-NEXT: v_mul_f32_e32 v0, 0x44800000, v0 |
| 31 | +; GFX12-NEXT: v_cvt_i32_f32_e32 v1, v1 |
| 32 | +; GFX12-NEXT: v_cvt_i32_f32_e32 v0, v0 |
| 33 | +; GFX12-NEXT: image_load v[4:7], [v1, v0], s[4:11] dmask:0xf dim:SQ_RSRC_IMG_2D |
| 34 | +; GFX12-NEXT: s_wait_loadcnt 0x0 |
| 35 | +; GFX12-NEXT: v_dual_mul_f32 v7, 0.5, v7 :: v_dual_mul_f32 v6, 0.5, v6 |
| 36 | +; GFX12-NEXT: v_dual_mul_f32 v5, 0.5, v5 :: v_dual_mul_f32 v4, 0.5, v4 |
| 37 | +; GFX12-NEXT: image_store v[4:7], [v1, v0], s[4:11] dmask:0xf dim:SQ_RSRC_IMG_2D |
| 38 | +; GFX12-NEXT: s_wait_storecnt 0x0 |
| 39 | +; GFX12-NEXT: export mrt0 v8, v8, v8, v8 done |
| 40 | +; GFX12-NEXT: s_endpgm |
| 41 | +bb: |
; Two-stage interpolation of attr1.x: the LDS parameter load feeds the
; p10/p2 pair (matches the ds_param_load/v_interp_p10/v_interp_p2 sequence
; in the checked output above).
| 42 | + %i = call float @llvm.amdgcn.lds.param.load(i32 0, i32 1, i32 %arg) |
| 43 | + %i4 = call float @llvm.amdgcn.interp.inreg.p10(float %i, float %arg2, float %i) |
| 44 | + %i5 = call float @llvm.amdgcn.interp.inreg.p2(float %i, float %arg1, float %i4) |
; Same two-stage interpolation for attr1.y (component index 1).
| 45 | + %i6 = call float @llvm.amdgcn.lds.param.load(i32 1, i32 1, i32 %arg) |
| 46 | + %i7 = call float @llvm.amdgcn.interp.inreg.p10(float %i6, float %arg2, float %i6) |
| 47 | + %i8 = call float @llvm.amdgcn.interp.inreg.p2(float %i6, float %arg1, float %i7) |
; Scale the interpolated coordinates to integer texel coordinates. 1024.0
; appears as 0x44800000 in the checked v_mul_f32 instructions; presumably
; this is the image dimension -- not verifiable from this file alone.
| 48 | + %i9 = fmul float %i5, 1024.0 |
| 49 | + %i10 = fmul float %i8, 1024.0 |
| 50 | + %i11 = fptosi float %i9 to i32 |
| 51 | + %i12 = fptosi float %i10 to i32 |
; The property under test: this wait (s_wait_event 0x2 in the checked
; output) must remain AFTER the interpolation code above -- see the
; file-level comment. The CHECK order pins that scheduling.
| 52 | + call void @llvm.amdgcn.s.wait.event.export.ready() |
; Load the texel, halve every channel, and write it back.
| 53 | + %i13 = call <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i32.v8i32(i32 15, i32 %i11, i32 %i12, <8 x i32> %arg3, i32 0, i32 0) |
| 54 | + %i14 = fmul <4 x float> %i13, splat (float 0.5) |
| 55 | + call void @llvm.amdgcn.image.store.2d.v4f32.i32.v8i32(<4 x float> %i14, i32 15, i32 %i11, i32 %i12, <8 x i32> %arg3, i32 0, i32 0) |
; Release fence before the final export; a zero color is exported with the
; 'done' bit set (export mrt0 ... done in the checked output).
| 56 | + fence syncscope("agent") release |
| 57 | + call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float 0.0, float 0.0, float 0.0, float 0.0, i1 true, i1 true) |
| 58 | + ret void |
| 59 | +} |
0 commit comments