73 changes: 73 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp
@@ -0,0 +1,73 @@
//===--- AMDGPUBarrierLatency.cpp - AMDGPU Barrier Latency ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file contains a DAG scheduling mutation to add latency to
/// barrier edges between ATOMIC_FENCE instructions and preceding
/// memory accesses potentially affected by the fence.
/// This encourages the scheduling of more instructions before
/// ATOMIC_FENCE instructions. ATOMIC_FENCE instructions may
/// introduce wait counting or indicate an impending S_BARRIER
/// wait. Having more instructions in-flight across these
/// constructs improves latency hiding.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBarrierLatency.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"

using namespace llvm;

namespace {

class BarrierLatency : public ScheduleDAGMutation {
public:
  BarrierLatency() = default;
  void apply(ScheduleDAGInstrs *DAG) override;
};

void BarrierLatency::apply(ScheduleDAGInstrs *DAG) {
  constexpr unsigned SyntheticLatency = 2000;
  for (SUnit &SU : DAG->SUnits) {
    const MachineInstr *MI = SU.getInstr();
    if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE)
      continue;

    // Update latency on barrier edges of ATOMIC_FENCE.
    // We don't consider the scope of the fence or the type of instruction
    // involved in the barrier edge.
    for (SDep &PredDep : SU.Preds) {
      if (!PredDep.isBarrier())
        continue;
      SUnit *PredSU = PredDep.getSUnit();
      MachineInstr *PredMI = PredSU->getInstr();
      // Only consider memory loads.
      if (!PredMI->mayLoad() || PredMI->mayStore())
        continue;
      SDep ForwardD = PredDep;
      ForwardD.setSUnit(&SU);
      for (SDep &SuccDep : PredSU->Succs) {
        if (SuccDep == ForwardD) {
          SuccDep.setLatency(SuccDep.getLatency() + SyntheticLatency);
          break;
        }
      }
      PredDep.setLatency(PredDep.getLatency() + SyntheticLatency);
      PredSU->setDepthDirty();
      SU.setDepthDirty();
    }
  }
}

} // end namespace

std::unique_ptr<ScheduleDAGMutation>
llvm::createAMDGPUBarrierLatencyDAGMutation() {
  return std::make_unique<BarrierLatency>();
}
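For context, the input pattern this mutation is aimed at looks like the following LLVM IR: a memory load, then a workgroup-scope fence/barrier/fence sequence, then dependent work. This is a minimal illustrative sketch, assuming typical fence orderings; the function and value names are hypothetical and not taken from the PR's tests. The fences become ATOMIC_FENCE instructions in the scheduling DAG, and the barrier edge from the load to the first fence is the edge that receives the synthetic latency.

define amdgpu_kernel void @fence_example(ptr addrspace(1) %in, ptr addrspace(1) %out) {
bb:
  ; Load ordered by the fence; the mutation lengthens the barrier edge
  ; from this load to the ATOMIC_FENCE below, encouraging the scheduler
  ; to place more independent work between the two.
  %val = load i32, ptr addrspace(1) %in
  fence syncscope("workgroup") release
  tail call void @llvm.amdgcn.s.barrier()
  fence syncscope("workgroup") acquire
  store i32 %val, ptr addrspace(1) %out
  ret void
}

declare void @llvm.amdgcn.s.barrier()

The llvm.amdgcn.update.dpp.ll diffs below show the intended effect: the s_barrier (and buffer_gl0_inv on GFX10/GFX11) moves from immediately after the ds_read to just before the flat_store, so the DPP and address arithmetic is now scheduled ahead of the barrier.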
21 changes: 21 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.h
@@ -0,0 +1,21 @@
//===- AMDGPUBarrierLatency.h - AMDGPU Barrier Latency ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUBARRIERLATENCY_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUBARRIERLATENCY_H

#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include <memory>

namespace llvm {

std::unique_ptr<ScheduleDAGMutation> createAMDGPUBarrierLatencyDAGMutation();

} // namespace llvm

#endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUBARRIERLATENCY_H
4 changes: 4 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -17,6 +17,7 @@
#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUBarrierLatency.h"
#include "AMDGPUCtorDtorLowering.h"
#include "AMDGPUExportClustering.h"
#include "AMDGPUExportKernelRuntimeHandles.h"
@@ -639,6 +640,7 @@ createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation());
return DAG;
}

@@ -659,6 +661,7 @@ createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C) {
if (ST.shouldClusterStores())
DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation());
return DAG;
}

@@ -1197,6 +1200,7 @@ GCNTargetMachine::createPostMachineScheduler(MachineSchedContext *C) const {
EnableVOPD)
DAG->addMutation(createVOPDPairingMutation());
DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation());
return DAG;
}
//===----------------------------------------------------------------------===//
1 change: 1 addition & 0 deletions llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -52,6 +52,7 @@ add_llvm_target(AMDGPUCodeGen
AMDGPUAsmPrinter.cpp
AMDGPUAtomicOptimizer.cpp
AMDGPUAttributor.cpp
AMDGPUBarrierLatency.cpp
AMDGPUCallLowering.cpp
AMDGPUCodeGenPrepare.cpp
AMDGPUCombinerHelper.cpp
35 changes: 17 additions & 18 deletions llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
@@ -147,14 +147,13 @@ define weak_odr amdgpu_kernel void @dpp_test1(ptr %arg) local_unnamed_addr {
; GFX8-OPT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-OPT-NEXT: v_mov_b32_e32 v2, 0
; GFX8-OPT-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-OPT-NEXT: s_barrier
; GFX8-OPT-NEXT: v_add_u32_e32 v1, vcc, v1, v1
; GFX8-OPT-NEXT: s_nop 1
; GFX8-OPT-NEXT: v_mov_b32_dpp v2, v1 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
; GFX8-OPT-NEXT: v_add_u32_e32 v2, vcc, v2, v1
; GFX8-OPT-NEXT: v_mov_b32_e32 v1, s1
; GFX8-OPT-NEXT: v_add_u32_e32 v4, vcc, v1, v1
; GFX8-OPT-NEXT: v_mov_b32_e32 v3, s1
; GFX8-OPT-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-OPT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-OPT-NEXT: v_mov_b32_dpp v2, v4 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
; GFX8-OPT-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
; GFX8-OPT-NEXT: v_add_u32_e32 v2, vcc, v2, v4
; GFX8-OPT-NEXT: s_barrier
; GFX8-OPT-NEXT: flat_store_dword v[0:1], v2
; GFX8-OPT-NEXT: s_endpgm
;
@@ -194,14 +193,14 @@ define weak_odr amdgpu_kernel void @dpp_test1(ptr %arg) local_unnamed_addr {
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: ds_read_b32 v1, v0
; GFX10-NEXT: s_barrier
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_add_co_u32 v0, s0, s0, v0
; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v1
; GFX10-NEXT: v_mov_b32_dpp v2, v1 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v1
; GFX10-NEXT: v_add_nc_u32_e32 v3, v1, v1
; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, s1, 0, s0
; GFX10-NEXT: v_mov_b32_dpp v2, v3 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v3
; GFX10-NEXT: s_barrier
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: flat_store_dword v[0:1], v2
; GFX10-NEXT: s_endpgm
;
@@ -213,15 +212,15 @@ define weak_odr amdgpu_kernel void @dpp_test1(ptr %arg) local_unnamed_addr {
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX11-NEXT: ds_load_b32 v1, v0
; GFX11-NEXT: s_barrier
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_add_co_u32 v0, s0, s0, v0
; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_dpp v2, v1 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v1
; GFX11-NEXT: v_add_nc_u32_e32 v3, v1, v1
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s1, 0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_dpp v2, v3 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v3
; GFX11-NEXT: s_barrier
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
bb:
83 changes: 83 additions & 0 deletions llvm/test/CodeGen/AMDGPU/schedule-barrier-latency.mir

Large diffs are not rendered by default.

20 changes: 9 additions & 11 deletions llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.ll
@@ -26,17 +26,17 @@ define amdgpu_kernel void @barrier_vmcnt_global(ptr addrspace(1) %arg) {
; GFX9-LABEL: barrier_vmcnt_global:
; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dword v2, v1, s[0:1]
; GFX9-NEXT: v_add_u32_e32 v1, 1, v0
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[0:1]
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: global_load_dword v3, v1, s[0:1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX9-NEXT: v_mov_b32_e32 v2, s1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_barrier
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: global_store_dword v[0:1], v3, off
; GFX9-NEXT: s_endpgm
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
@@ -369,10 +369,9 @@ define amdgpu_kernel void @barrier_vmcnt_vscnt_flat_workgroup(ptr %arg) {
; GFX8-NEXT: flat_load_dword v3, v[2:3]
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: s_barrier
; GFX8-NEXT: flat_store_dword v[0:1], v3
; GFX8-NEXT: s_endpgm
@@ -393,10 +392,9 @@ define amdgpu_kernel void @barrier_vmcnt_vscnt_flat_workgroup(ptr %arg) {
; GFX9-NEXT: flat_load_dword v3, v[2:3]
; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_barrier
; GFX9-NEXT: flat_store_dword v[0:1], v3
; GFX9-NEXT: s_endpgm