Skip to content
Closed
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 22 additions & 12 deletions llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,20 @@ bool AMDGPUAtomicOptimizerImpl::run(Function &F) {
return Changed;
}

/// Return true if the atomic optimizer is able to handle a divergent value
/// of type \p Ty: a 32- or 64-bit integer, float, or double.
///
/// TODO: Pick a name that expresses why these types are handleable.
/// TODO: Cover the i16/half/bfloat and <2 x half>/<2 x bfloat> cases, and
/// pointer types.
static bool shouldOptimize(Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:
  case Type::DoubleTyID:
    return true;
  case Type::IntegerTyID:
    // Only 32- and 64-bit integers are supported by the DPP/iterative
    // lowering; return directly rather than falling through to default.
    return Ty->getIntegerBitWidth() == 32 || Ty->getIntegerBitWidth() == 64;
  default:
    return false;
  }
}

void AMDGPUAtomicOptimizerImpl::visitAtomicRMWInst(AtomicRMWInst &I) {
// Early exit for unhandled address space atomic instructions.
switch (I.getPointerAddressSpace()) {
Expand Down Expand Up @@ -227,12 +241,10 @@ void AMDGPUAtomicOptimizerImpl::visitAtomicRMWInst(AtomicRMWInst &I) {
const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));

// If the value operand is divergent, each lane is contributing a different
// value to the atomic calculation. We can only optimize divergent values if
// we have DPP available on our subtarget, and the atomic operation is either
// 32 or 64 bits.
if (ValDivergent &&
(!ST->hasDPP() || (DL->getTypeSizeInBits(I.getType()) != 32 &&
DL->getTypeSizeInBits(I.getType()) != 64))) {
// value to the atomic calculation. We only optimize divergent values if
// we have DPP available on our subtarget, and the atomic operation is of
// 32/64 bit integer, float or double type.
if (ValDivergent && (!ST->hasDPP() || !shouldOptimize(I.getType()))) {
return;
}

Expand Down Expand Up @@ -311,12 +323,10 @@ void AMDGPUAtomicOptimizerImpl::visitIntrinsicInst(IntrinsicInst &I) {
const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));

// If the value operand is divergent, each lane is contributing a different
// value to the atomic calculation. We can only optimize divergent values if
// we have DPP available on our subtarget, and the atomic operation is 32 or
// 64 bits.
if (ValDivergent &&
(!ST->hasDPP() || (DL->getTypeSizeInBits(I.getType()) != 32 &&
DL->getTypeSizeInBits(I.getType()) != 64))) {
// value to the atomic calculation. We only optimize divergent values if
// we have DPP available on our subtarget, and the atomic operation is of
// 32/64 bit integer, float or double type.
if (ValDivergent && (!ST->hasDPP() || !shouldOptimize(I.getType()))) {
return;
}

Expand Down
1 change: 1 addition & 0 deletions llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5430,6 +5430,7 @@ bool AMDGPULegalizerInfo::legalizeDSAtomicFPIntrinsic(LegalizerHelper &Helper,
return true;
}

// TODO: Fix pointer type handling
bool AMDGPULegalizerInfo::legalizeLaneOp(LegalizerHelper &Helper,
MachineInstr &MI,
Intrinsic::ID IID) const {
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/VOP2Instructions.td
Original file line number Diff line number Diff line change
Expand Up @@ -780,7 +780,7 @@ defm V_SUBREV_U32 : VOP2Inst <"v_subrev_u32", VOP_I32_I32_I32_ARITH, null_frag,

// These are special and do not read the exec mask.
let isConvergent = 1, Uses = []<Register>, IsInvalidSingleUseConsumer = 1 in {
def V_READLANE_B32 : VOP2_Pseudo<"v_readlane_b32", VOP_READLANE,[]>;
def V_READLANE_B32 : VOP2_Pseudo<"v_readlane_b32", VOP_READLANE, []>;
let IsNeverUniform = 1, Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in {
def V_WRITELANE_B32 : VOP2_Pseudo<"v_writelane_b32", VOP_WRITELANE, []> {
let IsInvalidSingleUseProducer = 1;
Expand Down
115 changes: 52 additions & 63 deletions llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ptr.ll
Original file line number Diff line number Diff line change
Expand Up @@ -51,99 +51,88 @@ define void @test_writelane_p0(ptr addrspace(1) %out, ptr %src, i32 %src1) {
ret void
}

define void @test_writelane_v3p0(ptr addrspace(1) %out, <4 x ptr> %src, i32 %src1) {
define void @test_writelane_v3p0(ptr addrspace(1) %out, <3 x ptr> %src, i32 %src1) {
; GFX802-SDAG-LABEL: test_writelane_v3p0:
; GFX802-SDAG: ; %bb.0:
; GFX802-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX802-SDAG-NEXT: v_add_u32_e32 v19, vcc, 16, v0
; GFX802-SDAG-NEXT: flat_load_dwordx4 v[11:14], v[0:1]
; GFX802-SDAG-NEXT: v_addc_u32_e32 v20, vcc, 0, v1, vcc
; GFX802-SDAG-NEXT: flat_load_dwordx4 v[15:18], v[19:20]
; GFX802-SDAG-NEXT: v_readfirstlane_b32 m0, v10
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s8, v5
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s9, v4
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s10, v3
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s11, v2
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s4, v9
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s5, v8
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s6, v7
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s7, v6
; GFX802-SDAG-NEXT: v_add_u32_e32 v13, vcc, 16, v0
; GFX802-SDAG-NEXT: flat_load_dwordx4 v[9:12], v[0:1]
; GFX802-SDAG-NEXT: v_addc_u32_e32 v14, vcc, 0, v1, vcc
; GFX802-SDAG-NEXT: flat_load_dwordx2 v[15:16], v[13:14]
; GFX802-SDAG-NEXT: v_readfirstlane_b32 m0, v8
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s6, v5
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s7, v4
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s8, v3
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s9, v2
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s4, v7
; GFX802-SDAG-NEXT: v_readfirstlane_b32 s5, v6
; GFX802-SDAG-NEXT: s_waitcnt vmcnt(1)
; GFX802-SDAG-NEXT: v_writelane_b32 v14, s8, m0
; GFX802-SDAG-NEXT: v_writelane_b32 v13, s9, m0
; GFX802-SDAG-NEXT: v_writelane_b32 v12, s10, m0
; GFX802-SDAG-NEXT: v_writelane_b32 v11, s11, m0
; GFX802-SDAG-NEXT: v_writelane_b32 v12, s6, m0
; GFX802-SDAG-NEXT: v_writelane_b32 v11, s7, m0
; GFX802-SDAG-NEXT: v_writelane_b32 v10, s8, m0
; GFX802-SDAG-NEXT: v_writelane_b32 v9, s9, m0
; GFX802-SDAG-NEXT: s_waitcnt vmcnt(0)
; GFX802-SDAG-NEXT: v_writelane_b32 v18, s4, m0
; GFX802-SDAG-NEXT: v_writelane_b32 v17, s5, m0
; GFX802-SDAG-NEXT: v_writelane_b32 v16, s6, m0
; GFX802-SDAG-NEXT: v_writelane_b32 v15, s7, m0
; GFX802-SDAG-NEXT: flat_store_dwordx4 v[0:1], v[11:14]
; GFX802-SDAG-NEXT: flat_store_dwordx4 v[19:20], v[15:18]
; GFX802-SDAG-NEXT: v_writelane_b32 v16, s4, m0
; GFX802-SDAG-NEXT: v_writelane_b32 v15, s5, m0
; GFX802-SDAG-NEXT: flat_store_dwordx4 v[0:1], v[9:12]
; GFX802-SDAG-NEXT: flat_store_dwordx2 v[13:14], v[15:16]
; GFX802-SDAG-NEXT: s_waitcnt vmcnt(0)
; GFX802-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX1010-SDAG-LABEL: test_writelane_v3p0:
; GFX1010-SDAG: ; %bb.0:
; GFX1010-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-SDAG-NEXT: s_clause 0x1
; GFX1010-SDAG-NEXT: global_load_dwordx4 v[11:14], v[0:1], off offset:16
; GFX1010-SDAG-NEXT: global_load_dwordx4 v[15:18], v[0:1], off
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s5, v10
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s9, v5
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s10, v4
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s11, v3
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s12, v2
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s4, v9
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s6, v8
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s7, v7
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s8, v6
; GFX1010-SDAG-NEXT: global_load_dwordx2 v[13:14], v[0:1], off offset:16
; GFX1010-SDAG-NEXT: global_load_dwordx4 v[9:12], v[0:1], off
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s5, v8
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s7, v5
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s8, v4
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s9, v3
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s10, v2
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s4, v7
; GFX1010-SDAG-NEXT: v_readfirstlane_b32 s6, v6
; GFX1010-SDAG-NEXT: s_waitcnt vmcnt(1)
; GFX1010-SDAG-NEXT: v_writelane_b32 v14, s4, s5
; GFX1010-SDAG-NEXT: s_waitcnt vmcnt(0)
; GFX1010-SDAG-NEXT: v_writelane_b32 v18, s9, s5
; GFX1010-SDAG-NEXT: v_writelane_b32 v17, s10, s5
; GFX1010-SDAG-NEXT: v_writelane_b32 v16, s11, s5
; GFX1010-SDAG-NEXT: v_writelane_b32 v15, s12, s5
; GFX1010-SDAG-NEXT: v_writelane_b32 v13, s6, s5
; GFX1010-SDAG-NEXT: v_writelane_b32 v12, s7, s5
; GFX1010-SDAG-NEXT: v_writelane_b32 v11, s8, s5
; GFX1010-SDAG-NEXT: global_store_dwordx4 v[0:1], v[15:18], off
; GFX1010-SDAG-NEXT: global_store_dwordx4 v[0:1], v[11:14], off offset:16
; GFX1010-SDAG-NEXT: v_writelane_b32 v10, s9, s5
; GFX1010-SDAG-NEXT: v_writelane_b32 v9, s10, s5
; GFX1010-SDAG-NEXT: v_writelane_b32 v13, s6, s5
; GFX1010-SDAG-NEXT: global_store_dwordx4 v[0:1], v[9:12], off
; GFX1010-SDAG-NEXT: global_store_dwordx2 v[0:1], v[13:14], off offset:16
; GFX1010-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX1100-SDAG-LABEL: test_writelane_v3p0:
; GFX1100-SDAG: ; %bb.0:
; GFX1100-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-SDAG-NEXT: s_clause 0x1
; GFX1100-SDAG-NEXT: global_load_b128 v[11:14], v[0:1], off offset:16
; GFX1100-SDAG-NEXT: global_load_b128 v[15:18], v[0:1], off
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s1, v10
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s5, v5
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s6, v4
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s7, v3
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s8, v2
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s0, v9
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s2, v8
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s3, v7
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s4, v6
; GFX1100-SDAG-NEXT: global_load_b64 v[13:14], v[0:1], off offset:16
; GFX1100-SDAG-NEXT: global_load_b128 v[9:12], v[0:1], off
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s1, v8
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s3, v5
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s4, v4
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s5, v3
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s6, v2
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s0, v7
; GFX1100-SDAG-NEXT: v_readfirstlane_b32 s2, v6
; GFX1100-SDAG-NEXT: s_waitcnt vmcnt(1)
; GFX1100-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1100-SDAG-NEXT: v_writelane_b32 v14, s0, s1
; GFX1100-SDAG-NEXT: s_waitcnt vmcnt(0)
; GFX1100-SDAG-NEXT: v_writelane_b32 v18, s5, s1
; GFX1100-SDAG-NEXT: v_writelane_b32 v17, s6, s1
; GFX1100-SDAG-NEXT: v_writelane_b32 v16, s7, s1
; GFX1100-SDAG-NEXT: v_writelane_b32 v15, s8, s1
; GFX1100-SDAG-NEXT: v_writelane_b32 v13, s2, s1
; GFX1100-SDAG-NEXT: v_writelane_b32 v12, s3, s1
; GFX1100-SDAG-NEXT: v_writelane_b32 v11, s4, s1
; GFX1100-SDAG-NEXT: v_writelane_b32 v10, s5, s1
; GFX1100-SDAG-NEXT: v_writelane_b32 v9, s6, s1
; GFX1100-SDAG-NEXT: v_writelane_b32 v13, s2, s1
; GFX1100-SDAG-NEXT: s_clause 0x1
; GFX1100-SDAG-NEXT: global_store_b128 v[0:1], v[15:18], off
; GFX1100-SDAG-NEXT: global_store_b128 v[0:1], v[11:14], off offset:16
; GFX1100-SDAG-NEXT: global_store_b128 v[0:1], v[9:12], off
; GFX1100-SDAG-NEXT: global_store_b64 v[0:1], v[13:14], off offset:16
; GFX1100-SDAG-NEXT: s_setpc_b64 s[30:31]
%oldval = load <4 x ptr>, ptr addrspace(1) %out
%writelane = call <4 x ptr> @llvm.amdgcn.writelane.v3p0(<4 x ptr> %src, i32 %src1, <4 x ptr> %oldval)
store <4 x ptr> %writelane, ptr addrspace(1) %out, align 4
%oldval = load <3 x ptr>, ptr addrspace(1) %out
%writelane = call <3 x ptr> @llvm.amdgcn.writelane.v3p0(<3 x ptr> %src, i32 %src1, <3 x ptr> %oldval)
store <3 x ptr> %writelane, ptr addrspace(1) %out, align 4
ret void
}

Expand Down