84 changes: 84 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -1301,6 +1301,90 @@ bool GCNTTIImpl::isProfitableToSinkOperands(Instruction *I,

    if (match(&Op, m_FAbs(m_Value())) || match(&Op, m_FNeg(m_Value())))
      Ops.push_back(&Op);

    // Zero cost vector instructions (e.g. extractelement 0 of i32 vectors)
    // will be optimized away, and sinking them can help SDAG combines.
    DataLayout DL = I->getModule()->getDataLayout();
[Review comment from a Contributor]
Never copy a DataLayout

Suggested change:
-    DataLayout DL = I->getModule()->getDataLayout();
+    const DataLayout &DL = I->getModule()->getDataLayout();

    auto IsFreeExtractInsert = [&DL, this](VectorType *VecType,
                                           unsigned VecIndex) {
      unsigned EltSize = DL.getTypeSizeInBits(VecType->getElementType());
      return EltSize >= 32 ||
             (EltSize == 16 && VecIndex == 0 && ST->has16BitInsts());
    };
[Review comment from a Contributor on lines +1308 to +1313]
The TTI costs for the vector ops should already be reporting free, shouldn't need to reimplement this

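(Editorial aside, not from the review thread: GCNTTIImpl::getVectorInstrCost already returns a zero cost for exactly these cases, so the reviewer's suggestion would presumably look something like the sketch below; this is an assumption on my part, using the (Opcode, Type, CostKind, Index, Op0, Op1) overload.)

    auto IsFreeExtractInsert = [this](unsigned Opcode, VectorType *VecType,
                                      unsigned VecIndex) {
      // Ask the existing cost model instead of re-deriving its rules.
      InstructionCost Cost = getVectorInstrCost(
          Opcode, VecType, TTI::TCK_RecipThroughput, VecIndex,
          /*Op0=*/nullptr, /*Op1=*/nullptr);
      return Cost == 0;
    };
    // (Call sites would then pass Instruction::ExtractElement or
    // Instruction::InsertElement along with the vector type and index.)
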
    uint64_t VecIndex;
    Value *Vec;
    if (match(Op.get(), m_ExtractElt(m_Value(Vec), m_ConstantInt(VecIndex)))) {
      Instruction *VecOpInst =
          dyn_cast<Instruction>(cast<Instruction>(Op.get())->getOperand(0));
      // If a zero cost extractelement instruction is the only use of the
      // vector, then it may be combined with the def.
      if (VecOpInst && VecOpInst->hasOneUse())
        continue;

      if (IsFreeExtractInsert(cast<VectorType>(Vec->getType()), VecIndex))
        Ops.push_back(&Op);

      continue;
    }

    if (match(Op.get(),
              m_InsertElt(m_Value(Vec), m_Value(), m_ConstantInt(VecIndex)))) {
      if (IsFreeExtractInsert(cast<VectorType>(Vec->getType()), VecIndex))
        Ops.push_back(&Op);

      continue;
    }

    if (auto *Shuffle = dyn_cast<ShuffleVectorInst>(Op.get())) {
      if (Shuffle->isIdentity()) {
        Ops.push_back(&Op);
        continue;
      }

      unsigned EltSize = DL.getTypeSizeInBits(
          cast<VectorType>(Shuffle->getType())->getElementType());

      // For i32 (or greater) shufflevectors, these will be lowered into a
      // series of insert / extract elements, which will be coalesced away.
      if (EltSize >= 32) {
        Ops.push_back(&Op);
        continue;
      }

      if (EltSize < 16 || !ST->has16BitInsts())
        continue;

      int NumSubElts, SubIndex;
      if (Shuffle->changesLength()) {
        if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding()) {
          Ops.push_back(&Op);
          continue;
        }

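        // (Editorial note, not in the patch: an even SubIndex keeps a 16-bit
        // subvector aligned to a 32-bit register boundary, so the operation
        // reduces to subregister copies; an odd offset would need shifts.)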
        if (Shuffle->isExtractSubvectorMask(SubIndex) ||
            Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex)) {
          if (!(SubIndex % 2)) {
            Ops.push_back(&Op);
            continue;
[Review comment from a Contributor on lines +1366 to +1370]
these can be combined

          }
        }
      }

      if (Shuffle->isReverse() || Shuffle->isZeroEltSplat() ||
          Shuffle->isSingleSource()) {
        Ops.push_back(&Op);
        continue;
      }

      if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex)) {
        if (!(SubIndex % 2)) {
[Review comment from a Contributor on lines +1381 to +1382]
this can be combined

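          // Editorial sketch, not part of the patch: "combined" presumably
          // means folding the nested conditions into one, e.g.
          //   if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex) &&
          //       !(SubIndex % 2)) { Ops.push_back(&Op); continue; }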
          Ops.push_back(&Op);
          continue;
        }
      }
    }
  }

  return !Ops.empty();
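
For a concrete picture of what the new hook enables, here is a minimal, hypothetical IR example (editorial; it is not one of the patch's tests, and the function name is invented). The extract of element 0 of a 32-bit-element vector is free on AMDGPU, so with this change CodeGenPrepare is told it is profitable to sink %ext from %entry into %use.bb next to its user, where SelectionDAG can then fold it:

    define float @sink_free_extract(<2 x float> %v, float %x, i1 %cc) {
    entry:
      %ext = extractelement <2 x float> %v, i64 0
      br i1 %cc, label %use.bb, label %exit

    use.bb:
      %mul = fmul float %ext, %x
      ret float %mul

    exit:
      ret float 0.0
    }
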
120 changes: 60 additions & 60 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
@@ -2149,11 +2149,11 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccz .LBB11_2
; CI-NEXT: ; %bb.1: ; %frem.else
; CI-NEXT: s_and_b32 s6, s2, 0x80000000
-; CI-NEXT: v_mov_b32_e32 v1, s4
-; CI-NEXT: v_mov_b32_e32 v0, s2
-; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s2|, |v1|
-; CI-NEXT: v_mov_b32_e32 v1, s6
-; CI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; CI-NEXT: v_mov_b32_e32 v0, s4
+; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s2|, |v0|
+; CI-NEXT: v_mov_b32_e32 v0, s6
+; CI-NEXT: v_mov_b32_e32 v1, s2
+; CI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; CI-NEXT: s_mov_b32 s6, 0
; CI-NEXT: .LBB11_2: ; %Flow53
; CI-NEXT: s_xor_b32 s6, s6, 1
Expand Down Expand Up @@ -2224,11 +2224,11 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccz .LBB11_10
; CI-NEXT: ; %bb.9: ; %frem.else16
; CI-NEXT: s_and_b32 s6, s3, 0x80000000
-; CI-NEXT: v_mov_b32_e32 v2, s5
-; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s3|, |v2|
-; CI-NEXT: v_mov_b32_e32 v2, s6
-; CI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v1, s5
+; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s3|, |v1|
+; CI-NEXT: v_mov_b32_e32 v1, s6
+; CI-NEXT: v_mov_b32_e32 v2, s3
+; CI-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; CI-NEXT: s_mov_b32 s6, 0
; CI-NEXT: .LBB11_10: ; %Flow49
; CI-NEXT: s_xor_b32 s6, s6, 1
Expand Down Expand Up @@ -2322,11 +2322,11 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccz .LBB11_2
; VI-NEXT: ; %bb.1: ; %frem.else
; VI-NEXT: s_and_b32 s6, s2, 0x80000000
-; VI-NEXT: v_mov_b32_e32 v1, s4
-; VI-NEXT: v_mov_b32_e32 v0, s2
-; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s2|, |v1|
-; VI-NEXT: v_mov_b32_e32 v1, s6
-; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s2|, |v0|
+; VI-NEXT: v_mov_b32_e32 v0, s6
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; VI-NEXT: s_mov_b32 s6, 0
; VI-NEXT: .LBB11_2: ; %Flow53
; VI-NEXT: s_xor_b32 s6, s6, 1
Expand Down Expand Up @@ -2397,11 +2397,11 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccz .LBB11_10
; VI-NEXT: ; %bb.9: ; %frem.else16
; VI-NEXT: s_and_b32 s6, s3, 0x80000000
-; VI-NEXT: v_mov_b32_e32 v2, s5
-; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s3|, |v2|
-; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v1, s5
+; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s3|, |v1|
+; VI-NEXT: v_mov_b32_e32 v1, s6
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; VI-NEXT: s_mov_b32 s6, 0
; VI-NEXT: .LBB11_10: ; %Flow49
; VI-NEXT: s_xor_b32 s6, s6, 1
Expand Down Expand Up @@ -2503,11 +2503,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccz .LBB12_2
; CI-NEXT: ; %bb.1: ; %frem.else
; CI-NEXT: s_and_b32 s2, s4, 0x80000000
-; CI-NEXT: v_mov_b32_e32 v1, s8
-; CI-NEXT: v_mov_b32_e32 v0, s4
-; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s4|, |v1|
-; CI-NEXT: v_mov_b32_e32 v1, s2
-; CI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; CI-NEXT: v_mov_b32_e32 v0, s8
+; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s4|, |v0|
+; CI-NEXT: v_mov_b32_e32 v0, s2
+; CI-NEXT: v_mov_b32_e32 v1, s4
+; CI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: .LBB12_2: ; %Flow127
; CI-NEXT: s_xor_b32 s2, s2, 1
Expand Down Expand Up @@ -2578,11 +2578,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccz .LBB12_10
; CI-NEXT: ; %bb.9: ; %frem.else16
; CI-NEXT: s_and_b32 s2, s5, 0x80000000
-; CI-NEXT: v_mov_b32_e32 v2, s9
-; CI-NEXT: v_mov_b32_e32 v1, s5
-; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s5|, |v2|
-; CI-NEXT: v_mov_b32_e32 v2, s2
-; CI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v1, s9
+; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s5|, |v1|
+; CI-NEXT: v_mov_b32_e32 v1, s2
+; CI-NEXT: v_mov_b32_e32 v2, s5
+; CI-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: .LBB12_10: ; %Flow123
; CI-NEXT: s_xor_b32 s2, s2, 1
Expand Down Expand Up @@ -2653,11 +2653,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccz .LBB12_18
; CI-NEXT: ; %bb.17: ; %frem.else47
; CI-NEXT: s_and_b32 s2, s6, 0x80000000
-; CI-NEXT: v_mov_b32_e32 v3, s10
-; CI-NEXT: v_mov_b32_e32 v2, s6
-; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s6|, |v3|
-; CI-NEXT: v_mov_b32_e32 v3, s2
-; CI-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; CI-NEXT: v_mov_b32_e32 v2, s10
+; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s6|, |v2|
+; CI-NEXT: v_mov_b32_e32 v2, s2
+; CI-NEXT: v_mov_b32_e32 v3, s6
+; CI-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: .LBB12_18: ; %Flow119
; CI-NEXT: s_xor_b32 s2, s2, 1
Expand Down Expand Up @@ -2728,11 +2728,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccz .LBB12_26
; CI-NEXT: ; %bb.25: ; %frem.else78
; CI-NEXT: s_and_b32 s2, s7, 0x80000000
-; CI-NEXT: v_mov_b32_e32 v4, s11
-; CI-NEXT: v_mov_b32_e32 v3, s7
-; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s7|, |v4|
-; CI-NEXT: v_mov_b32_e32 v4, s2
-; CI-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
+; CI-NEXT: v_mov_b32_e32 v3, s11
+; CI-NEXT: v_cmp_eq_f32_e64 vcc, |s7|, |v3|
+; CI-NEXT: v_mov_b32_e32 v3, s2
+; CI-NEXT: v_mov_b32_e32 v4, s7
+; CI-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: .LBB12_26: ; %Flow115
; CI-NEXT: s_xor_b32 s2, s2, 1
Expand Down Expand Up @@ -2834,11 +2834,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccz .LBB12_2
; VI-NEXT: ; %bb.1: ; %frem.else
; VI-NEXT: s_and_b32 s2, s4, 0x80000000
-; VI-NEXT: v_mov_b32_e32 v1, s8
-; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s4|, |v1|
-; VI-NEXT: v_mov_b32_e32 v1, s2
-; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v0, s8
+; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s4|, |v0|
+; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: v_mov_b32_e32 v1, s4
+; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; VI-NEXT: s_mov_b32 s2, 0
; VI-NEXT: .LBB12_2: ; %Flow127
; VI-NEXT: s_xor_b32 s2, s2, 1
Expand Down Expand Up @@ -2909,11 +2909,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccz .LBB12_10
; VI-NEXT: ; %bb.9: ; %frem.else16
; VI-NEXT: s_and_b32 s2, s5, 0x80000000
-; VI-NEXT: v_mov_b32_e32 v2, s9
-; VI-NEXT: v_mov_b32_e32 v1, s5
-; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s5|, |v2|
-; VI-NEXT: v_mov_b32_e32 v2, s2
-; VI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v1, s9
+; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s5|, |v1|
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: v_mov_b32_e32 v2, s5
+; VI-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; VI-NEXT: s_mov_b32 s2, 0
; VI-NEXT: .LBB12_10: ; %Flow123
; VI-NEXT: s_xor_b32 s2, s2, 1
Expand Down Expand Up @@ -2984,11 +2984,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccz .LBB12_18
; VI-NEXT: ; %bb.17: ; %frem.else47
; VI-NEXT: s_and_b32 s2, s6, 0x80000000
-; VI-NEXT: v_mov_b32_e32 v3, s10
-; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s6|, |v3|
-; VI-NEXT: v_mov_b32_e32 v3, s2
-; VI-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; VI-NEXT: v_mov_b32_e32 v2, s10
+; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s6|, |v2|
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: v_mov_b32_e32 v3, s6
+; VI-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
; VI-NEXT: s_mov_b32 s2, 0
; VI-NEXT: .LBB12_18: ; %Flow119
; VI-NEXT: s_xor_b32 s2, s2, 1
Expand Down Expand Up @@ -3059,11 +3059,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccz .LBB12_26
; VI-NEXT: ; %bb.25: ; %frem.else78
; VI-NEXT: s_and_b32 s2, s7, 0x80000000
-; VI-NEXT: v_mov_b32_e32 v4, s11
-; VI-NEXT: v_mov_b32_e32 v3, s7
-; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s7|, |v4|
-; VI-NEXT: v_mov_b32_e32 v4, s2
-; VI-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s11
+; VI-NEXT: v_cmp_eq_f32_e64 vcc, |s7|, |v3|
+; VI-NEXT: v_mov_b32_e32 v3, s2
+; VI-NEXT: v_mov_b32_e32 v4, s7
+; VI-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
; VI-NEXT: s_mov_b32 s2, 0
; VI-NEXT: .LBB12_26: ; %Flow115
; VI-NEXT: s_xor_b32 s2, s2, 1