
Commit b14a59c

Customize upstream patch: [AMDGPU] Enable vectorization of i8 values
1 parent 2c69a9e commit b14a59c

File tree

8 files changed, +411 -566 lines changed


llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp

Lines changed: 34 additions & 24 deletions
@@ -313,24 +313,6 @@ bool GCNTTIImpl::hasBranchDivergence(const Function *F) const {
   return !F || !ST->isSingleLaneExecution(*F);
 }
 
-unsigned GCNTTIImpl::getNumberOfParts(Type *Tp) {
-  // For certain 8 bit ops, we can pack a v4i8 into a single part
-  // (e.g. v4i8 shufflevectors -> v_perm v4i8, v4i8). Thus, we
-  // do not limit the numberOfParts for 8 bit vectors to the
-  // legalization costs of such. It is left up to other target
-  // queries (e.g. get*InstrCost) to decide the proper handling
-  // of 8 bit vectors.
-  if (FixedVectorType *VTy = dyn_cast<FixedVectorType>(Tp)) {
-    if (ST->shouldCoerceIllegalTypes() &&
-        DL.getTypeSizeInBits(VTy->getElementType()) == 8) {
-      unsigned ElCount = VTy->getElementCount().getFixedValue();
-      return std::max(UINT64_C(1), PowerOf2Ceil(ElCount / 4));
-    }
-  }
-
-  return BaseT::getNumberOfParts(Tp);
-}
-
 unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
   // NB: RCID is not an RCID. In fact it is 0 or 1 for scalar or vector
   // registers. See getRegisterClassForType for the implementation.
@@ -363,10 +345,12 @@ unsigned GCNTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
   if (Opcode == Instruction::Load || Opcode == Instruction::Store)
     return 32 * 4 / ElemWidth;
 
-  return (ST->shouldCoerceIllegalTypes() && ElemWidth == 8) ? 4
-         : (ElemWidth == 16)                                ? 2
-         : (ElemWidth == 32 && ST->hasPackedFP32Ops())      ? 2
-                                                            : 1;
+  // For a given width return the max number of elements that can be combined
+  // into a wider bit value:
+  return (ElemWidth == 8 && ST->has16BitInsts())       ? 4
+         : (ElemWidth == 16 && ST->has16BitInsts())    ? 2
+         : (ElemWidth == 32 && ST->hasPackedFP32Ops()) ? 2
+                                                       : 1;
 }
 
 unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
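For context, the ternary chain above can be read as "how many elements of this width fit in one wider packed value". A minimal standalone sketch, not part of the commit, that mirrors the logic under the assumption that the subtarget reports both has16BitInsts() and hasPackedFP32Ops() as true:

#include <cstdio>

// Hypothetical mirror of the new getMaximumVF ternary; the flag values are
// assumptions for illustration, not queried from a real subtarget.
static unsigned maxElemsPerWideValue(unsigned ElemWidth, bool Has16BitInsts,
                                     bool HasPackedFP32Ops) {
  return (ElemWidth == 8 && Has16BitInsts)       ? 4 // four i8 per 32-bit value
         : (ElemWidth == 16 && Has16BitInsts)    ? 2 // two i16/f16 per 32-bit value
         : (ElemWidth == 32 && HasPackedFP32Ops) ? 2 // packed 32-bit pairs
                                                  : 1;
}

int main() {
  for (unsigned Width : {8u, 16u, 32u, 64u})
    std::printf("ElemWidth %u -> max VF %u\n", Width,
                maxElemsPerWideValue(Width, /*Has16BitInsts=*/true,
                                     /*HasPackedFP32Ops=*/true));
  return 0;
}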
@@ -1176,8 +1160,7 @@ InstructionCost GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
 
   unsigned ScalarSize = DL.getTypeSizeInBits(VT->getElementType());
   if (ST->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
-      (ScalarSize == 16 ||
-       (ScalarSize == 8 && ST->shouldCoerceIllegalTypes()))) {
+      (ScalarSize == 16 || ScalarSize == 8)) {
     // Larger vector widths may require additional instructions, but are
     // typically cheaper than scalarized versions.
     unsigned NumVectorElts = cast<FixedVectorType>(VT)->getNumElements();
@@ -1452,3 +1435,30 @@ unsigned GCNTTIImpl::getPrefetchDistance() const {
 bool GCNTTIImpl::shouldPrefetchAddressSpace(unsigned AS) const {
   return AMDGPU::isFlatGlobalAddrSpace(AS);
 }
+
+InstructionCost GCNTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+                                            Align Alignment,
+                                            unsigned AddressSpace,
+                                            TTI::TargetCostKind CostKind,
+                                            TTI::OperandValueInfo OpInfo,
+                                            const Instruction *I) {
+  if (VectorType *VecTy = dyn_cast<VectorType>(Src)) {
+    if ((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
+        VecTy->getElementType()->isIntegerTy(8)) {
+      return divideCeil(DL.getTypeSizeInBits(VecTy) - 1,
+                        getLoadStoreVecRegBitWidth(AddressSpace));
+    }
+  }
+  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
+                                OpInfo, I);
+}
+
+unsigned GCNTTIImpl::getNumberOfParts(Type *Tp) {
+  if (VectorType *VecTy = dyn_cast<VectorType>(Tp)) {
+    if (VecTy->getElementType()->isIntegerTy(8)) {
+      unsigned ElementCount = VecTy->getElementCount().getFixedValue();
+      return divideCeil(ElementCount - 1, 4);
+    }
+  }
+  return BaseT::getNumberOfParts(Tp);
+}
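The two helpers added above share the same grouping idea: i8 elements are costed as if packed into 32-bit values. A rough standalone illustration, not part of the commit; divideCeil below is a local stand-in for llvm::divideCeil, and the 128-bit register width is an assumed placeholder for what getLoadStoreVecRegBitWidth would return:

#include <cstdio>

// Stand-in for llvm::divideCeil: ceiling integer division.
static unsigned divideCeil(unsigned Numerator, unsigned Denominator) {
  return (Numerator + Denominator - 1) / Denominator;
}

int main() {
  // getMemoryOpCost analogue: an i8 vector load/store is charged by how many
  // load/store vector registers its bits span (128-bit width assumed).
  const unsigned RegBits = 128;
  for (unsigned NumElts : {4u, 8u, 16u, 32u})
    std::printf("load <%u x i8> (%u bits) -> cost %u\n", NumElts, NumElts * 8,
                divideCeil(NumElts * 8 - 1, RegBits));

  // getNumberOfParts analogue: four i8 elements per 32-bit part.
  for (unsigned NumElts : {4u, 8u, 16u})
    std::printf("<%u x i8> -> %u parts\n", NumElts, divideCeil(NumElts - 1, 4));
  return 0;
}

Under these assumptions a <4 x i8> load is costed like a single i32 load, and a <8 x i8> vector counts as two 32-bit parts rather than eight scalar parts.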

llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h

Lines changed: 14 additions & 1 deletion
@@ -118,7 +118,6 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
     return TTI::PSK_FastHardware;
   }
 
-  unsigned getNumberOfParts(Type *Tp);
   unsigned getNumberOfRegisters(unsigned RCID) const;
   TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind Vector) const;
   unsigned getMinVectorRegisterBitWidth() const;
@@ -278,6 +277,20 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
 
   /// \return if target want to issue a prefetch in address space \p AS.
   bool shouldPrefetchAddressSpace(unsigned AS) const override;
+
+  /// Account for loads of i8 vector types to have reduced cost. For
+  /// example, the cost of loading 4 i8 values is the cost of loading
+  /// a single i32 value.
+  InstructionCost getMemoryOpCost(
+      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
+      TTI::TargetCostKind CostKind,
+      TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
+      const Instruction *I = nullptr);
+
+  /// When counting parts on AMD GPUs, account for i8s being grouped
+  /// together under a single i32 value. Otherwise fall back to the base
+  /// implementation.
+  unsigned getNumberOfParts(Type *Tp);
 };
 
 } // end namespace llvm
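As a purely conceptual aside (not part of the patch, and not how the backend implements it), the "i8s being grouped together under a single i32 value" wording in the comments above amounts to treating four bytes as one 32-bit quantity; in plain packing terms:

#include <cstdint>
#include <cstdio>

// Illustration only: four i8 lanes viewed as one i32. The actual grouping is
// performed by type legalization and instruction selection (e.g. v_perm),
// not by code like this.
static uint32_t packFourI8(uint8_t B0, uint8_t B1, uint8_t B2, uint8_t B3) {
  return uint32_t(B0) | uint32_t(B1) << 8 | uint32_t(B2) << 16 |
         uint32_t(B3) << 24;
}

int main() {
  std::printf("0x%08x\n", packFourI8(0x11, 0x22, 0x33, 0x44)); // prints 0x44332211
  return 0;
}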
