Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
95 changes: 44 additions & 51 deletions llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -385,57 +385,20 @@ static bool isSupportedMemset(MemSetInst *I, AllocaInst *AI,
match(I->getOperand(2), m_SpecificInt(Size)) && !I->isVolatile();
}

// Report whether any index operand of \p GEP is non-constant. Operand 0 is
// the base pointer, so only operands [1, N) are inspected.
static bool hasVariableOffset(GetElementPtrInst *GEP) {
  unsigned NumOps = GEP->getNumOperands();
  unsigned Idx = 1;
  while (Idx != NumOps) {
    // Any index that is not a ConstantInt makes the overall offset variable.
    if (!isa<ConstantInt>(GEP->getOperand(Idx)))
      return true;
    ++Idx;
  }
  return false;
}

// Map a pointer used by a load/store back to the vector element index it
// addresses, using the cache built while scanning the alloca's GEP users.
//
// \param Ptr    Pointer operand of the memory access (casts are stripped).
// \param GEPIdx Cache of previously computed per-GEP index values.
// \param DL     DataLayout used to compute GEP offsets.
// \return The index value; index 0 when \p Ptr is not based on a GEP.
static Value *
calculateVectorIndex(Value *Ptr, std::map<GetElementPtrInst *, Value *> &GEPIdx,
                     const DataLayout &DL) {
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
  if (!GEP)
    // A direct (non-GEP) access to the alloca addresses element 0.
    return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));

  // If the index of this GEP is a variable that might be deleted,
  // update the index with its latest value. We've already handled any GEPs
  // with unsupported index types (in GEPToVectorIndex) at this point.
  if (hasVariableOffset(GEP)) {
    unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
    SmallMapVector<Value *, APInt, 4> VarOffsets;
    APInt ConstOffset(BW, 0);
    if (GEP->collectOffset(DL, BW, VarOffsets, ConstOffset)) {
      // Only the simple form (exactly one variable offset, no constant part)
      // is refreshed here; anything else falls through to the cached entry.
      if (VarOffsets.size() == 1 && ConstOffset.isZero()) {
        auto *UpdatedValue = VarOffsets.front().first;
        GEPIdx[GEP] = UpdatedValue;
        return UpdatedValue;
      }
    }
  }

  auto I = GEPIdx.find(GEP);
  assert(I != GEPIdx.end() && "Must have entry for GEP!");
  return I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
Type *VecElemTy, const DataLayout &DL) {
static Value *GEPToVectorIndex(GetElementPtrInst *GEP, Type *VecElemTy,
const DataLayout &DL,
AllocaInst *Alloca = nullptr) {
// TODO: Extracting a "multiple of X" from a GEP might be a useful generic
// helper.
unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
SmallMapVector<Value *, APInt, 4> VarOffsets;
APInt ConstOffset(BW, 0);
if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
!GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
return nullptr;

bool CanCollect = GEP->collectOffset(DL, BW, VarOffsets, ConstOffset);

if (Alloca)
if (GEP->getPointerOperand()->stripPointerCasts() != Alloca || !CanCollect)
return nullptr;

unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
if (VarOffsets.size() > 1)
Expand All @@ -459,6 +422,36 @@ static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
return ConstantInt::get(GEP->getContext(), Quot);
}

// Function to check if a Value is an operand of a GetElementPtrInst.
static bool isValueInGEP(GetElementPtrInst *GEP, Value *ValueToCheck) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This still feels like too specific of a check, and the same form of pattern could arise from other instructions. It would be better to detect it as part of the transformation

if (!GEP || !ValueToCheck)
return false;

for (unsigned i = 0; i < GEP->getNumOperands(); ++i)
if (GEP->getOperand(i) == ValueToCheck)
return true;

return false;
}

// Map a pointer used by a load/store back to the vector element index it
// addresses, recomputing the cached index when its cached value has gone
// stale (i.e. is no longer an operand of the GEP).
//
// \param Ptr       Pointer operand of the memory access (casts are stripped).
// \param GEPIdx    Cache of per-GEP index values; refreshed in place here.
// \param VecElemTy Element type of the vector the alloca is promoted to.
// \param DL        DataLayout used to compute GEP offsets.
// \return The index value; index 0 when \p Ptr is not based on a GEP.
static Value *
calculateVectorIndex(Value *Ptr, std::map<GetElementPtrInst *, Value *> &GEPIdx,
                     Type *VecElemTy, const DataLayout &DL) {
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
  if (!GEP)
    // A direct (non-GEP) access to the alloca addresses element 0.
    return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));

  // Update the cached index if the value is changed.
  // NOTE(review): GEPIdx[GEP] uses std::map::operator[], which default-inserts
  // a null entry for a GEP not yet in the cache; isValueInGEP(GEP, nullptr)
  // returns false, so the index is then recomputed. GEPToVectorIndex may
  // itself return nullptr, which would be stored and returned without
  // tripping the assert below — confirm every GEP reaching here was vetted
  // by tryPromoteAllocaToVector first.
  if (!isValueInGEP(GEP, GEPIdx[GEP])) {
    Value *Index = GEPToVectorIndex(GEP, VecElemTy, DL);
    GEPIdx[GEP] = Index;
  }

  auto I = GEPIdx.find(GEP);
  assert(I != GEPIdx.end() && "Must have entry for GEP!");
  return I->second;
}

/// Promotes a single user of the alloca to a vector form.
///
/// \param Inst Instruction to be promoted.
Expand Down Expand Up @@ -525,7 +518,7 @@ static Value *promoteAllocaUserToVector(
}

Value *Index = calculateVectorIndex(
cast<LoadInst>(Inst)->getPointerOperand(), GEPVectorIdx, DL);
cast<LoadInst>(Inst)->getPointerOperand(), GEPVectorIdx, VecEltTy, DL);

// We're loading the full vector.
Type *AccessTy = Inst->getType();
Expand Down Expand Up @@ -581,8 +574,8 @@ static Value *promoteAllocaUserToVector(
// to know the current value. If this is a store of a single element, we
// need to know the value.
StoreInst *SI = cast<StoreInst>(Inst);
Value *Index =
calculateVectorIndex(SI->getPointerOperand(), GEPVectorIdx, DL);
Value *Index = calculateVectorIndex(SI->getPointerOperand(), GEPVectorIdx,
VecEltTy, DL);
Value *Val = SI->getValueOperand();

// We're storing the full vector, we can handle this without knowing CurVal.
Expand Down Expand Up @@ -845,7 +838,7 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
// If we can't compute a vector index from this GEP, then we can't
// promote this alloca to vector.
Value *Index = GEPToVectorIndex(GEP, &Alloca, VecEltTy, *DL);
Value *Index = GEPToVectorIndex(GEP, VecEltTy, *DL, &Alloca);
if (!Index)
return RejectUser(Inst, "cannot compute vector index for GEP");

Expand Down Expand Up @@ -881,7 +874,7 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
return nullptr;

return dyn_cast<ConstantInt>(
calculateVectorIndex(Ptr, GEPVectorIdx, *DL));
calculateVectorIndex(Ptr, GEPVectorIdx, VecEltTy, *DL));
};

unsigned OpNum = U->getOperandNo();
Expand Down
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/AMDGPU/promote-alloca-array-aggregate.ll
Original file line number Diff line number Diff line change
Expand Up @@ -127,25 +127,25 @@ define amdgpu_vs void @promote_load_from_store_aggr() #0 {
%gl_PV = type { <4 x i32>, i32, [1 x i32], [1 x i32] }
@pv1 = external addrspace(1) global %gl_PV

; This should not crash on a variable offset that can be
; optimized out (variable foo4 in the test)
define amdgpu_vs void @promote_load_from_store_aggr_varoff() local_unnamed_addr {
; This should not crash on an aliased variable offset that can be
; optimized out (variable %aliasToG1 in the test)
define amdgpu_vs void @promote_load_from_store_aggr_varoff(<4 x i32> %input) {
; CHECK-LABEL: @promote_load_from_store_aggr_varoff(
; CHECK-NEXT: [[FOO3_UNPACK2:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds (i8, ptr addrspace(1) @block4, i64 8), align 4
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <3 x i32> undef, i32 [[FOO3_UNPACK2]], i32 2
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <3 x i32> [[TMP1]], i32 [[FOO3_UNPACK2]]
; CHECK-NEXT: [[FOO12:%.*]] = insertelement <4 x i32> poison, i32 [[TMP2]], i64 3
; CHECK-NEXT: [[FOO12:%.*]] = insertelement <4 x i32> %input, i32 [[TMP2]], i64 3
; CHECK-NEXT: store <4 x i32> [[FOO12]], ptr addrspace(1) @pv1, align 16
; CHECK-NEXT: ret void
;
%f1 = alloca [3 x i32], align 4, addrspace(5)
%G1 = getelementptr inbounds i8, ptr addrspace(5) %f1, i32 8
%foo3.unpack2 = load i32, ptr addrspace(1) getelementptr inbounds (i8, ptr addrspace(1) @block4, i64 8), align 4
store i32 %foo3.unpack2, ptr addrspace(5) %G1, align 4
%foo4 = load i32, ptr addrspace(5) %G1, align 4
%foo5 = getelementptr [3 x i32], ptr addrspace(5) %f1, i32 0, i32 %foo4
%aliasToG1 = load i32, ptr addrspace(5) %G1, align 4
%foo5 = getelementptr [3 x i32], ptr addrspace(5) %f1, i32 0, i32 %aliasToG1
%foo6 = load i32, ptr addrspace(5) %foo5, align 4
%foo12 = insertelement <4 x i32> poison, i32 %foo6, i64 3
%foo12 = insertelement <4 x i32> %input, i32 %foo6, i64 3
store <4 x i32> %foo12, ptr addrspace(1) @pv1, align 16
ret void
}
Expand Down
Loading