Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
17 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 22 additions & 0 deletions llvm/include/llvm/Transforms/IPO/Attributor.h
Original file line number Diff line number Diff line change
Expand Up @@ -1355,6 +1355,12 @@ struct InformationCache {
/// Return the flat address space if the associated target has.
LLVM_ABI std::optional<unsigned> getFlatAddressSpace() const;

virtual bool shouldTrackUse(const AbstractAttribute *QueryingAA,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do we need this?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @shiltian , this is used to forward propagate (which is in initialization of each AAAlign).

When calling followUsesInMBEC if this returns true, it will put user's user into the list for checking known alignment

Value &AssociatedValue, const Use *U,
const Instruction *I) const {
return false;
}

private:
struct FunctionInfo {
LLVM_ABI ~FunctionInfo();
Expand Down Expand Up @@ -2042,6 +2048,19 @@ struct Attributor {
SimplificationCallbacks[IRP].emplace_back(CB);
}

/// Callback type used to supply the values (and contexts) whose alignment
/// should be combined for a given position instead of the default
/// simplified-value query.
using AlignmentCallbackTy =
    std::function<void(const IRPosition &, const AbstractAttribute *,
                       SmallVectorImpl<AA::ValueAndContext> &)>;
/// Register \p CB as an AAAlign callback for \p IRP. Multiple callbacks may
/// be registered for the same position; all of them are invoked.
void registerAlignmentCallback(const IRPosition &IRP,
                               const AlignmentCallbackTy &CB) {
  AlignmentCallBacks[IRP].emplace_back(CB);
}

SmallVector<AlignmentCallbackTy, 1>
getAlignmentCallback(const IRPosition &IRP) {
return AlignmentCallBacks.lookup(IRP);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What if the lookup fails? I'd prefer to do similar style as the simplification CB.

}

/// Return true if there is a simplification callback for \p IRP.
bool hasSimplificationCallback(const IRPosition &IRP) {
return SimplificationCallbacks.count(IRP);
Expand Down Expand Up @@ -2093,6 +2112,9 @@ struct Attributor {
DenseMap<IRPosition, SmallVector<SimplifictionCallbackTy, 1>>
SimplificationCallbacks;

/// The vector with AAAlign callbacks registered by outside AAs.
DenseMap<IRPosition, SmallVector<AlignmentCallbackTy, 1>> AlignmentCallBacks;

/// The vector with all simplification callbacks for global variables
/// registered by outside AAs.
DenseMap<const GlobalVariable *,
Expand Down
37 changes: 36 additions & 1 deletion llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,18 @@ static bool funcRequiresHostcallPtr(const Function &F) {
F.hasFnAttribute(Attribute::SanitizeMemTag);
}

static bool isAlignAndMakeBuffer(const AbstractAttribute *AA,
const Instruction *I) {
if (isa<AAAlign>(AA)) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Probably shouldn't need to ever identify AAs like this?

if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
if (II->getIntrinsicID() == Intrinsic::amdgcn_make_buffer_rsrc)
return true;
}
}

return false;
}

namespace {
class AMDGPUInformationCache : public InformationCache {
public:
Expand Down Expand Up @@ -235,6 +247,12 @@ class AMDGPUInformationCache : public InformationCache {
return ST.getMaxWavesPerEU();
}

/// AMDGPU-specific use tracking: follow a use when the querying AA is an
/// alignment AA and \p I is an amdgcn.make.buffer.rsrc call, so alignment
/// is propagated through the buffer-resource creation.
bool shouldTrackUse(const AbstractAttribute *QueryingAA,
                    Value &AssociatedValue, const Use *U,
                    const Instruction *I) const override {
  return isAlignAndMakeBuffer(QueryingAA, I);
}

private:
/// Check if the ConstantExpr \p CE uses an addrspacecast from private or
/// local to flat. These casts may require the queue pointer.
Expand Down Expand Up @@ -1381,7 +1399,7 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
&AAAMDMaxNumWorkgroups::ID, &AAAMDWavesPerEU::ID, &AAAMDGPUNoAGPR::ID,
&AACallEdges::ID, &AAPointerInfo::ID, &AAPotentialConstantValues::ID,
&AAUnderlyingObjects::ID, &AAAddressSpace::ID, &AAIndirectCallInfo::ID,
&AAInstanceInfo::ID});
&AAInstanceInfo::ID, &AAAlign::ID});
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Adding this to AMDGPUAttributor should be a separate patch; you can do this in the base attributor first.

Copy link
Contributor

@shiltian shiltian Jun 25, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

agreed. you can add support for some generic LLVM intrinsics and test it.


AttributorConfig AC(CGUpdater);
AC.IsClosedWorldModule = Options.IsClosedWorld;
Expand Down Expand Up @@ -1432,6 +1450,23 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
} else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I)) {
A.getOrCreateAAFor<AAAddressSpace>(
IRPosition::value(*CmpX->getPointerOperand()));
} else if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
if (II->getIntrinsicID() == Intrinsic::amdgcn_make_buffer_rsrc) {
IRPosition IRP = IRPosition::inst(*II);

Attributor::AlignmentCallbackTy ACB =
[](const IRPosition &IRP, const AbstractAttribute *AA,
SmallVectorImpl<AA::ValueAndContext> &Values) {
if (auto *I = dyn_cast<Instruction>(&IRP.getAssociatedValue()))
if (isAlignAndMakeBuffer(AA, I)) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You don't need to check whether it is AAAlign here, since this CB is only for align.

Values.push_back(
AA::ValueAndContext{*I->getOperand(0), nullptr});
}
};
A.registerAlignmentCallback(IRP, ACB);

A.getOrCreateAAFor<AAAlign>(IRP);
}
}
}
}
Expand Down
14 changes: 12 additions & 2 deletions llvm/lib/Transforms/IPO/AttributorAttributes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5202,6 +5202,10 @@ static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
TrackUse = true;
return 0;
}
if (A.getInfoCache().shouldTrackUse(&QueryingAA, AssociatedValue, U, I)) {
TrackUse = true;
return 0;
}

MaybeAlign MA;
if (const auto *CB = dyn_cast<CallBase>(I)) {
Expand Down Expand Up @@ -5369,8 +5373,14 @@ struct AAAlignFloating : AAAlignImpl {
bool Stripped;
bool UsedAssumedInformation = false;
SmallVector<AA::ValueAndContext> Values;
if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
AA::AnyScope, UsedAssumedInformation)) {
const auto &AligmentCBs = A.getAlignmentCallback(getIRPosition());
if (!AligmentCBs.empty()) {
for (const auto &CB : AligmentCBs) {
CB(getIRPosition(), this, Values);
}
} else if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
AA::AnyScope,
UsedAssumedInformation)) {
Values.push_back({getAssociatedValue(), getCtxI()});
Stripped = false;
} else {
Expand Down
40 changes: 40 additions & 0 deletions llvm/test/CodeGen/AMDGPU/attr-amdgpu-align.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-attributor %s -o - | FileCheck %s

; Backward propagation: the `align 8` on the load of the fat pointer is
; propagated back through llvm.amdgcn.make.buffer.rsrc to the incoming
; pointer argument, upgrading its attribute from align 4 to align 8
; (see the CHECK-SAME line).
define float @align_back_prop(ptr addrspace(1) align 4 %x) {
; CHECK-LABEL: define float @align_back_prop(
; CHECK-SAME: ptr addrspace(1) align 8 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[FAT_PTR:%.*]] = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[X]], i16 0, i32 256, i32 0)
; CHECK-NEXT: [[Y:%.*]] = load float, ptr addrspace(7) [[FAT_PTR]], align 8
; CHECK-NEXT: ret float [[Y]]
;
%fat.ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %x, i16 0, i32 256, i32 0)
%y = load float, ptr addrspace(7) %fat.ptr, align 8
ret float %y
}

; Forward propagation: the incoming pointer's `align 8` is propagated
; forward through llvm.amdgcn.make.buffer.rsrc to the fat pointer,
; upgrading the load's alignment from 4 to 8.
; (Function name fixed: "foward" -> "forward".)
define float @align_forward_prop(ptr addrspace(1) align 8 %x) {
; CHECK-LABEL: define float @align_forward_prop(
; CHECK-SAME: ptr addrspace(1) align 8 [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[FAT_PTR:%.*]] = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[X]], i16 0, i32 256, i32 0)
; CHECK-NEXT: [[Y:%.*]] = load float, ptr addrspace(7) [[FAT_PTR]], align 8
; CHECK-NEXT: ret float [[Y]]
;
%fat.ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %x, i16 0, i32 256, i32 0)
%y = load float, ptr addrspace(7) %fat.ptr, align 4
ret float %y
}

; Mixed propagation: the `align 8` from the direct load of %x propagates to
; the argument, and from there forward through make.buffer.rsrc, so the fat
; pointer load's alignment is raised from 2 to 8.
define float @align_mix_prop(ptr addrspace(1) align 4 %x) {
; CHECK-LABEL: define float @align_mix_prop(
; CHECK-SAME: ptr addrspace(1) align 8 [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[FAT_PTR:%.*]] = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[X]], i16 0, i32 256, i32 0)
; CHECK-NEXT: [[Y:%.*]] = load float, ptr addrspace(7) [[FAT_PTR]], align 8
; CHECK-NEXT: [[Z:%.*]] = load float, ptr addrspace(1) [[X]], align 8
; CHECK-NEXT: ret float [[Z]]
;
%fat.ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %x, i16 0, i32 256, i32 0)
%y = load float, ptr addrspace(7) %fat.ptr, align 2
%z = load float, ptr addrspace(1) %x, align 8
ret float %z
}
Loading