-
Notifications
You must be signed in to change notification settings - Fork 14.3k
[AMDGPU] Add alignment attr & propagate alignment through make.buffer.rsrc inst #145278
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
[AMDGPU] Add alignment attr & propagate alignment through make.buffer.rsrc inst #145278
Conversation
@llvm/pr-subscribers-clang @llvm/pr-subscribers-backend-amdgpu Author: None (Shoreshen) Changes
Full diff: https://github.com/llvm/llvm-project/pull/145278.diff 4 Files Affected:
diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h
index e6eb756df987d..64285c2114976 100644
--- a/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -1355,6 +1355,12 @@ struct InformationCache {
/// Return the flat address space if the associated target has.
LLVM_ABI std::optional<unsigned> getFlatAddressSpace() const;
+ virtual bool shouldTrackUse(const AbstractAttribute *QueryingAA,
+ Value &AssociatedValue, const Use *U,
+ const Instruction *I) const {
+ return false;
+ }
+
private:
struct FunctionInfo {
LLVM_ABI ~FunctionInfo();
@@ -2042,6 +2048,19 @@ struct Attributor {
SimplificationCallbacks[IRP].emplace_back(CB);
}
+ using AlignmentCallbackTy =
+ std::function<void(const IRPosition &, const AbstractAttribute *,
+ SmallVectorImpl<AA::ValueAndContext> &)>;
+ void registerAlignmentCallback(const IRPosition &IRP,
+ const AlignmentCallbackTy &CB) {
+ AlignmentCallBacks[IRP].emplace_back(CB);
+ }
+
+ SmallVector<AlignmentCallbackTy, 1>
+ getAlignmentCallback(const IRPosition &IRP) {
+ return AlignmentCallBacks.lookup(IRP);
+ }
+
/// Return true if there is a simplification callback for \p IRP.
bool hasSimplificationCallback(const IRPosition &IRP) {
return SimplificationCallbacks.count(IRP);
@@ -2093,6 +2112,9 @@ struct Attributor {
DenseMap<IRPosition, SmallVector<SimplifictionCallbackTy, 1>>
SimplificationCallbacks;
+ /// The vector with AAAlign callbacks registered by outside AAs.
+ DenseMap<IRPosition, SmallVector<AlignmentCallbackTy, 1>> AlignmentCallBacks;
+
/// The vector with all simplification callbacks for global variables
/// registered by outside AAs.
DenseMap<const GlobalVariable *,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
index f4d3a014f9921..0731dcfbcd05c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
@@ -138,6 +138,18 @@ static bool funcRequiresHostcallPtr(const Function &F) {
F.hasFnAttribute(Attribute::SanitizeMemTag);
}
+static bool isAlignAndMakeBuffer(const AbstractAttribute *AA,
+ const Instruction *I) {
+ if (isa<AAAlign>(AA)) {
+ if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
+ if (II->getIntrinsicID() == Intrinsic::amdgcn_make_buffer_rsrc)
+ return true;
+ }
+ }
+
+ return false;
+}
+
namespace {
class AMDGPUInformationCache : public InformationCache {
public:
@@ -235,6 +247,12 @@ class AMDGPUInformationCache : public InformationCache {
return ST.getMaxWavesPerEU();
}
+ bool shouldTrackUse(const AbstractAttribute *QueryingAA,
+ Value &AssociatedValue, const Use *U,
+ const Instruction *I) const override {
+ return isAlignAndMakeBuffer(QueryingAA, I);
+ }
+
private:
/// Check if the ConstantExpr \p CE uses an addrspacecast from private or
/// local to flat. These casts may require the queue pointer.
@@ -1381,7 +1399,7 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
&AAAMDMaxNumWorkgroups::ID, &AAAMDWavesPerEU::ID, &AAAMDGPUNoAGPR::ID,
&AACallEdges::ID, &AAPointerInfo::ID, &AAPotentialConstantValues::ID,
&AAUnderlyingObjects::ID, &AAAddressSpace::ID, &AAIndirectCallInfo::ID,
- &AAInstanceInfo::ID});
+ &AAInstanceInfo::ID, &AAAlign::ID});
AttributorConfig AC(CGUpdater);
AC.IsClosedWorldModule = Options.IsClosedWorld;
@@ -1432,6 +1450,23 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
} else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I)) {
A.getOrCreateAAFor<AAAddressSpace>(
IRPosition::value(*CmpX->getPointerOperand()));
+ } else if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
+ if (II->getIntrinsicID() == Intrinsic::amdgcn_make_buffer_rsrc) {
+ IRPosition IRP = IRPosition::inst(*II);
+
+ Attributor::AlignmentCallbackTy ACB =
+ [](const IRPosition &IRP, const AbstractAttribute *AA,
+ SmallVectorImpl<AA::ValueAndContext> &Values) {
+ if (auto *I = dyn_cast<Instruction>(&IRP.getAssociatedValue()))
+ if (isAlignAndMakeBuffer(AA, I)) {
+ Values.push_back(
+ AA::ValueAndContext{*I->getOperand(0), nullptr});
+ }
+ };
+ A.registerAlignmentCallback(IRP, ACB);
+
+ A.getOrCreateAAFor<AAAlign>(IRP);
+ }
}
}
}
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 3799a696f67af..cca03b30e75c7 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -5202,6 +5202,10 @@ static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
TrackUse = true;
return 0;
}
+ if (A.getInfoCache().shouldTrackUse(&QueryingAA, AssociatedValue, U, I)) {
+ TrackUse = true;
+ return 0;
+ }
MaybeAlign MA;
if (const auto *CB = dyn_cast<CallBase>(I)) {
@@ -5369,8 +5373,14 @@ struct AAAlignFloating : AAAlignImpl {
bool Stripped;
bool UsedAssumedInformation = false;
SmallVector<AA::ValueAndContext> Values;
- if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
- AA::AnyScope, UsedAssumedInformation)) {
+ const auto &AligmentCBs = A.getAlignmentCallback(getIRPosition());
+ if (!AligmentCBs.empty()) {
+ for (const auto &CB : AligmentCBs) {
+ CB(getIRPosition(), this, Values);
+ }
+ } else if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
+ AA::AnyScope,
+ UsedAssumedInformation)) {
Values.push_back({getAssociatedValue(), getCtxI()});
Stripped = false;
} else {
diff --git a/llvm/test/CodeGen/AMDGPU/attr-amdgpu-align.ll b/llvm/test/CodeGen/AMDGPU/attr-amdgpu-align.ll
new file mode 100644
index 0000000000000..85f77735bf2b6
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/attr-amdgpu-align.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-attributor %s -o - | FileCheck %s
+
+define float @align_back_prop(ptr addrspace(1) align 4 %x) {
+; CHECK-LABEL: define float @align_back_prop(
+; CHECK-SAME: ptr addrspace(1) align 8 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[FAT_PTR:%.*]] = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[X]], i16 0, i32 256, i32 0)
+; CHECK-NEXT: [[Y:%.*]] = load float, ptr addrspace(7) [[FAT_PTR]], align 8
+; CHECK-NEXT: ret float [[Y]]
+;
+ %fat.ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %x, i16 0, i32 256, i32 0)
+ %y = load float, ptr addrspace(7) %fat.ptr, align 8
+ ret float %y
+}
+
+define float @align_foward_prop(ptr addrspace(1) align 8 %x) {
+; CHECK-LABEL: define float @align_foward_prop(
+; CHECK-SAME: ptr addrspace(1) align 8 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[FAT_PTR:%.*]] = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[X]], i16 0, i32 256, i32 0)
+; CHECK-NEXT: [[Y:%.*]] = load float, ptr addrspace(7) [[FAT_PTR]], align 8
+; CHECK-NEXT: ret float [[Y]]
+;
+ %fat.ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %x, i16 0, i32 256, i32 0)
+ %y = load float, ptr addrspace(7) %fat.ptr, align 4
+ ret float %y
+}
+
+define float @align_mix_prop(ptr addrspace(1) align 4 %x) {
+; CHECK-LABEL: define float @align_mix_prop(
+; CHECK-SAME: ptr addrspace(1) align 8 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[FAT_PTR:%.*]] = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[X]], i16 0, i32 256, i32 0)
+; CHECK-NEXT: [[Y:%.*]] = load float, ptr addrspace(7) [[FAT_PTR]], align 8
+; CHECK-NEXT: [[Z:%.*]] = load float, ptr addrspace(1) [[X]], align 8
+; CHECK-NEXT: ret float [[Z]]
+;
+ %fat.ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %x, i16 0, i32 256, i32 0)
+ %y = load float, ptr addrspace(7) %fat.ptr, align 2
+ %z = load float, ptr addrspace(1) %x, align 8
+ ret float %z
+}
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This seems reasonable overall though i'll leave final review to folks familiar with the attributor.
One question: how much work would it be to materialize the returned alignment as an attribute on the make.buffer.rsrc
call? I figure it would help make it more obvious what's going on.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Some fundamental issues need to be resolved first.
@@ -1355,6 +1355,12 @@ struct InformationCache { | |||
/// Return the flat address space if the associated target has. | |||
LLVM_ABI std::optional<unsigned> getFlatAddressSpace() const; | |||
|
|||
virtual bool shouldTrackUse(const AbstractAttribute *QueryingAA, |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why do we need this?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Hi @shiltian, this is used for forward propagation (which happens in the initialization of each AAAlign).
When calling followUsesInMBEC, if this returns true, it will put the user's user into the list for checking known alignment.
[](const IRPosition &IRP, const AbstractAttribute *AA, | ||
SmallVectorImpl<AA::ValueAndContext> &Values) { | ||
if (auto *I = dyn_cast<Instruction>(&IRP.getAssociatedValue())) | ||
if (isAlignAndMakeBuffer(AA, I)) { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
You don't need to check whether it is AAAign here since this CB is only for align.
|
||
SmallVector<AlignmentCallbackTy, 1> | ||
getAlignmentCallback(const IRPosition &IRP) { | ||
return AlignmentCallBacks.lookup(IRP); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
What if the lookup fails? I'd prefer to follow a similar style to the simplification CB.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Description should be more clear about what this is. This isn't adding anything to the intrinsic itself.
I think the only thing special about this case is it's an intrinsic that propagates a pointer operand, similar to llvm.ptrmask. It looks like AAAlign also fails to propagate the alignment (which may have been improved by the ptrmask). Can you start by adding support for llvm.ptrmask, and then generalizing the support to other pointer intrinsics?
@@ -138,6 +138,18 @@ static bool funcRequiresHostcallPtr(const Function &F) { | |||
F.hasFnAttribute(Attribute::SanitizeMemTag); | |||
} | |||
|
|||
static bool isAlignAndMakeBuffer(const AbstractAttribute *AA, | |||
const Instruction *I) { | |||
if (isa<AAAlign>(AA)) { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Probably shouldn't need to ever identify AAs like this?
@@ -1381,7 +1399,7 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM, | |||
&AAAMDMaxNumWorkgroups::ID, &AAAMDWavesPerEU::ID, &AAAMDGPUNoAGPR::ID, | |||
&AACallEdges::ID, &AAPointerInfo::ID, &AAPotentialConstantValues::ID, | |||
&AAUnderlyingObjects::ID, &AAAddressSpace::ID, &AAIndirectCallInfo::ID, | |||
&AAInstanceInfo::ID}); | |||
&AAInstanceInfo::ID, &AAAlign::ID}); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Adding this to AMDGPUAttributor should be a separate patch, you can do this in the base attributor first
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
agreed. you can add support for some generic LLVM intrinsics and test it.
Good point, seconding "let's teach AAAlign about ptrmask" |
Hi @krzysz00, do you mean adding an alignment attribute on the call to the intrinsic? |
Hi @arsenm, to support llvm.ptrmask we need to update AAAlign's ability. Currently the only thing we can do is to tell AAAlign that the alignment of the call is the same as that of some operand of the call. By my understanding it is a little bit different from |
ptrmask shouldn't be any different than getelementptr: https://godbolt.org/z/nbe4x7qnd It requires propagating the source attribute, and then interpreting it through the instruction to reach the return |
SmallVector<AA::ValueAndContext> Values; | ||
const auto &AligmentCBs = A.getAlignmentCallback(getIRPosition()); | ||
|
||
if (!AligmentCBs.empty()) { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Since `lookup` default-constructs a value when the key is absent, you don't need to check emptiness here.
for (const auto &CB : AligmentCBs) { | ||
CB(getIRPosition(), this, Values); | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
for (const auto &CB : AligmentCBs) { | |
CB(getIRPosition(), this, Values); | |
} | |
for (const auto &CB : AligmentCBs) | |
CB(getIRPosition(), this, Values); |
@@ -5500,7 +5504,34 @@ struct AAAlignCallSiteReturned final | |||
using Base = AACalleeToCallSite<AAAlign, AAAlignImpl>; | |||
AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) | |||
: Base(IRP, A) {} | |||
ChangeStatus updateImpl(Attributor &A) override { | |||
SmallVector<AA::ValueAndContext> Values; | |||
const auto &AligmentCBs = A.getAlignmentCallback(getIRPosition()); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
no auto
|
||
if (!Values.empty()) { | ||
StateType T; | ||
for (const auto &VAC : Values) { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
no auto
This PR mainly adds an alignment attribute to the AMDGPU backend for `make.buffer.rsrc`. It will:
- add the alignment attribute for `make.buffer.rsrc`
- propagate alignment through `make.buffer.rsrc`

For the following example, the PR will:
- backward propagate through `make.buffer.rsrc`: it requires the alignment of `%fat.ptr` to be 8 (required by the load)
- forward propagate through `make.buffer.rsrc`: it requires the alignment of `%x` to be 4
- propagate the alignment (of `%fat.ptr`) through (to `%x`), so `%x` also has an alignment of 8