
Commit db52ccb (1 parent: 5bcbcf8)
[AMDGPU][Attributor] Infer inreg attribute in AMDGPUAttributor
This patch introduces `AAAMDGPUUniformArgument`, which can infer the `inreg` attribute for function arguments. The idea is that if the call site arguments corresponding to a function argument are always uniform, we can mark the argument as `inreg` and thus pass it via an SGPR. In addition, this AA can also propagate the `inreg` attribute where feasible.
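As a quick sketch of the intended effect (hand-written for illustration, not part of this commit; the names are hypothetical, and it relies on kernel arguments being passed in SGPRs and hence uniform):

; Every call site of @callee passes kernel arguments, which
; AMDGPU::isArgPassedInSGPR treats as uniform, so the AA should infer
; inreg on both of @callee's arguments.
define internal void @callee(ptr addrspace(1) %p, i32 %v) {
  store i32 %v, ptr addrspace(1) %p, align 4
  ret void
}

define amdgpu_kernel void @kernel(ptr addrspace(1) %p, i32 %v) {
  call void @callee(ptr addrspace(1) %p, i32 %v)
  ret void
}

; Expected result after the amdgpu-attributor pass:
;   define internal void @callee(ptr addrspace(1) inreg %p, i32 inreg %v)

Note that @callee has internal linkage: the AA relies on checkForAllCallSites with RequireAllCallSites=true, so a callee whose call sites cannot all be enumerated is pessimistically left unchanged.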

File tree

2 files changed: +193 −1 lines changed

llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp

Lines changed: 119 additions & 1 deletion
@@ -13,6 +13,8 @@
 #include "AMDGPU.h"
 #include "GCNSubtarget.h"
 #include "Utils/AMDGPUBaseInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/IR/IntrinsicsR600.h"
 #include "llvm/Target/TargetMachine.h"
@@ -1296,6 +1301,114 @@ struct AAAMDGPUNoAGPR
 
 const char AAAMDGPUNoAGPR::ID = 0;
 
+struct AAAMDGPUUniform : public StateWrapper<BooleanState, AbstractAttribute> {
+  using Base = StateWrapper<BooleanState, AbstractAttribute>;
+  AAAMDGPUUniform(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAAMDGPUUniform &createForPosition(const IRPosition &IRP,
+                                            Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  StringRef getName() const override { return "AAAMDGPUUniform"; }
+
+  const std::string getAsStr(Attributor *A) const override {
+    return getAssumed() ? "uniform" : "divergent";
+  }
+
+  void trackStatistics() const override {}
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAAMDGPUUniform.
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+const char AAAMDGPUUniform::ID = 0;
+
+/// This AA infers the inreg attribute for a function argument.
+struct AAAMDGPUUniformArgument : public AAAMDGPUUniform {
+  AAAMDGPUUniformArgument(const IRPosition &IRP, Attributor &A)
+      : AAAMDGPUUniform(IRP, A) {}
+
+  void initialize(Attributor &A) override {
+    Argument *Arg = getAssociatedArgument();
+    CallingConv::ID CC = Arg->getParent()->getCallingConv();
+    if (Arg->hasAttribute(Attribute::InReg)) {
+      indicateOptimisticFixpoint();
+      return;
+    }
+
+    if (AMDGPU::isEntryFunctionCC(CC)) {
+      // We only use isArgPassedInSGPR on kernel entry function arguments, so
+      // even if SGPRs are later used for non-uniform i1 argument passing, it
+      // will not affect this.
+      if (AMDGPU::isArgPassedInSGPR(Arg))
+        indicateOptimisticFixpoint();
+      else
+        indicatePessimisticFixpoint();
+    }
+  }
+
+  ChangeStatus updateImpl(Attributor &A) override {
+    unsigned ArgNo = getAssociatedArgument()->getArgNo();
+    TargetMachine &TM =
+        static_cast<AMDGPUInformationCache &>(A.getInfoCache()).TM;
+
+    auto isUniform = [&](AbstractCallSite ACS) -> bool {
+      CallBase *CB = ACS.getInstruction();
+      Value *V = CB->getArgOperand(ArgNo);
+      if (auto *Arg = dyn_cast<Argument>(V)) {
+        auto *AA = A.getOrCreateAAFor<AAAMDGPUUniform>(
+            IRPosition::argument(*Arg), this, DepClassTy::REQUIRED);
+        return AA && AA->isValidState();
+      }
+      TargetTransformInfo TTI = TM.getTargetTransformInfo(*CB->getFunction());
+      return TTI.isAlwaysUniform(V);
+    };
+
+    bool UsedAssumedInformation = true;
+    if (!A.checkForAllCallSites(isUniform, *this, /*RequireAllCallSites=*/true,
+                                UsedAssumedInformation))
+      return indicatePessimisticFixpoint();
+
+    if (!UsedAssumedInformation)
+      return indicateOptimisticFixpoint();
+
+    return ChangeStatus::UNCHANGED;
+  }
+
+  ChangeStatus manifest(Attributor &A) override {
+    Argument *Arg = getAssociatedArgument();
+    // If the argument already has the inreg attribute, there is nothing to
+    // do.
+    if (Arg->hasAttribute(Attribute::InReg))
+      return ChangeStatus::UNCHANGED;
+    if (AMDGPU::isEntryFunctionCC(Arg->getParent()->getCallingConv()))
+      return ChangeStatus::UNCHANGED;
+    LLVMContext &Ctx = Arg->getContext();
+    return A.manifestAttrs(getIRPosition(),
+                           {Attribute::get(Ctx, Attribute::InReg)});
+  }
+};
+
+AAAMDGPUUniform &AAAMDGPUUniform::createForPosition(const IRPosition &IRP,
+                                                    Attributor &A) {
+  switch (IRP.getPositionKind()) {
+  case IRPosition::IRP_ARGUMENT:
+    return *new (A.Allocator) AAAMDGPUUniformArgument(IRP, A);
+  default:
+    llvm_unreachable("not a valid position for AAAMDGPUUniform");
+  }
+}
+
 /// Performs the final check and updates the 'amdgpu-waves-per-eu' attribute
 /// based on the finalized 'amdgpu-flat-work-group-size' attribute.
 /// Both attributes start with narrow ranges that expand during iteration.
@@ -1382,7 +1495,7 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
       &AAAMDMaxNumWorkgroups::ID, &AAAMDWavesPerEU::ID, &AAAMDGPUNoAGPR::ID,
       &AACallEdges::ID, &AAPointerInfo::ID, &AAPotentialConstantValues::ID,
       &AAUnderlyingObjects::ID, &AANoAliasAddrSpace::ID, &AAAddressSpace::ID,
-      &AAIndirectCallInfo::ID});
+      &AAIndirectCallInfo::ID, &AAAMDGPUUniform::ID});
 
   AttributorConfig AC(CGUpdater);
   AC.IsClosedWorldModule = Options.IsClosedWorld;
@@ -1435,6 +1548,11 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
         A.getOrCreateAAFor<AAAddressSpace>(IRPosition::value(*Ptr));
         A.getOrCreateAAFor<AANoAliasAddrSpace>(IRPosition::value(*Ptr));
       }
+
+      if (!AMDGPU::isEntryFunctionCC(F->getCallingConv())) {
+        for (auto &Arg : F->args())
+          A.getOrCreateAAFor<AAAMDGPUUniform>(IRPosition::argument(Arg));
+      }
     }
   }
 
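To reproduce the inference locally, the pass can be run standalone via opt, mirroring the RUN line of the new test below (input.ll is a placeholder for a file like the test):

opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-attributor input.ll -o -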

Lines changed: 74 additions & 0 deletions
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-attributor %s -o - | FileCheck %s
+
+@g1 = protected addrspace(1) externally_initialized global i32 0, align 4
+@g2 = protected addrspace(1) externally_initialized global i32 0, align 4
+@g3 = protected addrspace(1) externally_initialized global i32 0, align 4
+@g4 = protected addrspace(1) externally_initialized global i32 0, align 4
+
+define internal void @callee_with_always_uniform_argument(ptr addrspace(1) %x, i32 %y) {
+; CHECK-LABEL: define internal void @callee_with_always_uniform_argument(
+; CHECK-SAME: ptr addrspace(1) inreg [[X:%.*]], i32 inreg [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[X_VAL:%.*]] = load i32, ptr addrspace(1) [[X]], align 4
+; CHECK-NEXT:    store i32 [[X_VAL]], ptr addrspace(1) @g3, align 4
+; CHECK-NEXT:    store i32 [[Y]], ptr addrspace(1) @g4, align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %x.val = load i32, ptr addrspace(1) %x, align 4
+  store i32 %x.val, ptr addrspace(1) @g3, align 4
+  store i32 %y, ptr addrspace(1) @g4, align 4
+  ret void
+}
+
+define amdgpu_kernel void @kernel_with_readfirstlane(ptr addrspace(1) %p, i32 %x) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_with_readfirstlane(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[P0:%.*]] = call ptr addrspace(1) @llvm.amdgcn.readfirstlane.p1(ptr addrspace(1) [[P]])
+; CHECK-NEXT:    call void @callee_with_always_uniform_argument(ptr addrspace(1) [[P0]], i32 [[X]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %p0 = call ptr addrspace(1) @llvm.amdgcn.readfirstlane.p1(ptr addrspace(1) %p)
+  call void @callee_with_always_uniform_argument(ptr addrspace(1) %p0, i32 %x)
+  ret void
+}
+
+define internal void @callee_without_always_uniform_argument(ptr addrspace(1) %x, i32 %y) {
+; CHECK-LABEL: define internal void @callee_without_always_uniform_argument(
+; CHECK-SAME: ptr addrspace(1) [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[X_VAL:%.*]] = load i32, ptr addrspace(1) [[X]], align 4
+; CHECK-NEXT:    store i32 [[X_VAL]], ptr addrspace(1) @g3, align 4
+; CHECK-NEXT:    store i32 [[Y]], ptr addrspace(1) @g4, align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %x.val = load i32, ptr addrspace(1) %x, align 4
+  store i32 %x.val, ptr addrspace(1) @g3, align 4
+  store i32 %y, ptr addrspace(1) @g4, align 4
+  ret void
+}
+
+define amdgpu_kernel void @kernel_with_divergent_callsite_argument(ptr addrspace(1) %p, i32 %x) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_with_divergent_callsite_argument(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[ID_X:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, ptr addrspace(1) [[P]], i32 [[ID_X]]
+; CHECK-NEXT:    [[D:%.*]] = load i32, ptr addrspace(1) [[GEP]], align 4
+; CHECK-NEXT:    call void @callee_without_always_uniform_argument(ptr addrspace(1) [[GEP]], i32 [[D]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr i32, ptr addrspace(1) %p, i32 %id.x
+  %d = load i32, ptr addrspace(1) %gep
+  call void @callee_without_always_uniform_argument(ptr addrspace(1) %gep, i32 %d)
+  ret void
+}
+
+declare ptr addrspace(1) @llvm.amdgcn.readfirstlane.p1(ptr addrspace(1))
+declare noundef i32 @llvm.amdgcn.workitem.id.x()
