diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl index 4b5232c0010aa..1c327b2fcc981 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl @@ -83,7 +83,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_flags_constant(global voi // CHECK-LABEL: @test_amdgcn_make_buffer_p0_nullptr( // CHECK-NEXT: entry: // CHECK-NEXT: [[CONV:%.*]] = sext i32 [[NUM:%.*]] to i64 -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr null, i16 [[STRIDE:%.*]], i64 [[CONV]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call align 4294967296 ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr null, i16 [[STRIDE:%.*]], i64 [[CONV]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_p0_nullptr(short stride, int num, int flags) { @@ -93,7 +93,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_p0_nullptr(short stride, int num, // CHECK-LABEL: @test_amdgcn_make_buffer_p1_nullptr( // CHECK-NEXT: entry: // CHECK-NEXT: [[CONV:%.*]] = sext i32 [[NUM:%.*]] to i64 -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) null, i16 [[STRIDE:%.*]], i64 [[CONV]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call align 4294967296 ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) null, i16 [[STRIDE:%.*]], i64 [[CONV]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_p1_nullptr(short stride, int num, int flags) { diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp index 56ab040706a13..07f1b120dc4f2 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp @@ 
-1584,6 +1584,117 @@ AAAMDGPUClusterDims::createForPosition(const IRPosition &IRP, Attributor &A) { llvm_unreachable("AAAMDGPUClusterDims is only valid for function position"); } +struct AAAMDGPUMakeBufferRsrcAlign + : public IRAttribute< + Attribute::Alignment, + StateWrapper<IncIntegerState<uint64_t, Value::MaximumAlignment, 1>, + AbstractAttribute>, + AAAMDGPUMakeBufferRsrcAlign> { + using Base = IRAttribute< + Attribute::Alignment, + StateWrapper<IncIntegerState<uint64_t, Value::MaximumAlignment, 1>, + AbstractAttribute>, + AAAMDGPUMakeBufferRsrcAlign>; + + AAAMDGPUMakeBufferRsrcAlign(const IRPosition &IRP, Attributor &A) + : Base(IRP) {} + + void initialize(Attributor &A) override {} + + ChangeStatus updateImpl(Attributor &A) override { + Instruction *I = getIRPosition().getCtxI(); + const auto *AlignAA = A.getAAFor<AAAlign>( + *this, IRPosition::value(*(I->getOperand(0))), DepClassTy::REQUIRED); + if (AlignAA) + return clampStateAndIndicateChange( + this->getState(), AlignAA->getAssumedAlign().value()); + + return indicatePessimisticFixpoint(); + } + + /// Create an abstract attribute view for the position \p IRP. + static AAAMDGPUMakeBufferRsrcAlign &createForPosition(const IRPosition &IRP, + Attributor &A) { + if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED) + if (Instruction *I = dyn_cast<Instruction>(&IRP.getAssociatedValue())) + if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) + if (II->getIntrinsicID() == Intrinsic::amdgcn_make_buffer_rsrc) + return *new (A.Allocator) AAAMDGPUMakeBufferRsrcAlign(IRP, A); + llvm_unreachable("AAAMDGPUMakeBufferRsrcAlign is only valid for call site " + "return position on make.buffer.rsrc intrinsic"); + } + + // Manifest users + ChangeStatus manifest(Attributor &A) override { + ChangeStatus Changed = ChangeStatus::UNCHANGED; + + // Check for users that allow alignment annotations. 
+ Value &AssociatedValue = getAssociatedValue(); + if (isa<ConstantData>(AssociatedValue)) + return ChangeStatus::UNCHANGED; + + for (const Use &U : AssociatedValue.uses()) { + if (auto *SI = dyn_cast<StoreInst>(U.getUser())) { + if (SI->getPointerOperand() == &AssociatedValue) { + if (SI->getAlign() > getAssumedAlign()) { + SI->setAlignment(getAssumedAlign()); + Changed = ChangeStatus::CHANGED; + } + } + } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) { + if (LI->getPointerOperand() == &AssociatedValue) { + if (LI->getAlign() > getAssumedAlign()) { + LI->setAlignment(getAssumedAlign()); + Changed = ChangeStatus::CHANGED; + } + } + } else if (auto *RMW = dyn_cast<AtomicRMWInst>(U.getUser())) { + if (RMW->getAlign() > getAssumedAlign()) { + RMW->setAlignment(getAssumedAlign()); + Changed = ChangeStatus::CHANGED; + } + } else if (auto *CAS = dyn_cast<AtomicCmpXchgInst>(U.getUser())) { + if (CAS->getAlign() > getAssumedAlign()) { + CAS->setAlignment(getAssumedAlign()); + Changed = ChangeStatus::CHANGED; + } + } + } + + // Manifest the intrinsic itself + Changed |= Base::manifest(A); + + return Changed; + } + + StringRef getName() const override { return "AAAMDGPUMakeBufferRsrcAlign"; } + + const std::string getAsStr(Attributor *) const override { + std::string Buffer = "AAAMDGPUMakeBufferRsrcAlign["; + raw_string_ostream OS(Buffer); + OS << getState().getKnown() << ',' << getState().getAssumed() << ']'; + return OS.str(); + } + + const char *getIdAddr() const override { return &ID; } + + void trackStatistics() const override {} + + Align getAssumedAlign() const { return Align(getAssumed()); } + + void getDeducedAttributes(Attributor &A, LLVMContext &Ctx, + SmallVectorImpl<Attribute> &Attrs) const override { + if (getAssumedAlign() > 1) + Attrs.emplace_back( + Attribute::getWithAlignment(Ctx, Align(getAssumedAlign()))); + } + + /// Unique ID (due to the unique address) + static const char ID; +}; + +const char AAAMDGPUMakeBufferRsrcAlign::ID = 0; + static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM, AMDGPUAttributorOptions 
Options, ThinOrFullLTOPhase LTOPhase) { @@ -1603,7 +1714,8 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM, &AAAMDGPUMinAGPRAlloc::ID, &AACallEdges::ID, &AAPointerInfo::ID, &AAPotentialConstantValues::ID, &AAUnderlyingObjects::ID, &AANoAliasAddrSpace::ID, &AAAddressSpace::ID, &AAIndirectCallInfo::ID, - &AAAMDGPUClusterDims::ID}); + &AAAMDGPUClusterDims::ID, &AAAlign::ID, + &AAAMDGPUMakeBufferRsrcAlign::ID}); AttributorConfig AC(CGUpdater); AC.IsClosedWorldModule = Options.IsClosedWorld; @@ -1657,6 +1769,10 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM, Ptr = RMW->getPointerOperand(); else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I)) Ptr = CmpX->getPointerOperand(); + else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) + if (II->getIntrinsicID() == Intrinsic::amdgcn_make_buffer_rsrc) + A.getOrCreateAAFor<AAAMDGPUMakeBufferRsrcAlign>( + IRPosition::value(*II)); if (Ptr) { A.getOrCreateAAFor<AAAddressSpace>(IRPosition::value(*Ptr)); diff --git a/llvm/test/CodeGen/AMDGPU/attr-amdgpu-align.ll b/llvm/test/CodeGen/AMDGPU/attr-amdgpu-align.ll new file mode 100644 index 0000000000000..8d2bfab09460b --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/attr-amdgpu-align.ll @@ -0,0 +1,26 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-attributor %s -o - | FileCheck %s + +define float @load_gt_base(ptr align 4 %p) { +; CHECK-LABEL: define float @load_gt_base( +; CHECK-SAME: ptr align 4 [[P:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[PTR:%.*]] = call align 4 ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr align 4 [[P]], i16 0, i64 0, i32 0) +; CHECK-NEXT: [[LOADED:%.*]] = load float, ptr addrspace(7) [[PTR]], align 4 +; CHECK-NEXT: ret float [[LOADED]] +; + %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %p, i16 0, i64 0, i32 0) + %loaded = load float, ptr addrspace(7) %ptr, align 8 + ret float %loaded +} + +define float @load_lt_base(ptr align 8 %p) { +; 
CHECK-LABEL: define float @load_lt_base( +; CHECK-SAME: ptr align 8 [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[PTR:%.*]] = call align 8 ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr align 8 [[P]], i16 0, i64 0, i32 0) +; CHECK-NEXT: [[LOADED:%.*]] = load float, ptr addrspace(7) [[PTR]], align 4 +; CHECK-NEXT: ret float [[LOADED]] +; + %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %p, i16 0, i64 0, i32 0) + %loaded = load float, ptr addrspace(7) %ptr, align 4 + ret float %loaded +}