From f4f6b93e4cadf587565163102122ae1d58fbe67d Mon Sep 17 00:00:00 2001
From: Shilei Tian
Date: Sun, 28 Jul 2024 18:48:54 -0400
Subject: [PATCH 1/3] [Attributor][AMD] Enable AAIndirectCallInfo for AMDAttributorPass

---
 llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp     |  2 +-
 .../CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll | 14 +++++++++++++-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
index 9d3c9e1e2ef9f..51968063e8919 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
@@ -1038,7 +1038,7 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM) {
       &AAPotentialValues::ID, &AAAMDFlatWorkGroupSize::ID,
       &AAAMDWavesPerEU::ID, &AAAMDGPUNoAGPR::ID, &AACallEdges::ID,
       &AAPointerInfo::ID, &AAPotentialConstantValues::ID,
-      &AAUnderlyingObjects::ID});
+      &AAUnderlyingObjects::ID, &AAIndirectCallInfo::ID});
 
   AttributorConfig AC(CGUpdater);
   AC.Allowed = &Allowed;
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll
index 33b1cc65dc569..e5d440b96349f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll
@@ -231,7 +231,19 @@ define amdgpu_kernel void @indirect_calls_none_agpr(i1 %cond) {
 ; CHECK-LABEL: define amdgpu_kernel void @indirect_calls_none_agpr(
 ; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT: [[FPTR:%.*]] = select i1 [[COND]], ptr @empty, ptr @also_empty
-; CHECK-NEXT: call void [[FPTR]]()
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq ptr [[FPTR]], @also_empty
+; CHECK-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP3:%.*]]
+; CHECK: 2:
+; CHECK-NEXT: call void @also_empty()
+; CHECK-NEXT: br label [[TMP6:%.*]]
+; CHECK: 3:
+; CHECK-NEXT: br i1 true, label [[TMP4:%.*]], label [[TMP5:%.*]]
+; CHECK: 4:
+; CHECK-NEXT: call void @empty()
+; CHECK-NEXT: br label [[TMP6]]
+; CHECK: 5:
+; CHECK-NEXT: unreachable
+; CHECK: 6:
 ; CHECK-NEXT: ret void
 ;
 %fptr = select i1 %cond, ptr @empty, ptr @also_empty

From ed46483b388d1a8803b93116beda75108a3bf478 Mon Sep 17 00:00:00 2001
From: Shilei Tian
Date: Sun, 28 Jul 2024 15:28:09 -0400
Subject: [PATCH 2/3] [LLVM][PassBuilder] Extend the function signature of callback for optimizer pipeline extension point

These callbacks can be invoked at multiple places when building an
optimization pipeline, both at compile time and at link time, but there is
currently no indication of which pipeline is being built. This patch adds an
extra argument that carries the (Thin)LTO stage of the pipeline so that a
callback can check it if needed. No test is expected for this change; its
benefit will be demonstrated in
https://github.com/llvm/llvm-project/pull/66488.
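For illustration, a callback registered through the extended signature can key
its behavior off the new parameter. The snippet below is a minimal sketch
rather than part of the diff: MyModulePass is a hypothetical placeholder, and
only the three-argument callback signature is introduced by this patch.

  PB.registerOptimizerLastEPCallback(
      [](ModulePassManager &MPM, OptimizationLevel Level,
         ThinOrFullLTOPhase Phase) {
        // Schedule the (hypothetical) pass only when whole-program
        // information is available, i.e. in the post-link (Thin)LTO stages.
        if (Phase == ThinOrFullLTOPhase::FullLTOPostLink ||
            Phase == ThinOrFullLTOPhase::ThinLTOPostLink)
          MPM.addPass(MyModulePass());
      });

AMDGPUTargetMachine performs the same phase check later in this series to
decide whether whole-program visibility can be assumed.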
--- clang/lib/CodeGen/BackendUtil.cpp | 19 ++++++++++--------- llvm/include/llvm/Passes/PassBuilder.h | 10 +++++++--- llvm/lib/Passes/PassBuilderPipelines.cpp | 14 +++++++++----- llvm/lib/Target/AMDGPU/AMDGPU.h | 7 ++++++- llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp | 11 +++++++---- .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 15 +++++++++------ llvm/tools/opt/NewPMDriver.cpp | 2 +- 7 files changed, 49 insertions(+), 29 deletions(-) diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp index e765bbf637a66..64f0020a170aa 100644 --- a/clang/lib/CodeGen/BackendUtil.cpp +++ b/clang/lib/CodeGen/BackendUtil.cpp @@ -643,7 +643,7 @@ static void addKCFIPass(const Triple &TargetTriple, const LangOptions &LangOpts, // Ensure we lower KCFI operand bundles with -O0. PB.registerOptimizerLastEPCallback( - [&](ModulePassManager &MPM, OptimizationLevel Level) { + [&](ModulePassManager &MPM, OptimizationLevel Level, ThinOrFullLTOPhase) { if (Level == OptimizationLevel::O0 && LangOpts.Sanitize.has(SanitizerKind::KCFI)) MPM.addPass(createModuleToFunctionPassAdaptor(KCFIPass())); @@ -662,8 +662,8 @@ static void addKCFIPass(const Triple &TargetTriple, const LangOptions &LangOpts, static void addSanitizers(const Triple &TargetTriple, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, PassBuilder &PB) { - auto SanitizersCallback = [&](ModulePassManager &MPM, - OptimizationLevel Level) { + auto SanitizersCallback = [&](ModulePassManager &MPM, OptimizationLevel Level, + ThinOrFullLTOPhase) { if (CodeGenOpts.hasSanitizeCoverage()) { auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts); MPM.addPass(SanitizerCoveragePass( @@ -749,7 +749,7 @@ static void addSanitizers(const Triple &TargetTriple, PB.registerOptimizerEarlyEPCallback( [SanitizersCallback](ModulePassManager &MPM, OptimizationLevel Level) { ModulePassManager NewMPM; - SanitizersCallback(NewMPM, Level); + SanitizersCallback(NewMPM, Level, ThinOrFullLTOPhase::None); if (!NewMPM.isEmpty()) { // Sanitizers can abandon. NewMPM.addPass(RequireAnalysisPass()); @@ -1018,11 +1018,12 @@ void EmitAssemblyHelper::RunOptimizationPipeline( // TODO: Consider passing the MemoryProfileOutput to the pass builder via // the PGOOptions, and set this up there. if (!CodeGenOpts.MemoryProfileOutput.empty()) { - PB.registerOptimizerLastEPCallback( - [](ModulePassManager &MPM, OptimizationLevel Level) { - MPM.addPass(createModuleToFunctionPassAdaptor(MemProfilerPass())); - MPM.addPass(ModuleMemProfilerPass()); - }); + PB.registerOptimizerLastEPCallback([](ModulePassManager &MPM, + OptimizationLevel Level, + ThinOrFullLTOPhase) { + MPM.addPass(createModuleToFunctionPassAdaptor(MemProfilerPass())); + MPM.addPass(ModuleMemProfilerPass()); + }); } if (CodeGenOpts.FatLTO) { diff --git a/llvm/include/llvm/Passes/PassBuilder.h b/llvm/include/llvm/Passes/PassBuilder.h index 474a19531ff5d..4c2763404ff05 100644 --- a/llvm/include/llvm/Passes/PassBuilder.h +++ b/llvm/include/llvm/Passes/PassBuilder.h @@ -497,7 +497,8 @@ class PassBuilder { /// This extension point allows adding optimizations at the very end of the /// function optimization pipeline. 
void registerOptimizerLastEPCallback( - const std::function &C) { + const std::function &C) { OptimizerLastEPCallbacks.push_back(C); } @@ -630,7 +631,8 @@ class PassBuilder { void invokeOptimizerEarlyEPCallbacks(ModulePassManager &MPM, OptimizationLevel Level); void invokeOptimizerLastEPCallbacks(ModulePassManager &MPM, - OptimizationLevel Level); + OptimizationLevel Level, + ThinOrFullLTOPhase Phase); void invokeFullLinkTimeOptimizationEarlyEPCallbacks(ModulePassManager &MPM, OptimizationLevel Level); void invokeFullLinkTimeOptimizationLastEPCallbacks(ModulePassManager &MPM, @@ -755,7 +757,9 @@ class PassBuilder { // Module callbacks SmallVector, 2> OptimizerEarlyEPCallbacks; - SmallVector, 2> + SmallVector, + 2> OptimizerLastEPCallbacks; SmallVector, 2> FullLinkTimeOptimizationEarlyEPCallbacks; diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp index 757b20dcd6693..56fa105d89390 100644 --- a/llvm/lib/Passes/PassBuilderPipelines.cpp +++ b/llvm/lib/Passes/PassBuilderPipelines.cpp @@ -365,9 +365,10 @@ void PassBuilder::invokeOptimizerEarlyEPCallbacks(ModulePassManager &MPM, C(MPM, Level); } void PassBuilder::invokeOptimizerLastEPCallbacks(ModulePassManager &MPM, - OptimizationLevel Level) { + OptimizationLevel Level, + ThinOrFullLTOPhase Phase) { for (auto &C : OptimizerLastEPCallbacks) - C(MPM, Level); + C(MPM, Level, Phase); } void PassBuilder::invokeFullLinkTimeOptimizationEarlyEPCallbacks( ModulePassManager &MPM, OptimizationLevel Level) { @@ -1524,7 +1525,7 @@ PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level, MPM.addPass(createModuleToFunctionPassAdaptor(std::move(OptimizePM), PTO.EagerlyInvalidateAnalyses)); - invokeOptimizerLastEPCallbacks(MPM, Level); + invokeOptimizerLastEPCallbacks(MPM, Level, LTOPhase); // Split out cold code. Splitting is done late to avoid hiding context from // other optimizations and inadvertently regressing performance. The tradeoff @@ -1671,7 +1672,8 @@ PassBuilder::buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level) { // optimization is going to be done in PostLink stage, but clang can't add // callbacks there in case of in-process ThinLTO called by linker. invokeOptimizerEarlyEPCallbacks(MPM, Level); - invokeOptimizerLastEPCallbacks(MPM, Level); + invokeOptimizerLastEPCallbacks(MPM, Level, + ThinOrFullLTOPhase::ThinLTOPreLink); // Emit annotation remarks. addAnnotationRemarksPass(MPM); @@ -2159,7 +2161,9 @@ ModulePassManager PassBuilder::buildO0DefaultPipeline(OptimizationLevel Level, CoroPM.addPass(GlobalDCEPass()); MPM.addPass(CoroConditionalWrapper(std::move(CoroPM))); - invokeOptimizerLastEPCallbacks(MPM, Level); + invokeOptimizerLastEPCallbacks(MPM, Level, + LTOPreLink ? ThinOrFullLTOPhase::ThinLTOPreLink + : ThinOrFullLTOPhase::None); if (LTOPreLink) addRequiredLTOPreLinkPasses(MPM); diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h index 46cc5f349555a..50aef36724f70 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPU.h +++ b/llvm/lib/Target/AMDGPU/AMDGPU.h @@ -287,8 +287,13 @@ class AMDGPUAttributorPass : public PassInfoMixin { private: TargetMachine &TM; + /// Asserts whether we can assume whole program visibility. 
+ bool HasWholeProgramVisibility = false; + public: - AMDGPUAttributorPass(TargetMachine &TM) : TM(TM){}; + AMDGPUAttributorPass(TargetMachine &TM, + bool HasWholeProgramVisibility = false) + : TM(TM), HasWholeProgramVisibility(HasWholeProgramVisibility) {}; PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM); }; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp index 51968063e8919..ab98da31b050f 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp @@ -1023,7 +1023,8 @@ static void addPreloadKernArgHint(Function &F, TargetMachine &TM) { } } -static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM) { +static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM, + bool HasWholeProgramVisibility) { SetVector Functions; for (Function &F : M) { if (!F.isIntrinsic()) @@ -1041,6 +1042,7 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM) { &AAUnderlyingObjects::ID, &AAIndirectCallInfo::ID}); AttributorConfig AC(CGUpdater); + AC.IsClosedWorldModule = HasWholeProgramVisibility; AC.Allowed = &Allowed; AC.IsModulePass = true; AC.DefaultInitializeLiveInternals = false; @@ -1086,7 +1088,7 @@ class AMDGPUAttributorLegacy : public ModulePass { bool runOnModule(Module &M) override { AnalysisGetter AG(this); - return runImpl(M, AG, *TM); + return runImpl(M, AG, *TM, /*HasWholeProgramVisibility=*/false); } void getAnalysisUsage(AnalysisUsage &AU) const override { @@ -1107,8 +1109,9 @@ PreservedAnalyses llvm::AMDGPUAttributorPass::run(Module &M, AnalysisGetter AG(FAM); // TODO: Probably preserves CFG - return runImpl(M, AG, TM) ? PreservedAnalyses::none() - : PreservedAnalyses::all(); + return runImpl(M, AG, TM, HasWholeProgramVisibility) + ? PreservedAnalyses::none() + : PreservedAnalyses::all(); } char AMDGPUAttributorLegacy::ID = 0; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp index c8fb68d1c0b0c..50cc2d871d4ec 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp @@ -735,12 +735,15 @@ void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) { }); // FIXME: Why is AMDGPUAttributor not in CGSCC? 
- PB.registerOptimizerLastEPCallback( - [this](ModulePassManager &MPM, OptimizationLevel Level) { - if (Level != OptimizationLevel::O0) { - MPM.addPass(AMDGPUAttributorPass(*this)); - } - }); + PB.registerOptimizerLastEPCallback([this](ModulePassManager &MPM, + OptimizationLevel Level, + ThinOrFullLTOPhase Phase) { + if (Level != OptimizationLevel::O0) { + MPM.addPass(AMDGPUAttributorPass( + *this, Phase == ThinOrFullLTOPhase::FullLTOPostLink || + Phase == ThinOrFullLTOPhase::ThinLTOPostLink)); + } + }); PB.registerFullLinkTimeOptimizationLastEPCallback( [this](ModulePassManager &PM, OptimizationLevel Level) { diff --git a/llvm/tools/opt/NewPMDriver.cpp b/llvm/tools/opt/NewPMDriver.cpp index 374698083763b..522a8c06d83c0 100644 --- a/llvm/tools/opt/NewPMDriver.cpp +++ b/llvm/tools/opt/NewPMDriver.cpp @@ -310,7 +310,7 @@ static void registerEPCallbacks(PassBuilder &PB) { }); if (tryParsePipelineText(PB, OptimizerLastEPPipeline)) PB.registerOptimizerLastEPCallback( - [&PB](ModulePassManager &PM, OptimizationLevel) { + [&PB](ModulePassManager &PM, OptimizationLevel, ThinOrFullLTOPhase) { ExitOnError Err("Unable to parse OptimizerLastEP pipeline: "); Err(PB.parsePassPipeline(PM, OptimizerLastEPPipeline)); }); From 0e498ef8a9204d4766a5e3bf60e7363d80f9836b Mon Sep 17 00:00:00 2001 From: Shilei Tian Date: Sun, 28 Jul 2024 19:24:31 -0400 Subject: [PATCH 3/3] [Attributor][AMDGPU] Improve the handling of indirect calls --- llvm/include/llvm/Transforms/IPO/Attributor.h | 9 +++++---- llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp | 18 ++++++++++++++++++ llvm/lib/Transforms/IPO/Attributor.cpp | 2 +- .../Transforms/IPO/AttributorAttributes.cpp | 3 ++- .../AMDGPU/amdgpu-attributor-no-agpr.ll | 16 +++------------- 5 files changed, 29 insertions(+), 19 deletions(-) diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h index 34557238ecb23..596ee39c35a37 100644 --- a/llvm/include/llvm/Transforms/IPO/Attributor.h +++ b/llvm/include/llvm/Transforms/IPO/Attributor.h @@ -1448,7 +1448,7 @@ struct AttributorConfig { /// Callback function to determine if an indirect call targets should be made /// direct call targets (with an if-cascade). std::function + Function &AssummedCallee, bool IsSingleton)> IndirectCalleeSpecializationCallback = nullptr; /// Helper to update an underlying call graph and to delete functions. @@ -1718,10 +1718,11 @@ struct Attributor { /// Return true if we should specialize the call site \b CB for the potential /// callee \p Fn. bool shouldSpecializeCallSiteForCallee(const AbstractAttribute &AA, - CallBase &CB, Function &Callee) { + CallBase &CB, Function &Callee, + bool IsSingleton) { return Configuration.IndirectCalleeSpecializationCallback - ? Configuration.IndirectCalleeSpecializationCallback(*this, AA, - CB, Callee) + ? 
Configuration.IndirectCalleeSpecializationCallback( + *this, AA, CB, Callee, IsSingleton) : true; } diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp index ab98da31b050f..b8ab11a7b420b 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp @@ -14,6 +14,7 @@ #include "GCNSubtarget.h" #include "Utils/AMDGPUBaseInfo.h" #include "llvm/Analysis/CycleAnalysis.h" +#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/CodeGen/TargetPassConfig.h" #include "llvm/IR/IntrinsicsAMDGPU.h" #include "llvm/IR/IntrinsicsR600.h" @@ -1041,11 +1042,28 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM, &AAPointerInfo::ID, &AAPotentialConstantValues::ID, &AAUnderlyingObjects::ID, &AAIndirectCallInfo::ID}); + /// Helper to decide if we should specialize the indirect \p CB for \p Callee. + /// \p IsSingleton indicates whether the \p Callee is the only assumed callee. + auto IndirectCalleeSpecializationCallback = + [&](Attributor &A, const AbstractAttribute &AA, CallBase &CB, + Function &Callee, bool IsSingleton) { + if (AMDGPU::isEntryFunctionCC(Callee.getCallingConv())) + return false; + // Singleton functions should be specialized. + if (IsSingleton) + return true; + // Otherwise specialize uniform values. + const auto &TTI = TM.getTargetTransformInfo(*CB.getCaller()); + return TTI.isAlwaysUniform(CB.getCalledOperand()); + }; + AttributorConfig AC(CGUpdater); AC.IsClosedWorldModule = HasWholeProgramVisibility; AC.Allowed = &Allowed; AC.IsModulePass = true; AC.DefaultInitializeLiveInternals = false; + AC.IndirectCalleeSpecializationCallback = + IndirectCalleeSpecializationCallback; AC.IPOAmendableCB = [](const Function &F) { return F.getCallingConv() == CallingConv::AMDGPU_KERNEL; }; diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp index 910c0aeacc42e..879a26bcf328d 100644 --- a/llvm/lib/Transforms/IPO/Attributor.cpp +++ b/llvm/lib/Transforms/IPO/Attributor.cpp @@ -3836,7 +3836,7 @@ static bool runAttributorOnFunctions(InformationCache &InfoCache, if (MaxSpecializationPerCB.getNumOccurrences()) { AC.IndirectCalleeSpecializationCallback = [&](Attributor &, const AbstractAttribute &AA, CallBase &CB, - Function &Callee) { + Function &Callee, bool IsSingleton) { if (MaxSpecializationPerCB == 0) return false; auto &Set = IndirectCalleeTrackingMap[&CB]; diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp index 2816a85743faa..3f02ea1cbd6cb 100644 --- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp +++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp @@ -12347,7 +12347,8 @@ struct AAIndirectCallInfoCallSite : public AAIndirectCallInfo { SmallVector SkippedAssumedCallees; SmallVector> NewCalls; for (Function *NewCallee : AssumedCallees) { - if (!A.shouldSpecializeCallSiteForCallee(*this, *CB, *NewCallee)) { + if (!A.shouldSpecializeCallSiteForCallee(*this, *CB, *NewCallee, + AssumedCallees.size() == 1)) { SkippedAssumedCallees.push_back(NewCallee); SpecializedForAllCallees = false; continue; diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll index e5d440b96349f..d89dae1933365 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll @@ -231,19 +231,7 @@ define amdgpu_kernel void @indirect_calls_none_agpr(i1 %cond) { ; CHECK-LABEL: define 
amdgpu_kernel void @indirect_calls_none_agpr( ; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[FPTR:%.*]] = select i1 [[COND]], ptr @empty, ptr @also_empty -; CHECK-NEXT: [[TMP1:%.*]] = icmp eq ptr [[FPTR]], @also_empty -; CHECK-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP3:%.*]] -; CHECK: 2: -; CHECK-NEXT: call void @also_empty() -; CHECK-NEXT: br label [[TMP6:%.*]] -; CHECK: 3: -; CHECK-NEXT: br i1 true, label [[TMP4:%.*]], label [[TMP5:%.*]] -; CHECK: 4: -; CHECK-NEXT: call void @empty() -; CHECK-NEXT: br label [[TMP6]] -; CHECK: 5: -; CHECK-NEXT: unreachable -; CHECK: 6: +; CHECK-NEXT: call void [[FPTR]](), !callees [[META0:![0-9]+]] ; CHECK-NEXT: ret void ; %fptr = select i1 %cond, ptr @empty, ptr @also_empty @@ -265,3 +253,5 @@ attributes #0 = { "amdgpu-no-agpr" } ; CHECK: attributes #[[ATTR8:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) "target-cpu"="gfx90a" } ; CHECK: attributes #[[ATTR9]] = { "amdgpu-no-agpr" } ;. +; CHECK: [[META0]] = !{ptr @also_empty, ptr @empty} +;.