From fe961e7077906dc68912ea37a686723889ad368a Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 21 Dec 2021 02:30:29 -0800 Subject: [PATCH 1/9] Fix a build warning in MoveKillsCopyableValuesChecker --- lib/SILOptimizer/Mandatory/MoveKillsCopyableValuesChecker.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/SILOptimizer/Mandatory/MoveKillsCopyableValuesChecker.cpp b/lib/SILOptimizer/Mandatory/MoveKillsCopyableValuesChecker.cpp index 833784782d0e3..aa3ea0c5bdf1e 100644 --- a/lib/SILOptimizer/Mandatory/MoveKillsCopyableValuesChecker.cpp +++ b/lib/SILOptimizer/Mandatory/MoveKillsCopyableValuesChecker.cpp @@ -430,7 +430,6 @@ namespace { class MoveKillsCopyableValuesCheckerPass : public SILFunctionTransform { void run() override { auto *fn = getFunction(); - auto &astContext = fn->getASTContext(); // Don't rerun diagnostics on deserialized functions. if (getFunction()->wasDeserializedCanonical()) From 321351c746606193641ea81666a851f9bf922b41 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 21 Dec 2021 02:27:45 -0800 Subject: [PATCH 2/9] Add SILFunction::preserveDebugInfo() --- include/swift/SIL/SILFunction.h | 9 +++++++++ lib/SIL/IR/SILFunction.cpp | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/include/swift/SIL/SILFunction.h b/include/swift/SIL/SILFunction.h index b735222eeaccc..c9b6242bdf40a 100644 --- a/include/swift/SIL/SILFunction.h +++ b/include/swift/SIL/SILFunction.h @@ -839,6 +839,15 @@ class SILFunction OptMode = unsigned(mode); } + /// True if debug information must be preserved (-Onone). + /// + /// If this is false (-O), then the presence of debug info must not affect the + /// outcome of any transformations. + /// + /// Typically used to determine whether a debug_value is a normal SSA use or + /// incidental use. + bool preserveDebugInfo() const; + PerformanceConstraints getPerfConstraints() const { return perfConstraints; } void setPerfConstraints(PerformanceConstraints perfConstr) { diff --git a/lib/SIL/IR/SILFunction.cpp b/lib/SIL/IR/SILFunction.cpp index c3c9cf017808c..efa44efebb4e2 100644 --- a/lib/SIL/IR/SILFunction.cpp +++ b/lib/SIL/IR/SILFunction.cpp @@ -281,6 +281,10 @@ OptimizationMode SILFunction::getEffectiveOptimizationMode() const { return getModule().getOptions().OptMode; } +bool SILFunction::preserveDebugInfo() const { + return getEffectiveOptimizationMode() <= OptimizationMode::NoOptimization; +} + bool SILFunction::shouldOptimize() const { return getEffectiveOptimizationMode() != OptimizationMode::NoOptimization; } From ec9d4b444faf8f7bd04dbb0f17cb9710059d0219 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 21 Dec 2021 02:28:35 -0800 Subject: [PATCH 3/9] Add SILInstruction::getNextInstruction() and getPreviousInstruction() --- include/swift/SIL/SILInstruction.h | 16 +++++++++++++++ lib/SIL/IR/SILInstruction.cpp | 10 ++++++++++ lib/SILOptimizer/Utils/ShrinkBorrowScope.cpp | 21 ++------------------ 3 files changed, 28 insertions(+), 19 deletions(-) diff --git a/include/swift/SIL/SILInstruction.h b/include/swift/SIL/SILInstruction.h index 11c05b4e22aa4..4b9096a52d040 100644 --- a/include/swift/SIL/SILInstruction.h +++ b/include/swift/SIL/SILInstruction.h @@ -459,6 +459,14 @@ class SILInstruction : public llvm::ilist_node { locationStorage = loc.storage; } + /// Return the next instruction or nullptr if this is the last instruction in + /// its block. + SILInstruction *getPreviousInstruction(); + + /// Return the previous instruction or nullptr if this is the first + /// instruction in its block. 
+ SILInstruction *getNextInstruction(); + /// This method unlinks 'self' from the containing basic block and deletes it. void eraseFromParent(); @@ -1013,6 +1021,14 @@ class SingleValueInstruction : public SILInstruction, public ValueBase { /// If this is an instruction which "defines" an opened archetype, it is /// returned. CanArchetypeType getOpenedArchetype() const; + + SILInstruction *getPreviousInstruction() { + return SILInstruction::getPreviousInstruction(); + } + + SILInstruction *getNextInstruction() { + return SILInstruction::getNextInstruction(); + } }; struct SILNodeOffsetChecker { diff --git a/lib/SIL/IR/SILInstruction.cpp b/lib/SIL/IR/SILInstruction.cpp index 90477dd544cd1..9350cd991537f 100644 --- a/lib/SIL/IR/SILInstruction.cpp +++ b/lib/SIL/IR/SILInstruction.cpp @@ -103,6 +103,16 @@ SILModule &SILInstruction::getModule() const { return getFunction()->getModule(); } +SILInstruction *SILInstruction::getPreviousInstruction() { + auto pos = getIterator(); + return pos == getParent()->begin() ? nullptr : &*std::prev(pos); +} + +SILInstruction *SILInstruction::getNextInstruction() { + auto nextPos = std::next(getIterator()); + return nextPos == getParent()->end() ? nullptr : &*nextPos; +} + void SILInstruction::removeFromParent() { #ifndef NDEBUG for (auto result : getResults()) { diff --git a/lib/SILOptimizer/Utils/ShrinkBorrowScope.cpp b/lib/SILOptimizer/Utils/ShrinkBorrowScope.cpp index 3ba665162cc0c..1be4b01160511 100644 --- a/lib/SILOptimizer/Utils/ShrinkBorrowScope.cpp +++ b/lib/SILOptimizer/Utils/ShrinkBorrowScope.cpp @@ -20,23 +20,6 @@ using namespace swift; -//===----------------------------------------------------------------------===// -// MARK: Local utilities -//===----------------------------------------------------------------------===// - -// TODO: Move to be member function on SILInstruction. -static SILInstruction *getPreviousInstruction(SILInstruction *inst) { - auto pos = inst->getIterator(); - return pos == inst->getParent()->begin() ? nullptr - : &*std::prev(inst->getIterator()); -} - -// TODO: Move to be member function on SILInstruction. -static SILInstruction *getNextInstruction(SILInstruction *inst) { - auto nextPos = std::next(inst->getIterator()); - return nextPos == inst->getParent()->end() ? nullptr : &*nextPos; -} - //===----------------------------------------------------------------------===// // MARK: ShrinkBorrowScope //===----------------------------------------------------------------------===// @@ -318,7 +301,7 @@ void ShrinkBorrowScope::findBarriers() { assert(tryHoistOverInstruction(block->getTerminator())); } SILInstruction *barrier = nullptr; - while ((instruction = getPreviousInstruction(instruction))) { + while ((instruction = instruction->getPreviousInstruction())) { if (instruction == introducer) { barrier = instruction; break; @@ -354,7 +337,7 @@ bool ShrinkBorrowScope::rewrite() { // Insert the new end_borrow instructions that occur after deinit barriers. for (auto pair : barrierInstructions) { - auto *insertionPoint = getNextInstruction(pair.second); + auto *insertionPoint = pair.second->getNextInstruction(); createdBorrow |= createEndBorrow(insertionPoint); } From 0325e296fee35d5e8b689d97da04abe7a7a55b24 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 21 Dec 2021 02:23:43 -0800 Subject: [PATCH 4/9] Add an isDeinitBarrier() utility. Needs to be common across ShrinkBorrowScopes and SSADestroyHoisting. 
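For illustration only, a hoisting client would typically scan backward from a
destroy and stop at the first barrier, roughly like this (hypothetical helper,
not part of this patch; it assumes only swift::isDeinitBarrier() from
MemAccessUtils.h added here and SILInstruction::getPreviousInstruction() from
the previous commit):

    // Sketch: find the nearest deinit barrier above `destroy` within its
    // block, or return nullptr if the destroy may move to the block's top.
    static SILInstruction *findLocalBarrier(SILInstruction *destroy) {
      for (auto *inst = destroy->getPreviousInstruction(); inst;
           inst = inst->getPreviousInstruction()) {
        if (isDeinitBarrier(inst))
          return inst; // the destroy cannot be hoisted above this point
      }
      return nullptr;
    }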
--- include/swift/SIL/MemAccessUtils.h | 6 +++++ lib/SIL/Utils/MemAccessUtils.cpp | 37 ++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/include/swift/SIL/MemAccessUtils.h b/include/swift/SIL/MemAccessUtils.h index 2c604d8c0e7b8..52d56697aeaea 100644 --- a/include/swift/SIL/MemAccessUtils.h +++ b/include/swift/SIL/MemAccessUtils.h @@ -232,6 +232,12 @@ inline bool accessKindMayConflict(SILAccessKind a, SILAccessKind b) { return !(a == SILAccessKind::Read && b == SILAccessKind::Read); } +/// Return true if \p instruction is a deinitialization barrier. +/// +/// Deinitialization barriers constrain variable lifetimes. Lexical end_borrow +/// and destroy_addr cannot be hoisted above them. +bool isDeinitBarrier(SILInstruction *instruction); + } // end namespace swift //===----------------------------------------------------------------------===// diff --git a/lib/SIL/Utils/MemAccessUtils.cpp b/lib/SIL/Utils/MemAccessUtils.cpp index 81b757d6db4d6..14ead6bd56996 100644 --- a/lib/SIL/Utils/MemAccessUtils.cpp +++ b/lib/SIL/Utils/MemAccessUtils.cpp @@ -393,6 +393,43 @@ bool swift::isLetAddress(SILValue address) { return isLetForBase(base); } +//===----------------------------------------------------------------------===// +// MARK: Deinitialization barriers. +//===----------------------------------------------------------------------===// + +static bool isBarrierApply(FullApplySite) { + // TODO: check side effect analysis + return true; +} + +static bool mayAccessPointer(SILInstruction *instruction) { + if (!instruction->mayReadOrWriteMemory()) + return false; + bool fail = false; + visitAccessedAddress(instruction, [&fail](Operand *operand) { + auto accessStorage = AccessStorage::compute(operand->get()); + if (accessStorage.getKind() != AccessRepresentation::Kind::Unidentified) + fail = true; + }); + return fail; +} + +static bool mayLoadWeakOrUnowned(SILInstruction *instruction) { + // TODO: It is possible to do better here by looking at the address that is + // being loaded. + return isa(instruction) || isa(instruction); +} + +bool swift::isDeinitBarrier(SILInstruction *instruction) { + if (instruction->maySynchronize()) { + if (auto apply = FullApplySite::isa(instruction)) { + return isBarrierApply(apply); + } + return true; + } + return mayLoadWeakOrUnowned(instruction) || mayAccessPointer(instruction); +} + //===----------------------------------------------------------------------===// // MARK: AccessRepresentation //===----------------------------------------------------------------------===// From 0836b6af61ed9ef4a2f33968ff7f6d11b5f6704d Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 21 Dec 2021 02:27:06 -0800 Subject: [PATCH 5/9] MemAccessUtils comments --- include/swift/SIL/MemAccessUtils.h | 5 ++--- lib/SIL/Utils/MemAccessUtils.cpp | 14 +++++++++----- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/include/swift/SIL/MemAccessUtils.h b/include/swift/SIL/MemAccessUtils.h index 52d56697aeaea..1c17b93c9ca6d 100644 --- a/include/swift/SIL/MemAccessUtils.h +++ b/include/swift/SIL/MemAccessUtils.h @@ -1309,9 +1309,8 @@ struct AccessUseVisitor { /// /// Return true if all uses were collected. This is always true as long the \p /// visitor's visitUse method returns true. -bool visitAccessStorageUses(AccessUseVisitor &visitor, - AccessStorage storage, - SILFunction *function); +bool visitAccessStorageUses(AccessUseVisitor &visitor, AccessStorage storage, + SILFunction *function); /// Visit the uses of \p accessPath. 
/// diff --git a/lib/SIL/Utils/MemAccessUtils.cpp b/lib/SIL/Utils/MemAccessUtils.cpp index 14ead6bd56996..4df35f9aa2a3e 100644 --- a/lib/SIL/Utils/MemAccessUtils.cpp +++ b/lib/SIL/Utils/MemAccessUtils.cpp @@ -1449,7 +1449,7 @@ namespace { // load %elt1 // non-use (unseen) // %elt2 = struct_element_addr %base, #2 // outer projection (followed) // load %elt2 // exact use -// %sub = struct_element_addr %elt2, #i // inner projection (followed) +// %sub = struct_element_addr %elt2, %i // inner projection (followed) // load %sub // inner use // // A use may be a BranchInst if the corresponding phi does not have common @@ -1467,8 +1467,12 @@ class AccessPathDefUseTraversal { // The origin of the def-use traversal. AccessStorage storage; - // Remaining access path indices from the most recently visited def to any - // exact use in def-use order. + // Indices of the path to match from inner to outer component. + // A cursor is used to represent the most recently visited def. + // During def-use traversal, the cursor starts at the end of pathIndicies and + // decrements with each projection. + // The first index represents an exact match. + // Index < 0 represents some subobject of the requested path. SmallVector pathIndices; // A point in the def-use traversal. isRef() is true only for object access @@ -1884,8 +1888,8 @@ bool swift::visitAccessPathUses(AccessUseVisitor &visitor, } bool swift::visitAccessStorageUses(AccessUseVisitor &visitor, - AccessStorage storage, - SILFunction *function) { + AccessStorage storage, + SILFunction *function) { IndexTrieNode *emptyPath = function->getModule().getIndexTrieRoot(); return visitAccessPathUses(visitor, AccessPath(storage, emptyPath, 0), function); From 547d87f347801b1da2c79b192b22ad11a7891243 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 21 Dec 2021 02:26:20 -0800 Subject: [PATCH 6/9] Add a UniqueAddressUses utility. Analyze and classify the leaf uses of unique storage. Storage that has a unique set of roots within this function includes alloc_stack, alloc_box, exclusive argument, and global variables. All access to the storage within this function is derived from these roots. Gather the kinds of uses that are typically relevant to algorithms: - loads (including copies out of, not including inout args) - stores (including copies into and inout args) - destroys (of the entire aggregate) - debugUses (only populated when preserveDebugInfo == false) --- include/swift/SIL/MemAccessUtils.h | 39 +++++++++++ lib/SIL/Utils/MemAccessUtils.cpp | 104 +++++++++++++++++++++++++++++ 2 files changed, 143 insertions(+) diff --git a/include/swift/SIL/MemAccessUtils.h b/include/swift/SIL/MemAccessUtils.h index 1c17b93c9ca6d..f30205124b72d 100644 --- a/include/swift/SIL/MemAccessUtils.h +++ b/include/swift/SIL/MemAccessUtils.h @@ -1324,6 +1324,45 @@ bool visitAccessPathUses(AccessUseVisitor &visitor, AccessPath accessPath, } // end namespace swift +//===----------------------------------------------------------------------===// +// MARK: UniqueAddressUses +//===----------------------------------------------------------------------===// + +namespace swift { + +/// Analyze and classify the leaf uses of unique storage. +/// +/// Storage that has a unique set of roots within this function includes +/// alloc_stack, alloc_box, exclusive argument, and global variables. All access +/// to the storage within this function is derived from these roots. 
+/// +/// Gather the kinds of uses that are typically relevant to algorithms: +/// - loads (including copies out of, not including inout args) +/// - stores (including copies into and inout args) +/// - destroys (of the entire aggregate) +/// - debugUses (only populated when preserveDebugInfo == false) +/// - unknownUses (e.g. address_to_pointer, box escape) +struct UniqueStorageUseVisitor { + static bool findUses(UniqueStorageUseVisitor &visitor); + + SILFunction *function; + AccessStorage storage; + + UniqueStorageUseVisitor(AccessStorage storage, SILFunction *function) + : function(function), storage(storage) {} + + virtual ~UniqueStorageUseVisitor() = default; + + virtual bool visitLoad(Operand *use) = 0; + virtual bool visitStore(Operand *use) = 0; + virtual bool visitDestroy(Operand *use) = 0; + virtual bool visitDealloc(Operand *use) = 0; + virtual bool visitDebugUse(Operand *use) = 0; + virtual bool visitUnknownUse(Operand *use) = 0; +}; + +} // namespace swift + //===----------------------------------------------------------------------===// // MARK: Helper API for specific formal access patterns //===----------------------------------------------------------------------===// diff --git a/lib/SIL/Utils/MemAccessUtils.cpp b/lib/SIL/Utils/MemAccessUtils.cpp index 4df35f9aa2a3e..d5c1913a54b8f 100644 --- a/lib/SIL/Utils/MemAccessUtils.cpp +++ b/lib/SIL/Utils/MemAccessUtils.cpp @@ -1923,6 +1923,110 @@ bool AccessPath::collectUses(SmallVectorImpl &uses, return visitAccessPathUses(collector, *this, function); } +//===----------------------------------------------------------------------===// +// MARK: UniqueStorageUseVisitor +//===----------------------------------------------------------------------===// + +struct GatherUniqueStorageUses : public AccessUseVisitor { + UniqueStorageUseVisitor &visitor; + + GatherUniqueStorageUses(UniqueStorageUseVisitor &visitor) + : AccessUseVisitor(AccessUseType::Overlapping, + NestedAccessType::IgnoreAccessBegin), + visitor(visitor) {} + + bool visitUse(Operand *use, AccessUseType useTy) override; +}; + +bool UniqueStorageUseVisitor::findUses(UniqueStorageUseVisitor &visitor) { + assert(visitor.storage.isUniquelyIdentified()); + + GatherUniqueStorageUses gather(visitor); + return visitAccessStorageUses(gather, visitor.storage, visitor.function); +} + +bool GatherUniqueStorageUses::visitUse(Operand *use, AccessUseType useTy) { + unsigned operIdx = use->getOperandNumber(); + auto *user = use->getUser(); + assert(!user->isTypeDependentOperand(*use)); + + // TODO: handle non-escaping partial-applies just like a full apply. The + // address uses are the points where the partial apply is invoked. 
+ if (FullApplySite apply = FullApplySite::isa(user)) { + switch (apply.getArgumentConvention(*use)) { + case SILArgumentConvention::Indirect_Inout: + case SILArgumentConvention::Indirect_InoutAliasable: + case SILArgumentConvention::Indirect_Out: + visitor.visitStore(use); + break; + case SILArgumentConvention::Indirect_In_Guaranteed: + case SILArgumentConvention::Indirect_In: + case SILArgumentConvention::Indirect_In_Constant: + visitor.visitLoad(use); + break; + case SILArgumentConvention::Direct_Unowned: + case SILArgumentConvention::Direct_Owned: + case SILArgumentConvention::Direct_Guaranteed: + // most likely an escape of a box + visitor.visitUnknownUse(use); + break; + } + return true; + } + switch (user->getKind()) { + case SILInstructionKind::DestroyAddrInst: + case SILInstructionKind::DestroyValueInst: + if (useTy == AccessUseType::Exact) { + visitor.visitDestroy(use); + return true; + } + visitor.visitUnknownUse(use); + return true; + + case SILInstructionKind::DebugValueInst: + visitor.visitDebugUse(use); + return true; + + case SILInstructionKind::LoadInst: + case SILInstructionKind::LoadWeakInst: + case SILInstructionKind::LoadUnownedInst: + case SILInstructionKind::ExistentialMetatypeInst: + visitor.visitLoad(use); + return true; + + case SILInstructionKind::StoreInst: + case SILInstructionKind::StoreWeakInst: + case SILInstructionKind::StoreUnownedInst: + if (operIdx == CopyLikeInstruction::Dest) { + visitor.visitStore(use); + return true; + } + break; + + case SILInstructionKind::InjectEnumAddrInst: + visitor.visitStore(use); + return true; + + case SILInstructionKind::CopyAddrInst: + if (operIdx == CopyLikeInstruction::Dest) { + visitor.visitStore(use); + return true; + } + assert(operIdx == CopyLikeInstruction::Src); + visitor.visitLoad(use); + return true; + + case SILInstructionKind::DeallocStackInst: + visitor.visitDealloc(use); + return true; + + default: + break; + } + visitor.visitUnknownUse(use); + return true; +} + //===----------------------------------------------------------------------===// // MARK: Helper API for specific formal access patterns //===----------------------------------------------------------------------===// From 0dd0ba6c792c237ee06f3f168ee94e5a3b09ba64 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Wed, 22 Dec 2021 01:51:31 -0800 Subject: [PATCH 7/9] Fix sil-opt -opt-mode=none Set the single-pass pipeline's isMandatory flag based on the opt-mode. --- tools/sil-opt/SILOpt.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/sil-opt/SILOpt.cpp b/tools/sil-opt/SILOpt.cpp index f4b8cf303938e..d4f7f0df3aaf2 100644 --- a/tools/sil-opt/SILOpt.cpp +++ b/tools/sil-opt/SILOpt.cpp @@ -465,10 +465,13 @@ static llvm::cl::opt EnableRequirementMachine( static void runCommandLineSelectedPasses(SILModule *Module, irgen::IRGenModule *IRGenMod) { - auto &opts = Module->getOptions(); + const SILOptions &opts = Module->getOptions(); + // If a specific pass was requested with -opt-mode=None, run the pass as a + // mandatory pass. + bool isMandatory = opts.OptMode == OptimizationMode::NoOptimization; executePassPipelinePlan( Module, SILPassPipelinePlan::getPassPipelineForKinds(opts, Passes), - /*isMandatory*/ false, IRGenMod); + isMandatory, IRGenMod); if (Module->getOptions().VerifyAll) Module->verify(); From 3e532b2a8d7cd6257de652ae31581b8d960acb53 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 21 Dec 2021 02:21:19 -0800 Subject: [PATCH 8/9] Add a general BackwardReachability analysis. 
Pessimistic, non-iterative data flow for analyzing backward reachability from a set of last uses to their dominating def or nearest barrier. Meet: ReachableEnd(predecessor) = intersection(ReachableBegin, successors) Intended for frequently called utilities where minimizing the cost of data flow is more important than analyzing reachability across loops. Expected to visit very few blocks because barriers often occur close to a last use. Note: this does not require initializing bitsets for all blocks in the function for each SSA value being analyzed. --- .../SILOptimizer/Analysis/Reachability.h | 129 ++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 include/swift/SILOptimizer/Analysis/Reachability.h diff --git a/include/swift/SILOptimizer/Analysis/Reachability.h b/include/swift/SILOptimizer/Analysis/Reachability.h new file mode 100644 index 0000000000000..521aeaead9564 --- /dev/null +++ b/include/swift/SILOptimizer/Analysis/Reachability.h @@ -0,0 +1,129 @@ +//===--- Reachability.h ---------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +/// +/// Reachability data flow analysis using a path-discovery worklist. For +/// efficient data flow propagation based on a single SSA value and its uses. +/// +/// TODO: Add an optimistic data flow for more aggresive optimization: +/// - Add another set for blocks reachable by barriers +/// - Change the meet operation to a union +/// - Propagate past barriers up to the SSA def +/// - Iterate to a fix-point. +/// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_SILOPTIMIZER_ANALYSIS_REACHABILITY_H +#define SWIFT_SILOPTIMIZER_ANALYSIS_REACHABILITY_H + +#include "swift/SIL/BasicBlockDatastructures.h" +#include "swift/SIL/SILBasicBlock.h" + +namespace swift { + +/// Pessimistic, non-iterative data flow for analyzing backward reachability +/// from a set of last uses to their dominating def or nearest barrier. +/// +/// Intended for frequently called utilities where minimizing the cost of data +/// flow is more important than analyzing reachability across loops. Expected to +/// visit very few blocks because barriers often occur close to a last use. +/// +/// BlockReachability { +/// // True if the beginning of \p block is reachable. +/// // Typically a BasicBlockSet wrapper. +/// bool hasReachableBegin(SILBasicBlock *block); +/// +/// // Mark the beginning of a block reachable. Only called once per block. +/// // Typically a BasicBlockSet wrapper. +/// boid markReachableBegin(SILBasicBlock *block); +/// +/// // Mark the end of a block reachable. Only called once per block. +/// // Typically a BasicBlockSet wrapper. +/// void markReachableEnd(SILBasicBlock *block); +/// +/// // Return true if \p inst is a barrier. Called once for each reachable +/// // instruction, assuming that each lastUse is itself a barrier. +/// // Used by the data flow client to perform additional book-keeping, +/// // such as recording debug_value instructions. 
+/// bool checkReachableBarrier(SILInstruction *inst); +/// }; +template +class BackwardReachability { + SILFunction *function; + BlockReachability &reachableBlocks; + BasicBlockWorklist cfgWorklist; + +public: + BackwardReachability(SILFunction *function, + BlockReachability &reachableBlocks) + : function(function), reachableBlocks(reachableBlocks), + cfgWorklist(function) {} + + // Initialize data flow starting points before running solveBackward. + void initLastUse(SILInstruction *lastUsePoint) { + auto *lastUseBlock = lastUsePoint->getParent(); + if (canReachBlockBegin(lastUsePoint)) { + pushPreds(lastUseBlock); + } + } + + // Data flow "meet": interesection of successor reachability. + void solveBackward() { + while (SILBasicBlock *block = cfgWorklist.popAndForget()) { + if (!meetOverSuccessors(block)) + continue; + + reachableBlocks.markReachableEnd(block); + + if (canReachBlockBegin(block->getTerminator())) { + pushPreds(block); + } + } + } + +protected: + BackwardReachability(BackwardReachability const &) = delete; + BackwardReachability &operator=(BackwardReachability const &) = delete; + + // Perform a "meet" over successor begin reachability. + // Return true if \p predecessor's end is pessimistically reachable. + // + // Meet: + // ReachableEnd(predecessor) := intersection(ReachableBegin, successors) + bool meetOverSuccessors(SILBasicBlock *block) { + return llvm::all_of(block->getSuccessorBlocks(), [this](auto *successor) { + return reachableBlocks.hasReachableBegin(successor); + }); + } + + // Local data flow. Computes the block's flow function. + bool canReachBlockBegin(SILInstruction *lastReachablePoint) { + do { + if (reachableBlocks.checkReachableBarrier(lastReachablePoint)) + return false; + lastReachablePoint = lastReachablePoint->getPreviousInstruction(); + } while (lastReachablePoint); + return true; + } + + // Propagate global data flow from \p succBB to its predecessors. + void pushPreds(SILBasicBlock *succBB) { + reachableBlocks.markReachableBegin(succBB); + + for (SILBasicBlock *predBB : succBB->getPredecessorBlocks()) { + cfgWorklist.pushIfNotVisited(predBB); + } + } +}; + +} // end namespace swift + +#endif From c8a2130554b94dbb56d83e3cec1e4043485cff02 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Fri, 3 Dec 2021 08:42:05 -0800 Subject: [PATCH 9/9] Add a SSADestroyHoisting utility and pass Extract and rewrite the destroy hoisting algorithm originally from CopyForwarding (in 2014). This is now a light-weight utility for hoisting destroy_addr instructions. Shrinking an object's memory lifetime can allow removal of copy_addr and other optimization. This is extremely low-overhead and can run at any optimization level without dependency on any analysis. This algorithm is: - Incremental - SSA-based - Canonical - Free from alias analysis See file-level comments. The immediate purpose is to specify and test the constraints introduced by adding lexical variable lifetimes to SIL semantics. It can be used as a template for end_borrow hoisting. Ultimately, this utility can be invoked within any pass that needs to optimize a particular uniquely identified address. It will be used to remove much of the complexity from CopyForwarding. 
--- .../swift/SILOptimizer/PassManager/Passes.def | 2 + lib/SILOptimizer/Transforms/CMakeLists.txt | 1 + .../Transforms/SSADestroyHoisting.cpp | 517 ++++++++++++++++++ test/SILOptimizer/hoist_destroy_addr.sil | 231 ++++++++ 4 files changed, 751 insertions(+) create mode 100644 lib/SILOptimizer/Transforms/SSADestroyHoisting.cpp create mode 100644 test/SILOptimizer/hoist_destroy_addr.sil diff --git a/include/swift/SILOptimizer/PassManager/Passes.def b/include/swift/SILOptimizer/PassManager/Passes.def index b57489b9ee6d6..381ac0cb9b484 100644 --- a/include/swift/SILOptimizer/PassManager/Passes.def +++ b/include/swift/SILOptimizer/PassManager/Passes.def @@ -194,6 +194,8 @@ PASS(DefiniteInitialization, "definite-init", "Definite Initialization for Diagnostics") PASS(DestroyHoisting, "destroy-hoisting", "Hoisting of value destroys") +PASS(SSADestroyHoisting, "ssa-destroy-hoisting", + "Hoist destroy_addr for uniquely identified values") PASS(Devirtualizer, "devirtualizer", "Indirect Call Devirtualization") PASS(DiagnoseInfiniteRecursion, "diagnose-infinite-recursion", diff --git a/lib/SILOptimizer/Transforms/CMakeLists.txt b/lib/SILOptimizer/Transforms/CMakeLists.txt index e31f0d2733ee8..9b6fa3075db29 100644 --- a/lib/SILOptimizer/Transforms/CMakeLists.txt +++ b/lib/SILOptimizer/Transforms/CMakeLists.txt @@ -35,6 +35,7 @@ target_sources(swiftSILOptimizer PRIVATE SILLowerAggregateInstrs.cpp SILMem2Reg.cpp SILSROA.cpp + SSADestroyHoisting.cpp SimplifyCFG.cpp Sink.cpp SpeculativeDevirtualizer.cpp diff --git a/lib/SILOptimizer/Transforms/SSADestroyHoisting.cpp b/lib/SILOptimizer/Transforms/SSADestroyHoisting.cpp new file mode 100644 index 0000000000000..c9a06c4686eff --- /dev/null +++ b/lib/SILOptimizer/Transforms/SSADestroyHoisting.cpp @@ -0,0 +1,517 @@ +//===--- SSADestroyHoisting.cpp - SSA-based destroy hoisting --------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +/// +/// This is a light-weight utility for hoisting destroy instructions for unique +/// storage--typically alloc_stac or owned incoming arguments. Shrinking an +/// object's memory lifetime can allow removal of copy_addr and other +/// optimization. +/// +/// This algorithm is: +/// - Incremental +/// - SSA-based +/// - Canonical +/// - Free from alias analysis +/// +/// Incremental: Handle a single in-memory value at a time. The value's address +/// typically originates from an alloc_stack or owned function argument +/// (@in). It does not depend on any analysis result, which would need to be +/// preserved by a pass. +/// +/// SSA-based: Starting with uniquely identified (exclusive) storage, +/// discovers all known uses based on recognizable SIL patterns. Bails-out on +/// unknown uses. Derivation of a raw pointer is considered a "known use". +/// +/// Canonical: Assumes that aggregate values, which are allocated in a single +/// operation, are also destroyed in a single operation. This canonical form is +/// not fully enforced, so violations result in a bail-out. +/// +/// Free from alias analysis: this only handles exclusively identified +/// addresses to owned values, which cannot be derived from object references. 
+/// +/// ---------------------------------------------------------------------------- +/// +/// DestroyAddr hoisting stops at either a direct use, or a deinitialization +/// barrier. Direct uses are checked by guaranteeing that all storage uses are +/// known. +/// +/// Deinitialization barriers: +/// +/// Case #1. Weak reference loads: Any load of a weak or unowned referenceto an +/// object that may be deallocated when this variable is destroyed. Any use of +/// the weak reference is considered a barrier, even if the referenced object is +/// not accessed. This only applies to loads within the current lexical +/// scope. Programmers must properly check escaping weak references for null. +/// +/// Case #2. Derived pointers: Any memory access based on a raw pointer to +/// memory that may be deallocated when this variable is destroyed. This only +/// applies to pointer access within this variable's lexical scope. Programmers +/// must manage escaping pointers explicitly via Builtin.fixLifetime. +/// +/// Case #3. Synchronization points: If the object potentially has a custom +/// deinitializer with side effects, then any external function call, which may +/// contain a memory barrier or system call, prevents hoisting. If the external +/// function call is annotated as "read-only", then it is safe. Since Swift does +/// not directly support atomics, no SIL instructions are currently considered +/// synchronization points. +/// +/// ---------------------------------------------------------------------------- +/// +/// TODO: replace the destroy hoisting in CopyForwarding::forwardCopiesOf and +/// ensure related tests still pass. This requires hoisting over certain +/// calls. We can do this as long as the call takes a copy of the storage value +/// as an argument. The copy will be guarded by the callee's lexical scope, so +/// the deinits cannot be invoked by the hoisted destroy (in fact it should be +/// possible to eliminate the destroy). +/// +/// TODO: As a utility, hoistDestroys should be repeatable. Subsequent runs +/// without changing input should have no effect, including putting new +/// instructions on a worklist. MergeDestroys currently breaks this because the +/// destroys are inserted first before they are merged. This will trigger the +/// createdNewInst callback and cause hadCallbackInvocation() to return true +/// even when the merged result is identical to the input. Fix this by keeping +/// track of the newly created destroys, defer calling createdNewInst, and defer +/// deleting dead instructions. When merging, check if the merged destroy is +/// inserted at the old destroy to reuse it and bypass triggering callbacks. +/// +/// TODO: enforce an invariant that destroy_addrs jointly post-dominate any +/// exclusive owned address, that would simplify the algorithm. +/// +/// ===--------------------------------------------------------------------===// + +#define DEBUG_TYPE "ssa-destroy-hoisting" + +#include "swift/Basic/GraphNodeWorklist.h" +#include "swift/SIL/BasicBlockDatastructures.h" +#include "swift/SIL/MemAccessUtils.h" +#include "swift/SIL/SILBasicBlock.h" +#include "swift/SIL/SILBuilder.h" +#include "swift/SIL/SILInstruction.h" +#include "swift/SILOptimizer/Analysis/Reachability.h" +#include "swift/SILOptimizer/PassManager/Transforms.h" +#include "swift/SILOptimizer/Utils/InstructionDeleter.h" + +using namespace swift; + +namespace { + +/// Step #1: Find all known uses of the unique storage object. 
+struct KnownStorageUses : UniqueStorageUseVisitor { + bool preserveDebugInfo; + + SmallPtrSet storageUsers; + SmallVector originalDestroys; + SmallPtrSet debugInsts; + + KnownStorageUses(AccessStorage storage, SILFunction *function) + : UniqueStorageUseVisitor(storage, function), + preserveDebugInfo(function->preserveDebugInfo()) {} + + bool empty() const { + return storageUsers.empty() && originalDestroys.empty() + && debugInsts.empty(); + } + + SILFunction *getFunction() const { return function; } + + AccessStorage getStorage() const { return storage; } + + // Return true if all leaf users of the root address are recognized. + // + // Populate addressUsers, originalDestroys, and debugInsts. + bool findUses() { + assert(empty() && "already initialized"); + + return UniqueStorageUseVisitor::findUses(*this); + } + +protected: + KnownStorageUses(KnownStorageUses const &) = delete; + KnownStorageUses &operator=(KnownStorageUses const &) = delete; + + bool recordUser(SILInstruction *user) { + storageUsers.insert(user); + return true; + } + + bool visitLoad(Operand *use) override { return recordUser(use->getUser()); } + + bool visitStore(Operand *use) override { return recordUser(use->getUser()); } + + bool visitDestroy(Operand *use) override { + originalDestroys.push_back(use->getUser()); + return true; + } + + bool visitDealloc(Operand *use) override { return true; } + + bool visitDebugUse(Operand *use) override { + if (preserveDebugInfo) { + storageUsers.insert(use->getUser()); + } else { + debugInsts.insert(use->getUser()); + } + return true; + } + + bool visitUnknownUse(Operand *use) override { + auto *user = use->getUser(); + // Recognize any leaf users not already recognized by UniqueAddressUses. + // + // Destroy hoisting considers address_to_pointer to be a leaf use because + // any potential pointer access is already considered to be a + // deinitializtion barrier. + if (isa(user)) { + storageUsers.insert(use->getUser()); + return true; + } + LLVM_DEBUG(llvm::dbgs() << "Unknown user " << *user); + return false; + } +}; + +/// Step #2: Perform backward dataflow from KnownStorageUses.originalDestroys to +/// KnownStorageUses.storageUsers to find deinitialization barriers. +class DeinitBarriers { +public: + // Data flow state: blocks whose beginning is backward reachable from a + // destroy without first reaching a barrier or storage use. + BasicBlockSetVector destroyReachesBeginBlocks; + + // Data flow state: blocks whose end is backward reachable from a destroy + // without first reaching a barrier or storage use. + BasicBlockSet destroyReachesEndBlocks; + + // Deinit barriers or storage uses within a block, reachable from a destroy. + SmallVector barriers; + + // Debug instructions that are no longer within this lifetime after shrinking. 
+ SmallVector deadUsers; + + explicit DeinitBarriers(SILFunction *function) + : destroyReachesBeginBlocks(function), + destroyReachesEndBlocks(function) + {} + + void compute(const KnownStorageUses &knownUses) { + DestroyReachability(knownUses, *this).solveBackward(); + } + +private: + // Conforms to BackwardReachability::BlockReachability + class DestroyReachability { + const KnownStorageUses &knownUses; + DeinitBarriers &result; + SILInstruction *storageDefInst = nullptr; // null for function args + + BackwardReachability reachability; + + public: + DestroyReachability(const KnownStorageUses &knownUses, + DeinitBarriers &result) + : knownUses(knownUses), result(result), + reachability(knownUses.getFunction(), *this) { + + auto rootValue = knownUses.getStorage().getRoot(); + assert(rootValue && "HoistDestroys requires a single storage root"); + storageDefInst = rootValue->getDefiningInstruction(); + + // Seed backward reachability with destroy points. + for (SILInstruction *destroy : knownUses.originalDestroys) { + reachability.initLastUse(destroy); + } + } + + bool hasReachableBegin(SILBasicBlock *block) { + return result.destroyReachesBeginBlocks.contains(block); + } + + void markReachableBegin(SILBasicBlock *block) { + result.destroyReachesBeginBlocks.insert(block); + } + + void markReachableEnd(SILBasicBlock *block) { + result.destroyReachesEndBlocks.insert(block); + } + + bool checkReachableBarrier(SILInstruction *inst); + + void solveBackward() { reachability.solveBackward(); } + }; +}; + +/// Return true if \p inst is a barrier. +/// +/// Called exactly once for each reachable instruction. This is guaranteed to +/// hold as a barrier occurs between any original destroys that are reachable +/// from each. Any path reaching multiple destroys requires initialization, +/// which is a storageUser and therefore a barrier. +bool DeinitBarriers::DestroyReachability::checkReachableBarrier( + SILInstruction *inst) { + if (knownUses.debugInsts.contains(inst)) { + result.deadUsers.push_back(inst); + return false; + } + if (inst == storageDefInst) { + result.barriers.push_back(inst); + return true; + } + if (knownUses.storageUsers.contains(inst)) { + result.barriers.push_back(inst); + return true; + } + if (isDeinitBarrier(inst)) { + result.barriers.push_back(inst); + return true; + } + return false; +} + +/// Algorithm for hoisting the destroys of a single uniquely identified storage +/// object. +class HoistDestroys { + SILValue storageRoot; + InstructionDeleter &deleter; + + // Book-keeping for the rewriting stage. 
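+  // reusedDestroys: original destroys that already sit at a required
+  // insertion point; rewriting keeps them in place instead of deleting and
+  // recreating an identical instruction.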
+ SmallPtrSet reusedDestroys; + + BasicBlockSetVector destroyMergeBlocks; + +public: + HoistDestroys(SILValue storageRoot, InstructionDeleter &deleter) + : storageRoot(storageRoot), deleter(deleter), + destroyMergeBlocks(getFunction()) {} + + bool perform(); + +protected: + SILFunction *getFunction() const { return storageRoot->getFunction(); } + + bool foldBarrier(SILInstruction *barrier); + + void insertDestroy(SILInstruction *barrier, SILInstruction *insertBefore, + const KnownStorageUses &knownUses); + + void createDestroy(SILInstruction *insertBefore, + const SILDebugScope *scope); + + void createSuccessorDestroys(SILBasicBlock *barrierBlock); + + bool rewriteDestroys(const KnownStorageUses &knownUses, + const DeinitBarriers &deinitBarriers); + + void mergeDestroys(SILBasicBlock *mergeBlock); +}; + +} // namespace + +bool HoistDestroys::perform() { + auto storage = AccessStorage::compute(storageRoot); + if (!storage.isUniquelyIdentified()) + return false; + + KnownStorageUses knownUses(storage, getFunction()); + if (!knownUses.findUses()) + return false; + + DeinitBarriers deinitBarriers(getFunction()); + deinitBarriers.compute(knownUses); + + // No SIL changes happen before rewriting. + return rewriteDestroys(knownUses, deinitBarriers); +} + +bool HoistDestroys::rewriteDestroys(const KnownStorageUses &knownUses, + const DeinitBarriers &deinitBarriers) { + // Place a new destroy after each barrier instruction. + for (SILInstruction *barrier : deinitBarriers.barriers) { + auto *barrierBlock = barrier->getParent(); + if (barrier != barrierBlock->getTerminator()) { + if (!foldBarrier(barrier)) + insertDestroy(barrier, barrier->getNextInstruction(), knownUses); + continue; + } + for (auto *successor : barrierBlock->getSuccessorBlocks()) { + insertDestroy(barrier, &successor->front(), knownUses); + } + } + // Place a new destroy at each CFG edge in which the successor's beginning is + // reached but the predecessors end is not reached. + for (auto *beginReachedBlock : deinitBarriers.destroyReachesBeginBlocks) { + SILInstruction *barrier = nullptr; + if (auto *predecessor = beginReachedBlock->getSinglePredecessorBlock()) { + if (deinitBarriers.destroyReachesEndBlocks.contains(predecessor)) + continue; + + barrier = predecessor->getTerminator(); + + } else if (!beginReachedBlock->pred_empty()) { + // This is the only successor, so the destroy must reach the predecessors. + assert(llvm::all_of( + beginReachedBlock->getPredecessorBlocks(), [&](auto *predecessor) { + return deinitBarriers.destroyReachesEndBlocks.contains(predecessor); + })); + continue; + } + // The destroy does not reach the end of any predecessors. + insertDestroy(barrier, &beginReachedBlock->front(), knownUses); + } + // Delete dead users before merging destroys. 
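+    // (A dead debug_value left between a newly created destroy and its
+    // block's terminator would otherwise prevent mergeDestroys from
+    // recognizing the tail destroys in each predecessor.)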
+ for (auto *deadInst : deinitBarriers.deadUsers) { + deleter.forceDelete(deadInst); + } + for (auto *destroyInst : knownUses.originalDestroys) { + if (reusedDestroys.contains(destroyInst)) + continue; + + deleter.forceDelete(destroyInst); + } + deleter.cleanupDeadInstructions(); + + for (auto *mergeBlock : destroyMergeBlocks) { + mergeDestroys(mergeBlock); + } + return deleter.hadCallbackInvocation(); +} + +bool HoistDestroys::foldBarrier(SILInstruction *barrier) { + if (auto *load = dyn_cast(barrier)) { + if (load->getOperand() == storageRoot) { + assert(load->getOwnershipQualifier() == LoadOwnershipQualifier::Copy); + load->setOwnershipQualifier(LoadOwnershipQualifier::Take); + return true; + } + } + if (auto *copy = dyn_cast(barrier)) { + if (copy->getSrc() == storageRoot) { + assert(!copy->isTakeOfSrc()); + copy->setIsTakeOfSrc(IsTake); + return true; + } + } + return false; +} + +// \p barrier may be null if the destroy is at function entry. +void HoistDestroys::insertDestroy(SILInstruction *barrier, + SILInstruction *insertBefore, + const KnownStorageUses &knownUses) { + if (auto *branch = dyn_cast(insertBefore)) { + destroyMergeBlocks.insert(branch->getDestBB()); + } + // Avoid mutating SIL for no reason. This could lead to infinite loops. + if (isa(insertBefore) + || isa(insertBefore)) { + if (llvm::find(knownUses.originalDestroys, insertBefore) + != knownUses.originalDestroys.end()) { + reusedDestroys.insert(insertBefore); + return; + } + } + const SILDebugScope *scope = barrier + ? barrier->getDebugScope() : getFunction()->getDebugScope(); + createDestroy(insertBefore, scope); +} + +void HoistDestroys::createDestroy(SILInstruction *insertBefore, + const SILDebugScope *scope) { + auto loc = RegularLocation::getAutoGeneratedLocation(); + SILInstruction *newDestroy; + if (storageRoot->getType().isAddress()) { + newDestroy = + SILBuilder(insertBefore, scope).createDestroyAddr(loc, storageRoot); + } else { + newDestroy = + SILBuilder(insertBefore, scope).createDestroyValue(loc, storageRoot); + } + deleter.getCallbacks().createdNewInst(newDestroy); +} + +void HoistDestroys::mergeDestroys(SILBasicBlock *mergeBlock) { + SmallVector deadDestroys; + for (auto *predecessors : mergeBlock->getPredecessorBlocks()) { + auto *tailDestroy = predecessors->getTerminator()->getPreviousInstruction(); + if (!tailDestroy || (!isa(tailDestroy) + && !isa(tailDestroy))) { + return; + } + if (tailDestroy->getOperand(0) != storageRoot) + return; + + deadDestroys.push_back(tailDestroy); + } + if (deadDestroys.size() < 2) // ignore trivial fall-thru + return; + + createDestroy(&mergeBlock->front(), deadDestroys[0]->getDebugScope()); + + for (auto *deadDestroy : deadDestroys) { + deleter.forceDelete(deadDestroy); + } +} + +// ============================================================================= +// Top-Level API +// ============================================================================= + +bool hoistDestroys(SILValue root, InstructionDeleter &deleter) { + LLVM_DEBUG(llvm::dbgs() << "Performing destroy hoisting on " << root); + + SILFunction *function = root->getFunction(); + if (!function) + return false; + + // The algorithm assumes no critical edges. 
+ assert(function->hasOwnership() && "requires OSSA"); + + return HoistDestroys(root, deleter).perform(); +} + +// ============================================================================= +// Pipeline Pass +// ============================================================================= + +namespace { +class SSADestroyHoisting : public swift::SILFunctionTransform { + void run() override; +}; +} // end anonymous namespace + +// TODO: Handle alloc_box the same way, as long as the box doesn't escape. +// +// TODO: Handle address and boxes that are captured in no-escape closures. +void SSADestroyHoisting::run() { + if (!getFunction()->hasOwnership()) + return; + + InstructionDeleter deleter; + bool changed = false; + for (auto *arg : getFunction()->getArguments()) { + if (arg->getType().isAddress()) { + changed |= hoistDestroys(arg, deleter); + } + } + for (auto &block : *getFunction()) { + for (auto &inst : block) { + if (auto *alloc = dyn_cast(&inst)) { + changed |= hoistDestroys(alloc, deleter); + } + } + } + if (changed) { + invalidateAnalysis(SILAnalysis::InvalidationKind::Instructions); + } +} + +SILTransform *swift::createSSADestroyHoisting() { + return new SSADestroyHoisting(); +} diff --git a/test/SILOptimizer/hoist_destroy_addr.sil b/test/SILOptimizer/hoist_destroy_addr.sil new file mode 100644 index 0000000000000..5a1721b7e2533 --- /dev/null +++ b/test/SILOptimizer/hoist_destroy_addr.sil @@ -0,0 +1,231 @@ +// RUN: %target-sil-opt -opt-mode=none -enable-sil-verify-all %s -ssa-destroy-hoisting | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECKDEB +// RUN: %target-sil-opt -opt-mode=speed -enable-sil-verify-all %s -ssa-destroy-hoisting | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECKOPT +// +// TODO: migrate the remaining tests from destroy_hoisting.sil. 
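//
// CHECKDEB lines apply to the -opt-mode=none run: debug info must be
// preserved, so a debug_value of the storage acts as a use and destroys are
// not hoisted above it. CHECKOPT lines apply to the -opt-mode=speed run:
// debug_value instructions left outside the shrunken lifetime are deleted.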
+ +sil_stage canonical + +import Builtin + +class X { +} + +enum TwoCases { + case A(X) + case B +} + +struct S { + var x: X +} + +struct Outer { + var s: S + var ox: X +} + +struct Mixed { + var x: X + var i: Builtin.Int64 +} + +public struct S2 { + let s: S +} + + +public enum E { + case A + case B +} + +sil @unknown : $@convention(thin) () -> () +sil @use_S : $@convention(thin) (@in_guaranteed S) -> () + +sil @f_out : $@convention(thin) () -> @out T +sil @f_bool : $@convention(thin) () -> Builtin.Int1 + +// CHECK-LABEL: sil [ossa] @test_simple +// CHECK: bb0(%0 : $*S): +// CHECK-NEXT: destroy_addr %0 +// CHECK-NEXT: br bb1 +// CHECK: bb1: +// CHECK-NEXT: tuple +// CHECK-NEXT: return +sil [ossa] @test_simple : $@convention(thin) (@in S) -> () { +bb0(%0 : $*S): + br bb1 +bb1: + destroy_addr %0 : $*S + %r = tuple () + return %r : $() +} + +// CHECK-LABEL: sil [ossa] @combine_load +// CHECK: bb0(%0 : $*S): +// CHECK-NEXT: load [take] %0 +// CHECK-NEXT: br bb1 +// CHECK: bb1: +// CHECK-NEXT: return +sil [ossa] @combine_load : $@convention(thin) (@in S) -> @owned S { +bb0(%0 : $*S): + %v = load [copy] %0 : $*S + br bb1 +bb1: + destroy_addr %0 : $*S + return %v : $S +} + +// CHECK-LABEL: sil [ossa] @combine_copy_addr +// CHECK: bb0(%0 : $*S, %1 : $*S): +// CHECK-NEXT: copy_addr [take] %1 to [initialization] %0 +// CHECK-NEXT: br bb1 +// CHECK: bb1: +// CHECK-NEXT: tuple +// CHECK-NEXT: return +sil [ossa] @combine_copy_addr : $@convention(thin) (@in S) -> @out S { +bb0(%0 : $*S, %1 : $*S): + copy_addr %1 to [initialization] %0 : $*S + br bb1 +bb1: + destroy_addr %1 : $*S + %r = tuple () + return %r : $() +} + +// CHECK-LABEL: sil [ossa] @tail_merging +// CHECK: bb1: +// CHECK: apply +// CHECK-NEXT: br bb3 +// CHECK: bb2: +// CHECK-NEXT: br bb3 +// CHECK: bb3: +// CHECK-NEXT: destroy_addr %0 +// CHECK-NEXT: br bb4 +// CHECK: bb4: +// CHECK-NEXT: tuple +// CHECK-NEXT: return +sil [ossa] @tail_merging : $@convention(thin) (@in S) -> () { +bb0(%0 : $*S): + cond_br undef, bb1, bb2 +bb1: + %f = function_ref @use_S : $@convention(thin) (@in_guaranteed S) -> () + %a = apply %f(%0) : $@convention(thin) (@in_guaranteed S) -> () + br bb3 +bb2: + br bb3 +bb3: + br bb4 +bb4: + destroy_addr %0 : $*S + %r = tuple () + return %r : $() +} + +// CHECK-LABEL: sil hidden [ossa] @backward_init : $@convention(thin) () -> @out T { +// CHECK: [[A:%.*]] = alloc_stack $T +// CHECK: apply +// CHECK: debug_value [[A]] : $*T, expr op_deref +// CHECK-OPT: copy_addr [take] [[A]] to [initialization] %0 : $*T +// CHECKOPT-NOT: destroy_addr +// CHECKOPT-NOT: debug_value [[A]] +// CHECKDEB: copy_addr [[A]] to [initialization] %0 : $*T +// CHECKDEB: debug_value [[A]] +// CHECKDEB-NEXT: destroy_addr [[A]] : $*T +// CHECK-LABEL: } // end sil function 'backward_init' +sil hidden [ossa] @backward_init : $@convention(thin) () -> @out T { +bb0(%0 : $*T): + %l1 = alloc_stack $T + %f1 = function_ref @f_out : $@convention(thin) <τ_0_0> () -> @out τ_0_0 + %c1 = apply %f1(%l1) : $@convention(thin) <τ_0_0> () -> @out τ_0_0 + debug_value %l1 : $*T, expr op_deref + copy_addr %l1 to [initialization] %0 : $*T + debug_value %0 : $*T, expr op_deref + debug_value %l1 : $*T, expr op_deref + destroy_addr %l1 : $*T + dealloc_stack %l1 : $*T + %t = tuple () + return %t : $() +} + +// With optimization, the destroy_addr is hoisted above debug_value in +// bb2. Dead debug instructions then need to be deleted before the +// destroy can be merged back onto bb3. 
+// +// CHECK-LABEL: sil hidden [ossa] @destroyDiamond : $@convention(thin) (@in_guaranteed T, Builtin.Int1) -> () { +// CHECK: bb0(%0 : $*T, %1 : $Builtin.Int1): +// CHECK: [[ALLOC:%.*]] = alloc_stack $T, var, name "t" +// CHECK-NOT: destroy +// CHECK: cond_br %{{.*}}, bb1, bb2 +// CHECK: bb1: +// CHECK: apply %{{.*}}() : $@convention(thin) () -> () +// CHECK-NOT: destroy_addr +// CHECK: br bb3 +// CHECK: bb2: +// CHECKDEB: debug_value [[ALLOC]] : $*T, let, name "t" +// CHECK-NOT: debug_val [[ALLOC]] +// CHECK: br bb3 +// CHECK: bb3: +// CHECK: destroy_addr [[ALLOC]] : $*T +// CHECK: return +// CHECK-LABEL: } // end sil function 'destroyDiamond' +sil hidden [ossa] @destroyDiamond : $@convention(thin) (@in_guaranteed T, Builtin.Int1) -> () { +bb0(%0 : $*T, %1 : $Builtin.Int1): + debug_value %0 : $*T, let, name "arg", argno 1, expr op_deref + debug_value %1 : $Builtin.Int1, let, name "z", argno 2 + %4 = alloc_stack $T, var, name "t" + copy_addr %0 to [initialization] %4 : $*T + cond_br %1, bb1, bb2 + +bb1: + %8 = function_ref @unknown : $@convention(thin) () -> () + %9 = apply %8() : $@convention(thin) () -> () + br bb3 + +bb2: + debug_value %4 : $*T, let, name "t" + br bb3 + +bb3: + destroy_addr %4 : $*T + dealloc_stack %4 : $*T + %14 = tuple () + return %14 : $() +} + +// CHECK-LABEL: sil hidden [ossa] @destroyLoop : $@convention(thin) (@in_guaranteed T) -> () { +// CHECK: [[ALLOC:%.*]] = alloc_stack $T, var, name "t" +// CHECK: br bb1 +// CHECK: bb1: +// CHECK: apply %{{.*}}() : $@convention(thin) () -> Builtin.Int1 +// CHECK-NEXT: cond_br %{{.*}}, bb2, bb3 +// CHECK: bb2: +// CHECK-NEXT: br bb1 +// CHECK: bb3: +// CHECKDEB: debug_value [[ALLOC]] : $*T, let, name "t" +// CHECKOPT-NONE: debug_value +// CHECK: destroy_addr [[ALLOC]] : $*T +// CHECK: dealloc_stack [[ALLOC]] : $*T +// CHECK-LABEL: } // end sil function 'destroyLoop' +sil hidden [ossa] @destroyLoop : $@convention(thin) (@in_guaranteed T) -> () { +bb0(%0 : $*T): + %a = alloc_stack $T, var, name "t" + copy_addr %0 to [initialization] %a : $*T + br bb1 + +bb1: + %f = function_ref @f_bool : $@convention(thin) () -> Builtin.Int1 + %c = apply %f() : $@convention(thin) () -> Builtin.Int1 + cond_br %c, bb2, bb3 + +bb2: + br bb1 + +bb3: + debug_value %a : $*T, let, name "t" + destroy_addr %a : $*T + dealloc_stack %a : $*T + %16 = tuple () + return %16 : $() +} +