diff --git a/include/swift/Basic/STLExtras.h b/include/swift/Basic/STLExtras.h
index a0025dfb4b917..96a7c9e6a6334 100644
--- a/include/swift/Basic/STLExtras.h
+++ b/include/swift/Basic/STLExtras.h
@@ -639,12 +639,12 @@ inline T accumulate(const Container &C, T init, BinaryOperation op) {
 }
 
 template <typename Container, typename T>
-inline bool binary_search(const Container &C, T value) {
+inline bool binary_search(const Container &C, const T &value) {
   return std::binary_search(C.begin(), C.end(), value);
 }
 
 template <typename Container, typename T, typename BinaryOperation>
-inline bool binary_search(const Container &C, T value, BinaryOperation op) {
+inline bool binary_search(const Container &C, const T &value,
+                          BinaryOperation op) {
   return std::binary_search(C.begin(), C.end(), value, op);
 }
diff --git a/lib/SIL/OwnershipUtils.cpp b/lib/SIL/OwnershipUtils.cpp
index bbb30c28a500d..feeab6da18e6f 100644
--- a/lib/SIL/OwnershipUtils.cpp
+++ b/lib/SIL/OwnershipUtils.cpp
@@ -163,6 +163,13 @@ bool swift::getUnderlyingBorrowIntroducingValues(
       continue;
     }
 
+    // If v produces .none ownership, then we can ignore it. It is important
+    // that we put this before checking for guaranteed forwarding instructions,
+    // since we want to ignore guaranteed forwarding instructions that in this
+    // specific case produce a .none value.
+    if (v.getOwnershipKind() == ValueOwnershipKind::None)
+      continue;
+
     // Otherwise if v is an ownership forwarding value, add its defining
     // instruction
     if (isGuaranteedForwardingValue(v)) {
@@ -173,10 +180,9 @@
       continue;
     }
 
-    // If v produces any ownership, then we can ignore it. Otherwise, we need to
-    // return false since this is an introducer we do not understand.
-    if (v.getOwnershipKind() != ValueOwnershipKind::None)
-      return false;
+    // Otherwise, this is an introducer we do not understand. Bail and return
+    // false.
+    return false;
   }
 
   return true;
diff --git a/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp b/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp
index 9304d9bd786df..4ad37b981c35a 100644
--- a/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp
+++ b/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp
@@ -13,6 +13,8 @@
 #define DEBUG_TYPE "predictable-memopt"
 
 #include "PMOMemoryUseCollector.h"
+#include "swift/Basic/BlotSetVector.h"
+#include "swift/Basic/STLExtras.h"
 #include "swift/SIL/BasicBlockUtils.h"
 #include "swift/SIL/BranchPropagatedUser.h"
 #include "swift/SIL/OwnershipUtils.h"
@@ -422,8 +424,14 @@ class AvailableValueAggregator {
   /// take.
   SmallVector<SILInstruction *, 16> insertedInsts;
 
+  /// The list of phi nodes inserted by the SSA updater.
   SmallVector<SILPhiArgument *, 16> insertedPhiNodes;
 
+  /// A set of copy_values whose lifetime we balanced while inserting phi
+  /// nodes. This means that these copy_values must be skipped in
+  /// addMissingDestroysForCopiedValues.
+  SmallPtrSet<CopyValueInst *, 16> copyValueProcessedWithPhiNodes;
+
 public:
   AvailableValueAggregator(SILInstruction *Inst,
                            MutableArrayRef<AvailableValue> AvailableValueList,
@@ -445,9 +453,6 @@
                  bool isTopLevel = true);
   bool canTake(SILType loadTy, unsigned firstElt) const;
 
-  SingleValueInstruction *addMissingDestroysForCopiedValues(LoadInst *li,
-                                                            SILValue newVal);
-
   void print(llvm::raw_ostream &os) const;
   void dump() const LLVM_ATTRIBUTE_USED;
 
@@ -467,13 +472,11 @@
   /// reference counts of the intermediate copies and phis to ensure that all
   /// forwarding operations in the CFG are strongly control equivalent (i.e. run
   /// the same number of times).
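+  ///
+  /// For illustration, a sketch of the expected call pattern (mirroring
+  /// promoteLoadCopy below, where `li` is the load being promoted and
+  /// `newVal` is the aggregated available value):
+  ///
+  ///   agg.fixupOwnership(li, newVal);
+  ///   newVal = SILBuilderWithScope(li).emitCopyValueOperation(li->getLoc(), newVal);
+  ///   li->replaceAllUsesWith(newVal);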
-  void fixupOwnership(LoadBorrowInst *lbi, SILValue newVal) {
-    // Sort inserted insts so we can bisect upon it and mark copy_value as needing
-    // to be skipped.
-    sort(insertedInsts);
-    SmallBitVector instsToSkip(insertedInsts.size());
-    addHandOffCopyDestroysForPhis(lbi, newVal, instsToSkip);
-    addMissingDestroysForCopiedValues(lbi, newVal, instsToSkip);
+  void fixupOwnership(SILInstruction *load, SILValue newVal) {
+    assert(isa<LoadBorrowInst>(load) || isa<LoadInst>(load));
+
+    addHandOffCopyDestroysForPhis(load, newVal);
+    addMissingDestroysForCopiedValues(load, newVal);
   }
 
 private:
@@ -490,15 +493,13 @@
   /// If, as a result of us copying values, we may have unconsumed destroys,
   /// find the appropriate location and place the values there. Only used when
   /// ownership is enabled.
-  void addMissingDestroysForCopiedValues(LoadBorrowInst *li, SILValue newVal,
-                                         const SmallBitVector &instsToSkip);
+  void addMissingDestroysForCopiedValues(SILInstruction *load, SILValue newVal);
 
   /// As a result of us using the SSA updater, insert hand off copy/destroys at
   /// each phi and make sure that intermediate phis do not leak by inserting
   /// destroys along paths that go through the intermediate phi that do not also
   /// go through the final phi.
-  void addHandOffCopyDestroysForPhis(SILInstruction *load, SILValue newVal,
-                                     SmallBitVector &instsToSkipOut);
+  void addHandOffCopyDestroysForPhis(SILInstruction *load, SILValue newVal);
 };
 
 } // end anonymous namespace
@@ -726,9 +727,15 @@ AvailableValueAggregator::aggregateFullyAvailableValue(SILType loadTy,
 
   // Finally, grab the value from the SSA updater.
   SILValue result = updater.GetValueInMiddleOfBlock(B.getInsertionBB());
-  assert(result.getOwnershipKind().isCompatibleWith(ValueOwnershipKind::Owned));
-  return result;
+  if (isTake() || !B.hasOwnership()) {
+    return result;
+  }
+
+  // Be careful with this value and insert a copy in our load block to prevent
+  // any weird control equivalence issues.
+  SILBuilderWithScope builder(&*B.getInsertionPoint(), &insertedInsts);
+  return builder.emitCopyValueOperation(Loc, result);
 }
 
 SILValue AvailableValueAggregator::aggregateTupleSubElts(TupleType *TT,
@@ -759,7 +766,7 @@ SILValue AvailableValueAggregator::aggregateTupleSubElts(TupleType *TT,
   // If we are going to use this to promote a borrowed value, insert borrow
   // operations. Eventually I am going to do this for everything, but this
   // should make it easier to bring up.
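+  // For intuition, the SIL shape this gives a two-element aggregate in OSSA
+  // (an illustrative sketch; compare the CHECK lines in the updated tests):
+  //
+  //   %b0 = begin_borrow %elt0
+  //   %b1 = begin_borrow %elt1
+  //   %agg = struct $NativeObjectPair (%b0, %b1)
+  //   %result = copy_value %agg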
-  if (expectedOwnership == AvailableValueExpectedOwnership::Borrow) {
+  if (!isTake()) {
     for (unsigned i : indices(ResultElts)) {
       ResultElts[i] = B.emitBeginBorrowOperation(Loc, ResultElts[i]);
     }
@@ -793,7 +800,7 @@ SILValue AvailableValueAggregator::aggregateStructSubElts(StructDecl *sd,
     firstElt += numSubElt;
   }
 
-  if (expectedOwnership == AvailableValueExpectedOwnership::Borrow) {
+  if (!isTake()) {
     for (unsigned i : indices(resultElts)) {
       resultElts[i] = B.emitBeginBorrowOperation(Loc, resultElts[i]);
     }
@@ -840,7 +847,13 @@ SILValue AvailableValueAggregator::handlePrimitiveValue(SILType loadTy,
          !builder.hasOwnership() ||
          eltVal.getOwnershipKind().isCompatibleWith(ValueOwnershipKind::Owned));
   assert(eltVal->getType() == loadTy && "Subelement types mismatch");
-  return eltVal;
+
+  if (!builder.hasOwnership()) {
+    return eltVal;
+  }
+
+  SILBuilderWithScope builder2(&*B.getInsertionPoint(), &insertedInsts);
+  return builder2.emitCopyValueOperation(Loc, eltVal);
 }
 
 // If we have an available value, then we want to extract the subelement from
@@ -892,126 +905,249 @@ SILValue AvailableValueAggregator::handlePrimitiveValue(SILType loadTy,
   assert(!B.hasOwnership() ||
          eltVal.getOwnershipKind().isCompatibleWith(ValueOwnershipKind::Owned));
   assert(eltVal->getType() == loadTy && "Subelement types mismatch");
-  return eltVal;
+  if (!B.hasOwnership())
+    return eltVal;
+  SILBuilderWithScope builder(&*B.getInsertionPoint(), &insertedInsts);
+  return builder.emitCopyValueOperation(Loc, eltVal);
 }
 
-SingleValueInstruction *
-AvailableValueAggregator::addMissingDestroysForCopiedValues(LoadInst *li,
-                                                            SILValue newVal) {
-  assert(B.hasOwnership() &&
-         "We assume this is only called if we have ownership");
+static SILInstruction *getNonPhiBlockIncomingValueDef(SILValue incomingValue,
+                                                      CopyValueInst *phiCopy) {
+  auto *phiBlock = phiCopy->getParent();
+  if (phiBlock == incomingValue->getParentBlock()) {
+    return nullptr;
+  }
 
-  SmallPtrSet<SILBasicBlock *, 8> visitedBlocks;
-  SmallVector<SILBasicBlock *, 8> leakingBlocks;
-  bool foundLoop = false;
-  auto loc = RegularLocation::getAutoGeneratedLocation();
-  while (!insertedInsts.empty()) {
-    auto *cvi = dyn_cast<CopyValueInst>(insertedInsts.pop_back_val());
-    if (!cvi)
-      continue;
+  if (auto *cvi = dyn_cast<CopyValueInst>(incomingValue)) {
+    return cvi;
+  }
 
-    // Clear our state.
-    visitedBlocks.clear();
-    leakingBlocks.clear();
-    // The linear lifetime checker doesn't care if the passed in load is
-    // actually a user of our copy_value. What we care about is that the load is
-    // guaranteed to be in the block where we have reformed the tuple in a
-    // consuming manner. This means if we add it as the consuming use of the
-    // copy, we can find the leaking places if any exist.
-    //
-    // Then perform the linear lifetime check. If we succeed, continue. We have
-    // no further work to do.
-    auto errorKind = ownership::ErrorBehaviorKind::ReturnFalse;
-    LinearLifetimeChecker checker(visitedBlocks, deadEndBlocks);
-    auto error = checker.checkValue(
-        cvi, {BranchPropagatedUser(&li->getAllOperands()[0])}, {}, errorKind,
-        &leakingBlocks);
-    if (!error.getFoundError())
-      continue;
+  assert(isa<SILPhiArgument>(incomingValue));
 
-    // Ok, we found some leaking blocks. Since we are using the linear lifetime
-    // checker with memory, we do not have any guarantees that the store is out-
-    // side of a loop and a load is in a loop. In such a case, we want to
-    // replace the load with a copy_value.
-    foundLoop |= error.getFoundOverConsume();
+  // Otherwise, our copy_value may not be post-dominated by our phi. To
+  // work around that, we need to insert destroys along the other
+  // paths. So set base to the first instruction in our argument's block,
+  // so we can insert destroys for our base.
+  return &*incomingValue->getParentBlock()->begin();
+}
 
-    // Ok, we found some leaking blocks. Insert destroys at the
-    // beginning of these blocks for our copy_value.
-    for (auto *bb : leakingBlocks) {
-      SILBuilderWithScope b(bb->begin());
-      b.emitDestroyValueOperation(loc, cvi);
+static bool
+terminatorHasAnyKnownPhis(TermInst *ti,
+                          ArrayRef<SILPhiArgument *> insertedPhiNodesSorted) {
+  for (auto succArgList : ti->getSuccessorBlockArguments()) {
+    if (llvm::any_of(succArgList, [&](SILPhiArgument *arg) {
+          return binary_search(insertedPhiNodesSorted, arg);
+        })) {
+      return true;
     }
   }
 
-  // If we didn't find a loop, we are done, just return svi to get RAUWed.
-  if (!foundLoop) {
-    return li;
+  return false;
+}
+
+namespace {
+
+class PhiNodeCopyCleanupInserter {
+  llvm::SmallMapVector<SILValue, unsigned, 8> incomingValues;
+
+  /// Map from index -> (incomingValueIndex, copy).
+  ///
+  /// We are going to stable_sort this array using the indices of
+  /// incomingValueIndex. This will ensure that we always visit in
+  /// insertion order our incoming values (since the indices we are
+  /// sorting by are the count of incoming values we have seen so far
+  /// when we see the incoming value) and maintain the internal
+  /// insertion sort within our range as well. This ensures that we
+  /// visit our incoming values in visitation order and that within
+  /// their own values, also visit them in visitation order with
+  /// respect to each other.
+  SmallVector<std::pair<unsigned, CopyValueInst *>, 16> copiesToCleanup;
+
+  /// The lifetime frontier that we use to compute lifetime endpoints
+  /// when emitting cleanups.
+  ValueLifetimeAnalysis::Frontier lifetimeFrontier;
+
+public:
+  PhiNodeCopyCleanupInserter() = default;
+
+  void trackNewCleanup(SILValue incomingValue, CopyValueInst *copy) {
+    auto entry = std::make_pair(incomingValue, incomingValues.size());
+    auto iter = incomingValues.insert(entry);
+    // If we did not succeed, then iter.first->second is the index of the
+    // already-seen incoming value. Otherwise, it is the next index.
+    copiesToCleanup.emplace_back(iter.first->second, copy);
   }
 
-  // If we found a loop, then we know that our leaking blocks are the exiting
-  // blocks of the loop and the value has been lifetime extended over the loop.
+  void emit(DeadEndBlocks &deadEndBlocks) &&;
+};
 
-  // If we have a load, we need to put in a copy so that the destroys within
-  // the loop are properly balanced.
-  newVal = SILBuilderWithScope(li).emitCopyValueOperation(loc, newVal);
+} // end anonymous namespace
 
-  li->replaceAllUsesWith(newVal);
-  SILValue addr = li->getOperand();
-  li->eraseFromParent();
-  if (auto *addrI = addr->getDefiningInstruction())
-    recursivelyDeleteTriviallyDeadInstructions(addrI);
-  return nullptr;
+void PhiNodeCopyCleanupInserter::emit(DeadEndBlocks &deadEndBlocks) && {
+  // READ THIS: We are being very careful here to avoid allowing for
+  // non-determinism to enter here.
+  //
+  // 1. First we create a list of indices of our phi node data. Then we use a
+  //    stable sort to sort those indices into the order in which our phi node
+  //    cleanups would be in if we compared just using incomingValues. We use a
+  //    stable sort here to ensure that within the same "cohort" of values, our
+  //    order is insertion order.
+  //
+  // 2. We go through the list of phiNodeCleanupStates in insertion order. We
+  //    also maintain a set of already visited base values.
+  //    When we visit the first phiNodeCleanupState for a specific phi, we
+  //    process the phi then. This ensures that we always process the phis in
+  //    insertion order as well.
+  SmallVector<unsigned, 16> copiesToCleanupIndicesSorted;
+  llvm::copy(indices(copiesToCleanup),
+             std::back_inserter(copiesToCleanupIndicesSorted));
+
+  stable_sort(copiesToCleanupIndicesSorted,
+              [&](unsigned lhsIndex, unsigned rhsIndex) {
+                unsigned lhs = copiesToCleanup[lhsIndex].first;
+                unsigned rhs = copiesToCleanup[rhsIndex].first;
+                return lhs < rhs;
+              });
+
+  for (auto ii = copiesToCleanupIndicesSorted.begin(),
+            ie = copiesToCleanupIndicesSorted.end();
+       ii != ie;) {
+    unsigned incomingValueIndex = copiesToCleanup[*ii].first;
+
+    // First find the end of the range of copies that share this incoming
+    // value.
+    auto rangeEnd = std::find_if_not(std::next(ii), ie, [&](unsigned index) {
+      return incomingValueIndex == copiesToCleanup[index].first;
+    });
+
+    SWIFT_DEFER {
+      // Once we have finished processing, set ii to rangeEnd. This ensures
+      // that the code below does not need to worry about updating the
+      // iterator.
+      ii = rangeEnd;
+    };
+
+    SILValue incomingValue =
+        std::next(incomingValues.begin(), incomingValueIndex)->first;
+    CopyValueInst *phiCopy = copiesToCleanup[*ii].second;
+    auto *insertPt = getNonPhiBlockIncomingValueDef(incomingValue, phiCopy);
+    auto loc = RegularLocation::getAutoGeneratedLocation();
+
+    // Before we do anything, see if we have a single cleanup state. In such a
+    // case, we could have that we have a phi node as an incoming value and a
+    // copy_value in that same block. In such a case, we want to just insert
+    // the destroy and continue. This means that
+    // getNonPhiBlockIncomingValueDef() should always return a non-null value
+    // in the code below.
+    if (std::next(ii) == rangeEnd && isa<SILPhiArgument>(incomingValue) &&
+        !insertPt) {
+      SILBasicBlock *phiBlock = phiCopy->getParent();
+      SILBuilderWithScope builder(phiBlock->getTerminator());
+      builder.createDestroyValue(loc, incomingValue);
+      continue;
+    }
+
+    // Otherwise, we know that we have for this incomingValue, multiple
+    // potential insert pts that we need to handle at the same time with our
+    // lifetime query. Gather up those uses.
+    SmallVector<SILInstruction *, 8> users;
+    transform(llvm::make_range(ii, rangeEnd), std::back_inserter(users),
+              [&](unsigned index) { return copiesToCleanup[index].second; });
+
+    // Then lifetime extend our base over the copy_value.
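+    // The frontier computed below is the set of program points at which the
+    // lifetime of `def` ends. Emitting a destroy_value of the incoming value
+    // at each frontier point (with no critical edges, per the assert below)
+    // balances the copies created at the phis exactly once along every path.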
+    assert(lifetimeFrontier.empty());
+    auto *def = getNonPhiBlockIncomingValueDef(incomingValue, phiCopy);
+    assert(def && "Should never have a nullptr here since we handled all of "
+                  "the single block cases earlier");
+    ValueLifetimeAnalysis analysis(def, users);
+    bool foundCriticalEdges = !analysis.computeFrontier(
+        lifetimeFrontier, ValueLifetimeAnalysis::DontModifyCFG, &deadEndBlocks);
+    (void)foundCriticalEdges;
+    assert(!foundCriticalEdges);
+
+    while (!lifetimeFrontier.empty()) {
+      auto *insertPoint = lifetimeFrontier.pop_back_val();
+      SILBuilderWithScope builder(insertPoint);
+      builder.createDestroyValue(loc, incomingValue);
+    }
+  }
+}
 
 void AvailableValueAggregator::addHandOffCopyDestroysForPhis(
-    SILInstruction *load, SILValue newVal, SmallBitVector &instsToSkip) {
+    SILInstruction *load, SILValue newVal) {
   assert(isa<LoadBorrowInst>(load) || isa<LoadInst>(load));
 
-  ValueLifetimeAnalysis::Frontier lifetimeFrontier;
   SmallPtrSet<SILBasicBlock *, 8> visitedBlocks;
   SmallVector<SILBasicBlock *, 8> leakingBlocks;
   SmallVector<std::pair<SILBasicBlock *, SILValue>, 8> incomingValues;
   auto loc = RegularLocation::getAutoGeneratedLocation();
 
-  LLVM_DEBUG(llvm::dbgs() << "Inserted Phis!\n");
 #ifndef NDEBUG
+  LLVM_DEBUG(llvm::dbgs() << "Inserted Phis!\n");
   for (auto *phi : insertedPhiNodes) {
     LLVM_DEBUG(llvm::dbgs() << "Phi: " << *phi);
   }
 #endif
 
   // Before we begin, identify the offset for all phis that are intermediate
-  // phis inserted by the SSA updater.
+  // phis inserted by the SSA updater. We are taking advantage of the fact that
+  // the SSA updater just constructs the web without knowledge of ownership. So
+  // if a phi node is only used by another phi node that we inserted, then we
+  // have an intermediate phi node.
+  //
+  // TODO: There should be a better way of doing this than doing a copy + sort.
+  SmallVector<SILPhiArgument *, 16> insertedPhiNodesSorted;
+  llvm::copy(insertedPhiNodes, std::back_inserter(insertedPhiNodesSorted));
+  llvm::sort(insertedPhiNodesSorted);
+
   SmallBitVector intermediatePhiOffsets(insertedPhiNodes.size());
   for (unsigned i : indices(insertedPhiNodes)) {
-    if (insertedPhiNodes[i]->getSingleUserOfType<TermInst>()) {
-      intermediatePhiOffsets.set(i);
+    if (TermInst *termInst =
+            insertedPhiNodes[i]->getSingleUserOfType<TermInst>()) {
+      // Only set the value if we find termInst has a successor with a phi node
+      // in our insertedPhiNodes.
+      if (terminatorHasAnyKnownPhis(termInst, insertedPhiNodesSorted)) {
+        intermediatePhiOffsets.set(i);
+      }
     }
   }
 
   // First go through all of our phi nodes doing the following:
   //
   // 1. If any of the phi node have a copy_value as an operand, we know that the
-  //    copy_value does not dominate our final definition. In such a case since
-  //    we may not have that the copy_value is post-dominated by the phi, we
-  //    need to insert a copy_value at the phi to allow for post-domination and
-  //    then use the ValueLifetimeChecker to determine the rest of the frontier
-  //    for the value.
+  //    copy_value does not dominate our final definition since otherwise the
+  //    SSA updater would not have inserted a phi node here. In such a case
+  //    since we may not have that the copy_value is post-dominated by the phi,
+  //    we need to insert a copy_value at the phi to allow for post-domination
+  //    and then use the ValueLifetimeChecker to determine the rest of the
+  //    frontier for the base value.
   //
   // 2. If our phi node is used by another phi node, we run into a similar
   //    problem where we could have that our original phi node does not dominate
-  //    our final definition and may not be strongly control dependent on our
-  //    phi. To work around this problem, we insert at the phi a copy_value to
-  //    allow for the phi to post_dominate its copy and then extend the lifetime
-  //    of the phied value over that copy.
+  //    our final definition (since the SSA updater would not have inserted the
+  //    phi) and may not be strongly control dependent on our phi. To work
+  //    around this problem, we insert at the phi a copy_value to allow for the
+  //    phi to post_dominate its copy and then extend the lifetime of the phied
+  //    value over that copy.
+  //
+  // As an extra complication to this, when we insert compensating releases for
+  // any copy_values from (1), we need to insert the destroy_value on "base
+  // values" (either a copy_value or the first instruction of a phi argument's
+  // block) /after/ we have found all of the base values to ensure that if the
+  // same base value is used by multiple phis, we do not insert too many
+  // destroy_values.
+  //
+  // NOTE: At first glance one may think that such a problem could not also
+  // occur with phi nodes. Sadly, if we allow for double backedge loops, it is
+  // possible (and there may be more cases).
+  PhiNodeCopyCleanupInserter cleanupInserter;
+
   for (unsigned i : indices(insertedPhiNodes)) {
-    auto *phiArg = insertedPhiNodes[i];
+    auto *phi = insertedPhiNodes[i];
 
-    // If our phiArg is not owned, continue. No fixes are needed.
-    if (phiArg->getOwnershipKind() != ValueOwnershipKind::Owned)
+    // If our phi is not owned, continue. No fixes are needed.
+    if (phi->getOwnershipKind() != ValueOwnershipKind::Owned)
       continue;
 
-    LLVM_DEBUG(llvm::dbgs() << "Visiting inserted phi: " << *phiArg);
+    LLVM_DEBUG(llvm::dbgs() << "Visiting inserted phi: " << *phi);
 
     // Otherwise, we have a copy_value that may not be strongly control
     // equivalent with our phi node. In such a case, we need to use
     // ValueLifetimeAnalysis to lifetime extend the copy such that we can
@@ -1021,8 +1157,8 @@ void AvailableValueAggregator::addHandOffCopyDestroysForPhis(
     leakingBlocks.clear();
     incomingValues.clear();
 
-    phiArg->getIncomingPhiValues(incomingValues);
-    unsigned phiIndex = phiArg->getIndex();
+    phi->getIncomingPhiValues(incomingValues);
+    unsigned phiIndex = phi->getIndex();
     for (auto pair : incomingValues) {
       SILValue value = pair.second;
@@ -1034,13 +1170,10 @@
       // Otherwise, value should be from a copy_value or a phi node.
       assert(isa<CopyValueInst>(value) || isa<SILPhiArgument>(value));
 
-      // If we have a copy_value Set a bit for it in instsToSkip so that when we
-      // start processing insertedInstrs we know that we handled it here
-      // already.
+      // If we have a copy_value, note it in the processed set so that we skip
+      // it when we start processing insertedInsts.
       if (auto *cvi = dyn_cast<CopyValueInst>(value)) {
-        auto iter = lower_bound(insertedInsts, cvi);
-        assert(iter != insertedInsts.end() && *iter == cvi);
-        instsToSkip[std::distance(insertedInsts.begin(), iter)] = true;
+        copyValueProcessedWithPhiNodes.insert(cvi);
 
         // Then check if our termInst is in the same block as our copy_value. In
         // such a case, we can just use the copy_value as our phi's value
@@ -1055,54 +1188,13 @@
       // that for our actual phi.
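+      // The rewrite below, in illustrative SIL: the incoming value is copied
+      // immediately before the branch, and the copy, not the original value,
+      // feeds the phi:
+      //
+      //   %phiCopy = copy_value %incomingValue
+      //   br bbPhi(%phiCopy)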
      auto *termInst = pair.first->getTerminator();
       SILBuilderWithScope builder(termInst);
-      auto *phiCopy = builder.createCopyValue(loc, value);
+      CopyValueInst *phiCopy = builder.createCopyValue(loc, value);
       termInst->setOperand(phiIndex, phiCopy);
 
-      // Normalize on our base now that we have inserted the copy_value into the
-      // terminator block. If we have a copy_value, just use it directly as our
-      // base. We know it isn't in the block of our phiCopy due to a check
-      // above.
-      SILInstruction *base = nullptr;
-      if (auto *cvi = dyn_cast<CopyValueInst>(value)) {
-        assert(cvi->getParent() != phiCopy->getParent() &&
-               "Just to check invariant from above");
-        base = cvi;
-      } else {
-        assert(isa<SILPhiArgument>(value));
-        // If we have a phi argument and our incoming value block is the same as
-        // our phi block, we know that the copy_value we inserted will only be
-        // used by the phi. So insert a destroy_value in the incoming value
-        // block after the copy_value that we inserted and then continue.
-        if (pair.first == value->getParentBlock()) {
-          builder.createDestroyValue(loc, value);
-          continue;
-        }
-
-        // Otherwise, our copy_value may not be post-dominated by our phi. To
-        // work around that, we need to insert destroys along the other
-        // paths. So set base to the first instruction in our argument's block,
-        // so we can insert destroys for our base.
-        base = &*value->getParentBlock()->begin();
-      }
-      assert(base && "Should have been assigned");
-
-      // Then lifetime extend our base over the copy_value.
-      assert(lifetimeFrontier.empty());
-      ValueLifetimeAnalysis analysis(base, phiCopy);
-      bool foundCriticalEdges = !analysis.computeFrontier(
-          lifetimeFrontier, ValueLifetimeAnalysis::DontModifyCFG,
-          &deadEndBlocks);
-      (void)foundCriticalEdges;
-      assert(!foundCriticalEdges);
-
-      while (!lifetimeFrontier.empty()) {
-        auto *insertPoint = lifetimeFrontier.pop_back_val();
-        SILBuilderWithScope builder(insertPoint);
-        builder.createDestroyValue(loc, value);
-      }
-
-      visitedBlocks.clear();
-      leakingBlocks.clear();
+      // Now that we know our base, phi, and phiCopy for this specific incoming
+      // value, append it to the cleanup inserter so we can insert
+      // destroy_values later, after we visit all insertedPhiNodes.
+      cleanupInserter.trackNewCleanup(value, phiCopy);
     }
 
     // Then see if our phi is an intermediate phi. If it is an intermediate phi,
@@ -1131,8 +1223,8 @@
     auto errorKind = ownership::ErrorBehaviorKind::ReturnFalse;
     LinearLifetimeChecker checker(visitedBlocks, deadEndBlocks);
     auto error = checker.checkValue(
-        phiArg, {BranchPropagatedUser(&load->getAllOperands()[0])}, {},
-        errorKind, &leakingBlocks);
+        phi, {BranchPropagatedUser(&load->getAllOperands()[0])}, {}, errorKind,
+        &leakingBlocks);
 
     if (!error.getFoundError()) {
       // If we did not find an error, then our copy_value must be strongly
@@ -1140,7 +1232,7 @@
       // for the copy_value.
       auto next = std::next(load->getIterator());
       SILBuilderWithScope builder(next);
-      builder.emitDestroyValueOperation(next->getLoc(), phiArg);
+      builder.emitDestroyValueOperation(next->getLoc(), phi);
       continue;
     }
 
@@ -1151,23 +1243,29 @@
     if (!error.getFoundOverConsume()) {
       auto next = std::next(load->getIterator());
       SILBuilderWithScope builder(next);
-      builder.emitDestroyValueOperation(next->getLoc(), phiArg);
+      builder.emitDestroyValueOperation(next->getLoc(), phi);
     }
 
     // Ok, we found some leaking blocks. Insert destroys at the beginning of
    // these blocks for our copy_value.
     for (auto *bb : leakingBlocks) {
       SILBuilderWithScope b(bb->begin());
-      b.emitDestroyValueOperation(loc, phiArg);
+      b.emitDestroyValueOperation(loc, phi);
     }
   }
+
+  // Alright! In summary, we just lifetime extended all of our phis to the
+  // load block and inserted phi copies at all of our intermediate phi nodes.
+  // Now we need to clean up and insert all of the compensating destroy_values
+  // that we need.
+  std::move(cleanupInserter).emit(deadEndBlocks);
+
   // Clear the phi node array now that we are done.
   insertedPhiNodes.clear();
 }
 
 void AvailableValueAggregator::addMissingDestroysForCopiedValues(
-    LoadBorrowInst *lbi, SILValue newVal,
-    const SmallBitVector &instsToSkip) {
+    SILInstruction *load, SILValue newVal) {
   assert(B.hasOwnership() &&
          "We assume this is only called if we have ownership");
 
@@ -1175,21 +1273,16 @@
   SmallVector<SILBasicBlock *, 8> leakingBlocks;
   auto loc = RegularLocation::getAutoGeneratedLocation();
 
-  for (unsigned i : indices(insertedInsts)) {
-    // If we already handled this instruction above when handling phi nodes,
-    // just continue.
-    if (instsToSkip[i])
-      continue;
-
+  for (auto *inst : insertedInsts) {
     // Otherwise, see if this is a load [copy]. If it is a load [copy], then we
     // know that the load [copy] must be in the load block, meaning we can just
     // put a destroy_value /after/ the load_borrow to ensure that the value
     // lives long enough for us to copy_value it or a derived value for the
     // begin_borrow.
-    if (auto *li = dyn_cast<LoadInst>(insertedInsts[i])) {
+    if (auto *li = dyn_cast<LoadInst>(inst)) {
       if (li->getOwnershipQualifier() == LoadOwnershipQualifier::Copy) {
-        assert(li->getParent() == lbi->getParent());
-        auto next = std::next(lbi->getIterator());
+        assert(li->getParent() == load->getParent());
+        auto next = std::next(load->getIterator());
         SILBuilderWithScope builder(next);
         builder.emitDestroyValueOperation(next->getLoc(), li);
         continue;
@@ -1198,10 +1291,15 @@
 
     // Our copy_value may have been unset above if it was used by a phi
     // (implying it does not dominate our final user).
-    auto *cvi = dyn_cast<CopyValueInst>(insertedInsts[i]);
+    auto *cvi = dyn_cast<CopyValueInst>(inst);
     if (!cvi)
       continue;
 
+    // If we already handled this copy_value above when handling phi nodes,
+    // just continue.
+    if (copyValueProcessedWithPhiNodes.count(cvi))
+      continue;
+
     // Clear our state.
     visitedBlocks.clear();
     leakingBlocks.clear();
@@ -1217,14 +1315,14 @@
     auto errorKind = ownership::ErrorBehaviorKind::ReturnFalse;
     LinearLifetimeChecker checker(visitedBlocks, deadEndBlocks);
     auto error = checker.checkValue(
-        cvi, {BranchPropagatedUser(&lbi->getAllOperands()[0])}, {}, errorKind,
+        cvi, {BranchPropagatedUser(&load->getAllOperands()[0])}, {}, errorKind,
         &leakingBlocks);
 
     if (!error.getFoundError()) {
       // If we did not find an error, then our copy_value must be strongly
       // control equivalent with our load_borrow. So just insert a destroy_value
       // for the copy_value.
-      auto next = std::next(lbi->getIterator());
+      auto next = std::next(load->getIterator());
       SILBuilderWithScope builder(next);
       builder.emitDestroyValueOperation(next->getLoc(), cvi);
       continue;
     }
 
@@ -1235,7 +1333,7 @@
     // this if we found a loop since our leaking blocks will lifetime extend the
     // value over the loop.
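+    // Concretely: when the load sits in a loop relative to the copy, the
+    // destroys placed in the leaking (loop-exit) blocks below already end the
+    // lifetime on every path, so a destroy after the load would over-release.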
    if (!error.getFoundOverConsume()) {
-      auto next = std::next(lbi->getIterator());
+      auto next = std::next(load->getIterator());
       SILBuilderWithScope builder(next);
       builder.emitDestroyValueOperation(next->getLoc(), cvi);
     }
@@ -2031,16 +2129,15 @@ bool AllocOptimize::promoteLoadCopy(LoadInst *li) {
   // blocks that we may have can be found by performing a linear lifetime check
   // over all copies that we found using the load as the "consuming uses" (just
   // for the purposes of identifying the consuming block).
-  auto *oldLoad = agg.addMissingDestroysForCopiedValues(li, newVal);
+  agg.fixupOwnership(li, newVal);
 
-  // If we are returned the load, eliminate it. Otherwise, it was already
-  // handled for us... so return true.
-  if (!oldLoad)
-    return true;
+  // Now that we have fixed up all of our missing destroys, insert the copy
+  // value for our actual load and RAUW.
+  newVal = SILBuilderWithScope(li).emitCopyValueOperation(li->getLoc(), newVal);
 
-  oldLoad->replaceAllUsesWith(newVal);
-  SILValue addr = oldLoad->getOperand(0);
-  oldLoad->eraseFromParent();
+  li->replaceAllUsesWith(newVal);
+  SILValue addr = li->getOperand();
+  li->eraseFromParent();
   if (auto *addrI = addr->getDefiningInstruction())
     recursivelyDeleteTriviallyDeadInstructions(addrI);
   return true;
diff --git a/test/SILOptimizer/predictable_memaccess_opts.sil b/test/SILOptimizer/predictable_memaccess_opts.sil
index 192723cfbd731..8fa9df6db1b1e 100644
--- a/test/SILOptimizer/predictable_memaccess_opts.sil
+++ b/test/SILOptimizer/predictable_memaccess_opts.sil
@@ -60,9 +60,11 @@ bb0(%0 : $Builtin.Int32):
 // CHECK: [[STACK:%.*]] = alloc_stack $Builtin.NativeObject
 // CHECK: [[ARG_COPY:%.*]] = copy_value [[ARG]]
 // CHECK: store [[ARG]] to [init] [[STACK]]
+// CHECK: [[ARG_COPY_2:%.*]] = copy_value [[ARG_COPY]]
+// CHECK: destroy_value [[ARG_COPY]]
 // CHECK: destroy_addr [[STACK]]
 // CHECK: dealloc_stack [[STACK]]
-// CHECK: return [[ARG_COPY]]
+// CHECK: return [[ARG_COPY_2]]
 // CHECK: } // end sil function 'simple_nontrivial_load_promotion'
 sil [ossa] @simple_nontrivial_load_promotion : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject {
 bb0(%0 : @owned $Builtin.NativeObject):
@@ -83,10 +85,14 @@ bb0(%0 : @owned $Builtin.NativeObject):
 // CHECK: store [[ARG1]] to [init] [[FIRST_ADDR]]
 // CHECK: [[ARG2_COPY:%.*]] = copy_value [[ARG2]]
 // CHECK: store [[ARG2]] to [init] [[SECOND_ADDR]]
-// CHECK: [[RESULT:%.*]] = struct $NativeObjectPair ([[ARG1_COPY:%.*]] : $Builtin.NativeObject, [[ARG2_COPY:%.*]] : $Builtin.NativeObject)
+// CHECK: [[ARG1_COPY_BORROW:%.*]] = begin_borrow [[ARG1_COPY]]
+// CHECK: [[ARG2_COPY_BORROW:%.*]] = begin_borrow [[ARG2_COPY]]
+// CHECK: [[RESULT:%.*]] = struct $NativeObjectPair ([[ARG1_COPY_BORROW:%.*]] : $Builtin.NativeObject, [[ARG2_COPY_BORROW:%.*]] : $Builtin.NativeObject)
+// CHECK: [[RESULT_COPY_1:%.*]] = copy_value [[RESULT]]
+// CHECK: [[RESULT_COPY_2:%.*]] = copy_value [[RESULT_COPY_1]]
 // CHECK: destroy_addr [[STACK]]
 // CHECK: dealloc_stack [[STACK]]
-// CHECK: return [[RESULT]]
+// CHECK: return [[RESULT_COPY_2]]
 // CHECK: } // end sil function 'struct_nontrivial_load_promotion'
 sil [ossa] @struct_nontrivial_load_promotion : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject) -> @owned NativeObjectPair {
 bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $Builtin.NativeObject):
@@ -144,9 +150,11 @@ bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $Builtin.NativeObject):
 // CHECK:   br bb3([[ARG_COPY]] :
 //
 // CHECK: bb3([[RESULT:%.*]] : @owned
$Builtin.NativeObject): +// CHECK: [[RESULT_COPY_1:%.*]] = copy_value [[RESULT]] +// CHECK: [[RESULT_COPY_2:%.*]] = copy_value [[RESULT_COPY_1]] // CHECK: destroy_addr [[STACK]] // CHECK: dealloc_stack [[STACK]] -// CHECK: return [[RESULT]] +// CHECK: return [[RESULT_COPY_2]] // CHECK: } // end sil function 'simple_nontrivial_load_promotion_multi_insertpt' sil [ossa] @simple_nontrivial_load_promotion_multi_insertpt : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject { bb0(%0 : @owned $Builtin.NativeObject): @@ -192,10 +200,16 @@ bb3: // CHECK: br bb3([[ARG1_COPY]] : $Builtin.NativeObject, [[ARG2_COPY]] : $Builtin.NativeObject) // // CHECK: bb3([[ARG1_COPY:%.*]] : @owned $Builtin.NativeObject, [[ARG2_COPY:%.*]] : @owned $Builtin.NativeObject): -// CHECK: [[RESULT:%.*]] = struct $NativeObjectPair ([[ARG1_COPY:%.*]] : $Builtin.NativeObject, [[ARG2_COPY:%.*]] : $Builtin.NativeObject) +// CHECK: [[ARG1_COPY_COPY:%.*]] = copy_value [[ARG1_COPY]] +// CHECK: [[ARG2_COPY_COPY:%.*]] = copy_value [[ARG2_COPY]] +// CHECK: [[ARG1_COPY_COPY_BORROW:%.*]] = begin_borrow [[ARG1_COPY_COPY]] +// CHECK: [[ARG2_COPY_COPY_BORROW:%.*]] = begin_borrow [[ARG2_COPY_COPY]] +// CHECK: [[RESULT:%.*]] = struct $NativeObjectPair ([[ARG1_COPY_COPY_BORROW:%.*]] : $Builtin.NativeObject, [[ARG2_COPY_COPY_BORROW:%.*]] : $Builtin.NativeObject) +// CHECK: [[RESULT_COPY:%.*]] = copy_value [[RESULT]] +// CHECK: [[RESULT_COPY_2:%.*]] = copy_value [[RESULT_COPY]] // CHECK: destroy_addr [[STACK]] // CHECK: dealloc_stack [[STACK]] -// CHECK: return [[RESULT]] +// CHECK: return [[RESULT_COPY_2]] // CHECK: } // end sil function 'struct_nontrivial_load_promotion_multi_insertpt' sil [ossa] @struct_nontrivial_load_promotion_multi_insertpt : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject) -> @owned NativeObjectPair { bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $Builtin.NativeObject): @@ -306,12 +320,17 @@ bb3: // CHECK: br bb3([[ARG1_COPY]] : $Builtin.NativeObject) // // CHECK: bb3([[ARG1_COPY:%.*]] : @owned $Builtin.NativeObject): +// CHECK: [[ARG1_COPY_COPY:%.*]] = copy_value [[ARG1_COPY]] // CHECK: [[SECOND_ADDR:%.*]] = struct_element_addr [[STACK]] // CHECK: [[SECOND_VAL_COPY:%.*]] = load [copy] [[SECOND_ADDR]] -// CHECK: [[RESULT:%.*]] = struct $NativeObjectPair ([[ARG1_COPY:%.*]] : $Builtin.NativeObject, [[SECOND_VAL_COPY]] : $Builtin.NativeObject) +// CHECK: [[ARG1_COPY_COPY_BORROW:%.*]] = begin_borrow [[ARG1_COPY_COPY]] +// CHECK: [[SECOND_VAL_COPY_BORROW:%.*]] = begin_borrow [[SECOND_VAL_COPY]] +// CHECK: [[RESULT:%.*]] = struct $NativeObjectPair ([[ARG1_COPY_COPY_BORROW:%.*]] : $Builtin.NativeObject, [[SECOND_VAL_COPY_BORROW]] : $Builtin.NativeObject) +// CHECK: [[RESULT_COPY_1:%.*]] = copy_value [[RESULT]] +// CHECK: [[RESULT_COPY_2:%.*]] = copy_value [[RESULT_COPY_1]] // CHECK: destroy_addr [[STACK]] // CHECK: dealloc_stack [[STACK]] -// CHECK: return [[RESULT]] +// CHECK: return [[RESULT_COPY_2]] // CHECK: } // end sil function 'struct_nontrivial_load_promotion_multi_insertpt_value_not_fully_available' sil [ossa] @struct_nontrivial_load_promotion_multi_insertpt_value_not_fully_available : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @owned Builtin.NativeObject) -> @owned NativeObjectPair { bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $Builtin.NativeObject, %arg2 : @owned $Builtin.NativeObject): @@ -413,9 +432,11 @@ bb3: // CHECK: [[COPIED_ARG_FIELD:%.*]] = copy_value [[BORROWED_ARG_FIELD]] // CHECK: end_borrow 
[[BORROWED_ARG]] // CHECK: store [[ARG]] to [init] [[STACK]] +// CHECK: [[COPIED_ARG_FIELD_COPY_1:%.*]] = copy_value [[COPIED_ARG_FIELD]] +// CHECK: [[COPIED_ARG_FIELD_COPY_2:%.*]] = copy_value [[COPIED_ARG_FIELD_COPY_1]] // CHECK: destroy_addr [[STACK]] // CHECK: dealloc_stack [[STACK]] -// CHECK: return [[COPIED_ARG_FIELD]] +// CHECK: return [[COPIED_ARG_FIELD_COPY_2]] // CHECK: } // end sil function 'simple_partialstructuse_load_promotion' sil [ossa] @simple_partialstructuse_load_promotion : $@convention(thin) (@owned NativeObjectPair) -> (@owned Builtin.NativeObject) { bb0(%0 : @owned $NativeObjectPair): @@ -437,9 +458,11 @@ bb0(%0 : @owned $NativeObjectPair): // CHECK: [[COPIED_ARG_FIELD:%.*]] = copy_value [[BORROWED_ARG_FIELD_2]] // CHECK: end_borrow [[BORROWED_ARG]] // CHECK: store [[ARG]] to [init] [[STACK]] +// CHECK: [[COPIED_ARG_FIELD_COPY_1:%.*]] = copy_value [[COPIED_ARG_FIELD]] +// CHECK: [[COPIED_ARG_FIELD_COPY_2:%.*]] = copy_value [[COPIED_ARG_FIELD_COPY_1]] // CHECK: destroy_addr [[STACK]] // CHECK: dealloc_stack [[STACK]] -// CHECK: return [[COPIED_ARG_FIELD]] +// CHECK: return [[COPIED_ARG_FIELD_COPY_2]] // CHECK: } // end sil function 'simple_partialtupleuse_load_promotion' sil [ossa] @simple_partialtupleuse_load_promotion : $@convention(thin) (@owned NativeObjectAndTuple) -> (@owned Builtin.NativeObject) { bb0(%0 : @owned $NativeObjectAndTuple): @@ -459,9 +482,10 @@ bb0(%0 : @owned $NativeObjectAndTuple): // CHECK: store [[ARG0]] to [init] [[STACK]] // CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]] // CHECK: store [[ARG1]] to [assign] [[STACK]] +// CHECK: [[ARG1_COPY_1:%.*]] = copy_value [[ARG1_COPY]] // CHECK: destroy_addr [[STACK]] // CHECK: dealloc_stack [[STACK]] -// CHECK: return [[ARG1_COPY]] +// CHECK: return [[ARG1_COPY_1]] // CHECK: } // end sil function 'simple_assignstore' sil [ossa] @simple_assignstore : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject) -> @owned Builtin.NativeObject { bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $Builtin.NativeObject): @@ -488,11 +512,15 @@ bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $Builtin.NativeObject): // // CHECK: bb1: // CHECK: destroy_value [[LHS1_COPY]] -// CHECK: br bb3([[LHS2_COPY]] : +// CHECK: [[LHS2_COPY_1:%.*]] = copy_value [[LHS2_COPY]] +// CHECK: [[LHS2_COPY_2:%.*]] = copy_value [[LHS2_COPY_1]] +// CHECK: br bb3([[LHS2_COPY_2]] : // // CHECK: bb2: // CHECK: destroy_value [[LHS2_COPY]] -// CHECK: br bb3([[LHS1_COPY]] : +// CHECK: [[LHS1_COPY_1:%.*]] = copy_value [[LHS1_COPY]] +// CHECK: [[LHS1_COPY_2:%.*]] = copy_value [[LHS1_COPY_1]] +// CHECK: br bb3([[LHS1_COPY_2]] : // // CHECK: bb3([[PHI:%.*]] : // CHECK: destroy_addr [[STACK]] @@ -1340,3 +1368,570 @@ bbEnd: return %9999 : $() } +//--- + +// CHECK-LABEL: sil [ossa] @load_copy_promote_with_loop_1 : $@convention(thin) (@owned NativeObjectPair) -> () { +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'load_copy_promote_with_loop_1' +sil [ossa] @load_copy_promote_with_loop_1 : $@convention(thin) (@owned NativeObjectPair) -> () { +bb0(%0 : @owned $NativeObjectPair): + %1 = alloc_stack $NativeObjectPair + store %0 to [init] %1 : $*NativeObjectPair + %2 = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x + br bb2 + +bb2: + %3 = load [copy] %2 : $*Builtin.NativeObject + %4 = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %4(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3 : $Builtin.NativeObject + br bb2 
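+// Note: bb2 never exits, so there is no block in which a final destroy of the
+// promoted value could be placed; this test exercises the dead-end block
+// handling (DeadEndBlocks) that the pass relies on when balancing copies.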
+} + +// CHECK-LABEL: sil [ossa] @load_copy_loop_promote_with_loop_2 : $@convention(thin) (@owned NativeObjectPair) -> () { +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'load_copy_loop_promote_with_loop_2' +sil [ossa] @load_copy_loop_promote_with_loop_2 : $@convention(thin) (@owned NativeObjectPair) -> () { +bb0(%0 : @owned $NativeObjectPair): + %1 = alloc_stack $NativeObjectPair + store %0 to [init] %1 : $*NativeObjectPair + %2 = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x + br bb2 + +bb2: + %3 = load [copy] %2 : $*Builtin.NativeObject + %4 = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %4(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3 : $Builtin.NativeObject + cond_br undef, bb3, bb4 + +bb3: + br bb2 + +bb4: + destroy_addr %1 : $*NativeObjectPair + dealloc_stack %1 : $*NativeObjectPair + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @load_copy_promote_two_backedge_loop : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'load_copy_promote_two_backedge_loop' +sil [ossa] @load_copy_promote_two_backedge_loop : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = alloc_stack $Builtin.NativeObject + store %0 to [init] %1 : $*Builtin.NativeObject + br bb1 + +bb1: + br bb2 + +bb2: + cond_br undef, bb3, bb4 + +bb3: + %2 = load [copy] %1 : $*Builtin.NativeObject + destroy_value %2 : $Builtin.NativeObject + cond_br undef, bb5, bb6 + +bb4: + %3 = load [copy] %1 : $*Builtin.NativeObject + destroy_value %3 : $Builtin.NativeObject + cond_br undef, bb7, bb8 + +bb5: + br bb2 + +bb6: + br bb9 + +bb7: + br bb2 + +bb8: + br bb9 + +bb9: + destroy_addr %1 : $*Builtin.NativeObject + dealloc_stack %1 : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @load_copy_multiple_available_values_diamond_followed_by_loop : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject) -> () { +// CHECK: bb0( +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'load_copy_multiple_available_values_diamond_followed_by_loop' +sil [ossa] @load_copy_multiple_available_values_diamond_followed_by_loop : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject) -> () { +bb0(%0a : @owned $Builtin.NativeObject, %0b : @owned $Builtin.NativeObject): + %func = function_ref @nativeobjectpair_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () + %1 = alloc_stack $NativeObjectPair + %1a = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x + %1b = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.y + cond_br undef, bb1, bb2 + +bb1: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + br bb3 + +bb2: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + br bb3 + +bb3: + br bb4 + +bb4: + br bb5 + +bb5: + %2 = load [copy] %1 : $*NativeObjectPair + cond_br undef, bb6, bb7 + +bb6: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + destroy_value %2 : $NativeObjectPair + br bb5 + +bb7: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + destroy_value %2 : $NativeObjectPair + destroy_addr %1 : $*NativeObjectPair + dealloc_stack %1 : $*NativeObjectPair + %9999 = tuple() + return %9999 : $() +} + +// 
CHECK-LABEL: sil [ossa] @load_copy_multiple_available_values_diamond_followed_by_loop_reload : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @owned Builtin.NativeObject) -> () { +// CHECK-NOT: load [copy] {{%.*}} : $*NativeObjectPair +// CHECK: } // end sil function 'load_copy_multiple_available_values_diamond_followed_by_loop_reload' +sil [ossa] @load_copy_multiple_available_values_diamond_followed_by_loop_reload : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @owned Builtin.NativeObject) -> () { +bb0(%0a : @owned $Builtin.NativeObject, %0b : @owned $Builtin.NativeObject, %0c : @owned $Builtin.NativeObject): + %func = function_ref @nativeobjectpair_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () + %1 = alloc_stack $NativeObjectPair + %1a = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x + %1b = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.y + cond_br undef, bb1, bb2 + +bb1: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0c to [init] %1b : $*Builtin.NativeObject + destroy_value %0b : $Builtin.NativeObject + br bb3 + +bb2: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + destroy_value %0c : $Builtin.NativeObject + br bb3 + +bb3: + br bb4 + +bb4: + br bb5 + +bb5: + %2 = load [copy] %1 : $*NativeObjectPair + cond_br undef, bb6, bb7 + +bb6: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + destroy_value %2 : $NativeObjectPair + br bb5 + +bb7: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + destroy_value %2 : $NativeObjectPair + destroy_addr %1 : $*NativeObjectPair + dealloc_stack %1 : $*NativeObjectPair + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @load_copy_multiple_available_values_diamond_followed_by_loop_store_in_loop : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +// CHECK-NOT: load [copy] {{%.*}} : $*NativeObjectPair +// CHECK: } // end sil function 'load_copy_multiple_available_values_diamond_followed_by_loop_store_in_loop' +sil [ossa] @load_copy_multiple_available_values_diamond_followed_by_loop_store_in_loop : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +bb0(%0a : @owned $Builtin.NativeObject, %0b : @owned $Builtin.NativeObject, %0c : @guaranteed $Builtin.NativeObject): + %func = function_ref @nativeobjectpair_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () + %1 = alloc_stack $NativeObjectPair + %1a = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x + %1b = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.y + %0bhat = copy_value %0b : $Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + br bb3 + +bb2: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + br bb3 + +bb3: + br bb4 + +bb4: + br bb5 + +bb5: + %2 = load [copy] %1 : $*NativeObjectPair + cond_br undef, bb6, bb7 + +bb6: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + destroy_value %2 : $NativeObjectPair + destroy_addr %1b : $*Builtin.NativeObject + %0bhat2 = copy_value %0bhat : $Builtin.NativeObject + store %0bhat2 to [init] %1b : $*Builtin.NativeObject + br bb5 + +bb7: + apply %func(%2) : 
$@convention(thin) (@guaranteed NativeObjectPair) -> () + destroy_value %2 : $NativeObjectPair + destroy_value %0bhat : $Builtin.NativeObject + destroy_addr %1 : $*NativeObjectPair + dealloc_stack %1 : $*NativeObjectPair + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [canonical] [ossa] @loop_carry_loadcopy : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'loop_carry_loadcopy' +sil [canonical] [ossa] @loop_carry_loadcopy : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %func = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %1 = alloc_stack $Builtin.NativeObject + store %0 to [init] %1 : $*Builtin.NativeObject + cond_br undef, bb1, bb7 + +bb1: + br bb2 + +bb2: + br bb3 + +bb3: + %2 = load [copy] %1 : $*Builtin.NativeObject + cond_br undef, bb4, bb5 + +bb4: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %2 : $Builtin.NativeObject + br bb2 + +bb5: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %2 : $Builtin.NativeObject + br bb6 + +bb6: + br bb8 + +bb7: + br bb8 + +bb8: + destroy_addr %1 : $*Builtin.NativeObject + dealloc_stack %1 : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [canonical] [ossa] @loop_carry_loadcopy_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'loop_carry_loadcopy_2' +sil [canonical] [ossa] @loop_carry_loadcopy_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %func = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %1 = alloc_stack $Builtin.NativeObject + store %0 to [init] %1 : $*Builtin.NativeObject + cond_br undef, bb1, bb7 + +bb1: + br bb2 + +bb2: + br bb3 + +bb3: + %2 = load [copy] %1 : $*Builtin.NativeObject + cond_br undef, bb4, bb5 + +bb4: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %2 : $Builtin.NativeObject + br bb2 + +bb5: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %2 : $Builtin.NativeObject + br bb6 + +bb6: + br bb8 + +bb7: + br bb8 + +bb8: + destroy_addr %1 : $*Builtin.NativeObject + dealloc_stack %1 : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [canonical] [ossa] @loop_carry_loadcopy_3 : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'loop_carry_loadcopy_3' +sil [canonical] [ossa] @loop_carry_loadcopy_3 : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +bb0(%0a : @owned $Builtin.NativeObject, %0b : @owned $Builtin.NativeObject, %0c : @guaranteed $Builtin.NativeObject): + %func = function_ref @nativeobject_tuple_user : $@convention(thin) (@guaranteed (Builtin.NativeObject, Builtin.NativeObject)) -> () + %1 = alloc_stack $(Builtin.NativeObject, Builtin.NativeObject) + %1a = tuple_element_addr %1 : $*(Builtin.NativeObject, Builtin.NativeObject), 0 + %1b = tuple_element_addr %1 : $*(Builtin.NativeObject, Builtin.NativeObject), 1 + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : 
$*Builtin.NativeObject + cond_br undef, bb1, bb7 + +bb1: + br bb2 + +bb2: + br bb3 + +bb3: + %0ccopy = copy_value %0c : $Builtin.NativeObject + destroy_addr %1a : $*Builtin.NativeObject + store %0ccopy to [init] %1a : $*Builtin.NativeObject + %2 = load [copy] %1 : $*(Builtin.NativeObject, Builtin.NativeObject) + cond_br undef, bb4, bb5 + +bb4: + apply %func(%2) : $@convention(thin) (@guaranteed (Builtin.NativeObject, Builtin.NativeObject)) -> () + destroy_value %2 : $(Builtin.NativeObject, Builtin.NativeObject) + br bb2 + +bb5: + apply %func(%2) : $@convention(thin) (@guaranteed (Builtin.NativeObject, Builtin.NativeObject)) -> () + destroy_value %2 : $(Builtin.NativeObject, Builtin.NativeObject) + br bb6 + +bb6: + br bb8 + +bb7: + br bb8 + +bb8: + destroy_addr %1 : $*(Builtin.NativeObject, Builtin.NativeObject) + dealloc_stack %1 : $*(Builtin.NativeObject, Builtin.NativeObject) + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [canonical] [ossa] @loop_carry_loadcopy_4 : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'loop_carry_loadcopy_4' +sil [canonical] [ossa] @loop_carry_loadcopy_4 : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +bb0(%0a : @owned $Builtin.NativeObject, %0b : @owned $Builtin.NativeObject, %0c : @guaranteed $Builtin.NativeObject): + %func = function_ref @nativeobjectpair_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () + %1 = alloc_stack $NativeObjectPair + %1a = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x + %1b = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.y + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + cond_br undef, bb1, bb7 + +bb1: + br bb2 + +bb2: + br bb3 + +bb3: + %0ccopy = copy_value %0c : $Builtin.NativeObject + destroy_addr %1a : $*Builtin.NativeObject + store %0ccopy to [init] %1a : $*Builtin.NativeObject + %2 = load [copy] %1 : $*NativeObjectPair + cond_br undef, bb4, bb5 + +bb4: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + destroy_value %2 : $NativeObjectPair + br bb2 + +bb5: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + destroy_value %2 : $NativeObjectPair + br bb6 + +bb6: + br bb8 + +bb7: + br bb8 + +bb8: + destroy_addr %1 : $*NativeObjectPair + dealloc_stack %1 : $*NativeObjectPair + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @load_copy_loop_carry_load_copy_phi_not_control_equivalent : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'load_copy_loop_carry_load_copy_phi_not_control_equivalent' +sil [ossa] @load_copy_loop_carry_load_copy_phi_not_control_equivalent : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%arg : @owned $Builtin.NativeObject): + %func = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %0 = alloc_stack $Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + cond_br undef, bb3, bb4 + +bb2: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb5 + +bb3: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb6 + +bb4: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb7 + +bb5: + br bb8 + +bb6: + br bb8 + +bb7: + br bbPreLoopHeader + +bb8: + br bbPreLoopHeader + 
+bbPreLoopHeader: + br bbLoop + +bbLoop: + br bbLoop1 + +bbLoop1: + br bbLoop2 + +bbLoop2: + %2 = load [copy] %0 : $*Builtin.NativeObject + cond_br undef, bbLoop3, bbLoop4 + +bbLoop3: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %2 : $Builtin.NativeObject + br bbLoop2 + +bbLoop4: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %2 : $Builtin.NativeObject + br bbEnd + +bbEnd: + destroy_addr %0 : $*Builtin.NativeObject + dealloc_stack %0 : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// In this case, we will have that we need to separately lifetime extend our phi +// node's copy to prevent leaks along the edge skipping the loop. +// CHECK-LABEL: sil [ossa] @load_copy_loop_carry_load_copy_phi_not_control_equivalent_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'load_copy_loop_carry_load_copy_phi_not_control_equivalent_2' +sil [ossa] @load_copy_loop_carry_load_copy_phi_not_control_equivalent_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%arg : @owned $Builtin.NativeObject): + %func = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %0 = alloc_stack $Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + cond_br undef, bb3, bb4 + +bb2: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb5 + +bb3: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb6 + +bb4: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb7 + +bb5: + br bb8a + +bb6: + br bb8a + +bb7: + br bbPreLoopHeader + +bb8a: + br bb8 + +bb8: + cond_br undef, bbPreLoopHeader1, bbSkipLoop + +bbPreLoopHeader: + br bbLoop + +bbPreLoopHeader1: + br bbLoop + +bbLoop: + br bbLoop1 + +bbLoop1: + br bbLoop2 + +bbLoop2: + %2 = load [copy] %0 : $*Builtin.NativeObject + br bbLoop6 + +bbLoop6: + cond_br undef, bbLoop3, bbLoop4 + +bbLoop3: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %2 : $Builtin.NativeObject + br bbLoop5 + +bbLoop5: + br bbLoop2 + +bbLoop4: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %2 : $Builtin.NativeObject + br bbEnd + +bbSkipLoop: + br bbEnd + +bbEnd: + destroy_addr %0 : $*Builtin.NativeObject + dealloc_stack %0 : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + diff --git a/test/SILOptimizer/predictable_memopt_ownership.sil b/test/SILOptimizer/predictable_memopt_ownership.sil index ebb893ac4b803..e18ace13123ac 100644 --- a/test/SILOptimizer/predictable_memopt_ownership.sil +++ b/test/SILOptimizer/predictable_memopt_ownership.sil @@ -211,8 +211,12 @@ bb0(%0 : @owned $ContainsNativeObject): // CHECK: [[f3:%.*]] = struct_extract [[BORROWED_ARG]] : $ComplexStruct, #ComplexStruct.f1 // CHECK: [[f3_copy:%.*]] = copy_value [[f3]] // CHECK: end_borrow [[BORROWED_ARG]] +// CHECK: [[f3_copy_1:%.*]] = copy_value [[f3_copy]] +// CHECK: [[f3_copy_2:%.*]] = copy_value [[f3_copy_1]] +// CHECK: [[f2_x_copy_1:%.*]] = copy_value [[f2_x_copy]] +// CHECK: [[f2_x_copy_2:%.*]] = copy_value [[f2_x_copy_1]] // CHECK: destroy_value [[ARG]] -// CHECK: [[RESULT:%.*]] = tuple ([[f3_copy]] : $Builtin.NativeObject, [[f2_x_copy]] : $Builtin.NativeObject, [[f1]] : $Builtin.Int32) +// CHECK: [[RESULT:%.*]] = tuple ([[f3_copy_2]] : $Builtin.NativeObject, [[f2_x_copy_2]] : $Builtin.NativeObject, [[f1]] : $Builtin.Int32) // CHECK: return [[RESULT]] // CHECK: } // 
end sil function 'multiple_level_extract_2' sil [ossa] @multiple_level_extract_2 : $@convention(thin) (@owned ComplexStruct) -> (@owned Builtin.NativeObject, @owned Builtin.NativeObject, Builtin.Int32) { @@ -559,11 +563,15 @@ bb3: // // CHECK: bb1: // CHECK: destroy_value [[LHS1_COPY]] -// CHECK: br bb3([[LHS2_COPY]] : +// CHECK: [[LHS2_COPY_1:%.*]] = copy_value [[LHS2_COPY]] +// CHECK: [[LHS2_COPY_2:%.*]] = copy_value [[LHS2_COPY_1]] +// CHECK: br bb3([[LHS2_COPY_2]] : // // CHECK: bb2: // CHECK: destroy_value [[LHS2_COPY]] : $Builtin.NativeObject -// CHECK: br bb3([[LHS1_COPY]] : +// CHECK: [[LHS1_COPY_1:%.*]] = copy_value [[LHS1_COPY]] +// CHECK: [[LHS1_COPY_2:%.*]] = copy_value [[LHS1_COPY_1]] +// CHECK: br bb3([[LHS1_COPY_2]] : // // CHECK: bb3([[PHI:%.*]] : // CHECK: destroy_value [[ARG]] @@ -649,9 +657,11 @@ struct NativeObjectTriple { // CHECK-NEXT: br bb3([[PAIR_LHS_COPY]] : // // CHECK: bb3([[PHI:%.*]] : @owned $Builtin.NativeObject): -// CHECK-NEXT: [[REFORMED:%.*]] = struct $NativeObjectTriple ([[ARG0]] : {{.*}}, [[ARG1]] : {{.*}}) -// CHECK-NEXT: destroy_value [[REFORMED]] -// CHECK-NEXT: return [[PHI]] +// CHECK: [[PHI_COPY_1:%.*]] = copy_value [[PHI]] +// CHECK: [[PHI_COPY_2:%.*]] = copy_value [[PHI_COPY_1]] +// CHECK: [[REFORMED:%.*]] = struct $NativeObjectTriple ([[ARG0]] : {{.*}}, [[ARG1]] : {{.*}}) +// CHECK: destroy_value [[REFORMED]] +// CHECK: return [[PHI_COPY_2]] // CHECK: } // end sil function 'diamond_test_4' sil [ossa] @diamond_test_4 : $@convention(thin) (@owned Builtin.NativeObject, @owned NativeObjectPair) -> @owned Builtin.NativeObject { bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $NativeObjectPair): @@ -710,9 +720,14 @@ bb3: // CHECK: bb4: // CHECK: [[TRIPLE_RHS_LHS:%.*]] = struct_element_addr [[TRIPLE_RHS]] : $*NativeObjectPair, #NativeObjectPair.x // CHECK: [[TRIPLE_RHS_LHS_VAL:%.*]] = load [copy] [[TRIPLE_RHS_LHS]] : $*Builtin.NativeObject -// CHECK: [[STRUCT:%.*]] = struct $NativeObjectPair ([[TRIPLE_RHS_LHS_VAL]] : {{.*}}, [[TRIPLE_RHS_RHS_VAL]] : {{.*}}) +// CHECK: [[TRIPLE_RHS_RHS_VAL_COPY:%.*]] = copy_value [[TRIPLE_RHS_RHS_VAL]] +// CHECK: [[TRIPLE_RHS_LHS_VAL_BORROW:%.*]] = begin_borrow [[TRIPLE_RHS_LHS_VAL]] +// CHECK: [[TRIPLE_RHS_RHS_VAL_COPY_BORROW:%.*]] = begin_borrow [[TRIPLE_RHS_RHS_VAL_COPY]] +// CHECK: [[STRUCT:%.*]] = struct $NativeObjectPair ([[TRIPLE_RHS_LHS_VAL_BORROW]] : {{.*}}, [[TRIPLE_RHS_RHS_VAL_COPY_BORROW]] : {{.*}}) +// CHECK: [[STRUCT_COPY:%.*]] = copy_value [[STRUCT]] +// CHECK: [[STRUCT_COPY_2:%.*]] = copy_value [[STRUCT_COPY]] // CHECK: destroy_addr [[BOX]] -// CHECK: return [[STRUCT]] +// CHECK: return [[STRUCT_COPY_2]] // CHECK: } // end sil function 'diamond_test_5' sil [ossa] @diamond_test_5 : $@convention(thin) (@owned Builtin.NativeObject, @owned NativeObjectPair, @owned Builtin.NativeObject) -> @owned NativeObjectPair { bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $NativeObjectPair, %arg2 : @owned $Builtin.NativeObject): @@ -758,10 +773,14 @@ bb4: // CHECK: cond_br undef, [[CRITEDGE_BREAK_BB_1:bb[0-9]+]], [[CRITEDGE_BREAK_BB_2:bb[0-9]+]] // // CHECK: [[CRITEDGE_BREAK_BB_1]]: -// CHECK-NEXT: br [[SUCC_2:bb[0-9]+]]([[TRIPLE_RHS_RHS_VAL]] : +// CHECK-NEXT: [[TRIPLE_RHS_RHS_VAL_COPY:%.*]] = copy_value [[TRIPLE_RHS_RHS_VAL]] +// CHECK-NEXT: destroy_value [[TRIPLE_RHS_RHS_VAL]] +// CHECK-NEXT: br [[SUCC_2:bb[0-9]+]]([[TRIPLE_RHS_RHS_VAL_COPY]] : // // CHECK: [[CRITEDGE_BREAK_BB_2]]: -// CHECK-NEXT: br [[SUCC_1:bb[0-9]+]]([[TRIPLE_RHS_RHS_VAL]] : +// CHECK-NEXT: [[TRIPLE_RHS_RHS_VAL_COPY:%.*]] = copy_value 
[[TRIPLE_RHS_RHS_VAL]] +// CHECK-NEXT: destroy_value [[TRIPLE_RHS_RHS_VAL]] +// CHECK-NEXT: br [[SUCC_1:bb[0-9]+]]([[TRIPLE_RHS_RHS_VAL_COPY]] : // // CHECK: [[FALSE_BB]]: // CHECK: [[TRIPLE_LHS:%.*]] = struct_element_addr [[BOX]] : $*NativeObjectTriple, #NativeObjectTriple.f1 @@ -774,10 +793,14 @@ bb4: // CHECK: cond_br undef, [[CRITEDGE_BREAK_BB_1:bb[0-9]+]], [[CRITEDGE_BREAK_BB_2:bb[0-9]+]] // // CHECK: [[CRITEDGE_BREAK_BB_1]]: -// CHECK-NEXT: br [[SUCC_2]]([[TRIPLE_RHS_RHS_VAL]] : +// CHECK-NEXT: [[TRIPLE_RHS_RHS_VAL_COPY:%.*]] = copy_value [[TRIPLE_RHS_RHS_VAL]] +// CHECK-NEXT: destroy_value [[TRIPLE_RHS_RHS_VAL]] +// CHECK-NEXT: br [[SUCC_2]]([[TRIPLE_RHS_RHS_VAL_COPY]] : // // CHECK: [[CRITEDGE_BREAK_BB_2]]: -// CHECK-NEXT: br [[SUCC_1]]([[TRIPLE_RHS_RHS_VAL]] : +// CHECK-NEXT: [[TRIPLE_RHS_RHS_VAL_COPY:%.*]] = copy_value [[TRIPLE_RHS_RHS_VAL]] +// CHECK-NEXT: destroy_value [[TRIPLE_RHS_RHS_VAL]] +// CHECK-NEXT: br [[SUCC_1]]([[TRIPLE_RHS_RHS_VAL_COPY]] : // // CHECK: [[SUCC_2]]([[PHI1:%.*]] : @owned $Builtin.NativeObject): // CHECK: [[TRIPLE_RHS:%.*]] = struct_element_addr [[BOX]] : $*NativeObjectTriple, #NativeObjectTriple.f2 @@ -786,15 +809,21 @@ bb4: // CHECK: br [[EXIT_BB:bb[0-9]+]]([[PHI1:%.*]] : $Builtin.NativeObject) // // CHECK: [[SUCC_1]]([[PHI:%.*]] : @owned $Builtin.NativeObject): -// CHECK: br [[EXIT_BB]]([[PHI]] : {{.*}}) +// CHECK: [[PHI_COPY:%.*]] = copy_value [[PHI]] +// CHECK: br [[EXIT_BB]]([[PHI_COPY]] : {{.*}}) // // CHECK: [[EXIT_BB]]([[PHI:%.*]] : @owned $Builtin.NativeObject): // CHECK: [[TRIPLE_RHS:%.*]] = struct_element_addr [[BOX]] : $*NativeObjectTriple, #NativeObjectTriple.f2 // CHECK: [[TRIPLE_RHS_LHS:%.*]] = struct_element_addr [[TRIPLE_RHS]] : $*NativeObjectPair, #NativeObjectPair.x // CHECK: [[TRIPLE_RHS_LHS_VAL:%.*]] = load [copy] [[TRIPLE_RHS_LHS]] : $*Builtin.NativeObject -// CHECK: [[STRUCT:%.*]] = struct $NativeObjectPair ([[TRIPLE_RHS_LHS_VAL]] : {{.*}}, [[PHI]] : {{.*}}) +// CHECK: [[PHI_COPY:%.*]] = copy_value [[PHI]] +// CHECK: [[TRIPLE_RHS_LHS_VAL_BORROW:%.*]] = begin_borrow [[TRIPLE_RHS_LHS_VAL]] +// CHECK: [[PHI_COPY_BORROW:%.*]] = begin_borrow [[PHI_COPY]] +// CHECK: [[STRUCT:%.*]] = struct $NativeObjectPair ([[TRIPLE_RHS_LHS_VAL_BORROW]] : {{.*}}, [[PHI_COPY_BORROW]] : {{.*}}) +// CHECK: [[STRUCT_COPY_1:%.*]] = copy_value [[STRUCT]] +// CHECK: [[STRUCT_COPY_2:%.*]] = copy_value [[STRUCT_COPY_1]] // CHECK: destroy_addr [[BOX]] -// CHECK: return [[STRUCT]] +// CHECK: return [[STRUCT_COPY_2]] // CHECK: } // end sil function 'diamond_test_6' sil [ossa] @diamond_test_6 : $@convention(thin) (@owned Builtin.NativeObject, @owned NativeObjectPair, @owned Builtin.NativeObject) -> @owned NativeObjectPair { bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $NativeObjectPair, %arg2 : @owned $Builtin.NativeObject):