diff --git a/include/swift/AST/DiagnosticsSIL.def b/include/swift/AST/DiagnosticsSIL.def
index 71e51f877f864..207f9e4a19cf5 100644
--- a/include/swift/AST/DiagnosticsSIL.def
+++ b/include/swift/AST/DiagnosticsSIL.def
@@ -765,6 +765,12 @@ ERROR(sil_moveonlychecker_notconsumable_but_assignable_was_consumed_escaping_var
       (StringRef))
 ERROR(sil_moveonlychecker_let_capture_consumed, none,
       "'%0' was consumed but it is illegal to consume a noncopyable immutable capture of an escaping closure. One can only read from it",
       (StringRef))
+ERROR(sil_moveonlychecker_cannot_destructure_deinit_nominal_type_self, none,
+      "Cannot partially consume '%0' since it has a user defined deinit",
+      (StringRef))
+ERROR(sil_moveonlychecker_cannot_destructure_deinit_nominal_type_field, none,
+      "Cannot partially consume '%0' since it contains field '%1.%2' whose type '%3' has a user defined deinit",
+      (StringRef, StringRef, StringRef, DeclBaseName))
 
 NOTE(sil_moveonlychecker_moveonly_field_consumed_here, none,
      "move only field consumed here", ())
@@ -784,6 +790,8 @@ NOTE(sil_moveonlychecker_nonconsuming_use_here, none,
      "non-consuming use here", ())
 NOTE(sil_movekillscopyablevalue_value_cyclic_consumed_in_loop_here, none,
      "consuming in loop use here", ())
+NOTE(sil_moveonlychecker_deinit_here, none,
+     "deinit declared here", ())
 
 ERROR(sil_moveonlychecker_not_understand_consumable_and_assignable, none,
       "Usage of @noImplicitCopy that the move checker does not know how to "
diff --git a/lib/SILOptimizer/Mandatory/CMakeLists.txt b/lib/SILOptimizer/Mandatory/CMakeLists.txt
index e5deb95348c43..2a48a358fcfeb 100644
--- a/lib/SILOptimizer/Mandatory/CMakeLists.txt
+++ b/lib/SILOptimizer/Mandatory/CMakeLists.txt
@@ -23,17 +23,18 @@ target_sources(swiftSILOptimizer PRIVATE
   LexicalLifetimeEliminator.cpp
   LowerHopToActor.cpp
   MandatoryInlining.cpp
-  MovedAsyncVarDebugInfoPropagator.cpp
-  MoveOnlyAddressCheckerUtils.cpp
   MoveOnlyAddressCheckerTester.cpp
-  MoveOnlyBorrowToDestructureUtils.cpp
+  MoveOnlyAddressCheckerUtils.cpp
   MoveOnlyBorrowToDestructureTester.cpp
+  MoveOnlyBorrowToDestructureUtils.cpp
+  MoveOnlyChecker.cpp
   MoveOnlyDeinitInsertion.cpp
   MoveOnlyDiagnostics.cpp
-  MoveOnlyObjectCheckerUtils.cpp
   MoveOnlyObjectCheckerTester.cpp
-  MoveOnlyChecker.cpp
+  MoveOnlyObjectCheckerUtils.cpp
+  MoveOnlyTypeUtils.cpp
   MoveOnlyUtils.cpp
+  MovedAsyncVarDebugInfoPropagator.cpp
   NestedSemanticFunctionCheck.cpp
   OptimizeHopToExecutor.cpp
   PerformanceDiagnostics.cpp
diff --git a/lib/SILOptimizer/Mandatory/MoveOnlyAddressCheckerUtils.cpp b/lib/SILOptimizer/Mandatory/MoveOnlyAddressCheckerUtils.cpp
index 04ea3963c0953..5c715437d67d7 100644
--- a/lib/SILOptimizer/Mandatory/MoveOnlyAddressCheckerUtils.cpp
+++ b/lib/SILOptimizer/Mandatory/MoveOnlyAddressCheckerUtils.cpp
@@ -271,6 +271,7 @@
 #include "MoveOnlyBorrowToDestructureUtils.h"
 #include "MoveOnlyDiagnostics.h"
 #include "MoveOnlyObjectCheckerUtils.h"
+#include "MoveOnlyTypeUtils.h"
 #include "MoveOnlyUtils.h"
 
 #include 
@@ -521,13 +522,68 @@ namespace {
 
 struct UseState {
   MarkMustCheckInst *address;
 
+  /// A map from destroy_addr to the part of the type that it destroys.
   llvm::SmallMapVector<DestroyAddrInst *, TypeTreeLeafTypeRange, 4> destroys;
+
+  /// A map from a liveness requiring use to the part of the type that it
+  /// requires liveness for.
   llvm::SmallMapVector<SILInstruction *, TypeTreeLeafTypeRange, 4> livenessUses;
+
+  /// A map from a load [copy] or load [take] that we determined must be
+  /// converted to a load_borrow to the part of the type tree that it needs to
+  /// borrow.
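+  ///
+  /// For example (an illustrative sketch, not SIL from this patch, where
+  /// %readOnlyUser stands in for any non-consuming user):
+  ///
+  ///   %1 = load [copy] %addr
+  ///   apply %readOnlyUser(%1)
+  ///   destroy_value %1
+  ///
+  /// becomes:
+  ///
+  ///   %1 = load_borrow %addr
+  ///   apply %readOnlyUser(%1)
+  ///   end_borrow %1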
+  ///
+  /// NOTE: This does not include actual load_borrows, which are treated
+  /// just as liveness uses.
+  ///
+  /// NOTE: load_borrows whose values are copied are canonicalized early into
+  /// load [copy] + begin_borrow so that we do not need to convert a
+  /// load_borrow into a normal load when rewriting.
   llvm::SmallMapVector<SILInstruction *, TypeTreeLeafTypeRange, 4> borrows;
+
+  /// A map from a copy_addr, load [copy], or load [take] that we determine is
+  /// semantically truly a take to the part of the type tree that it uses.
+  ///
+  /// DISCUSSION: A copy_addr [init] or load [copy] is considered an actual
+  /// take if its result is never destroyed with a destroy_addr/destroy_value.
+  /// We consider these to be takes since, after the transform, they must
+  /// become takes.
+  ///
+  /// Importantly, we know these are never copied and are only consumed once.
   llvm::SmallMapVector<SILInstruction *, TypeTreeLeafTypeRange, 4> takeInsts;
+
+  /// A map from a copy_addr, load [copy], or load [take] that we determine
+  /// semantically is a true copy to the part of the type tree it must copy.
+  ///
+  /// DISCUSSION: One of these instructions being a true copy means that its
+  /// result or destination is used in a way that requires some sort of extra
+  /// copy. Example:
+  ///
+  ///   %0 = load [take] %addr
+  ///   %1 = copy_value %0
+  ///   consume(%0)
+  ///   consume(%1)
+  ///
+  /// Notice how the load [take] above semantically requires a copy since it
+  /// was consumed twice even though SILGen emitted it as a load [take].
+  ///
+  /// We represent these separately from \p takeInsts.
   llvm::SmallMapVector<SILInstruction *, TypeTreeLeafTypeRange, 4> copyInsts;
+
+  /// A map from an instruction that initializes memory to the description of
+  /// the part of the type tree that it initializes.
   llvm::SmallMapVector<SILInstruction *, TypeTreeLeafTypeRange, 4> initInsts;
+
+  /// memInstMustReinitialize insts. Contains both insts like copy_addr/store
+  /// [assign] that are reinits that we will convert to inits, and true
+  /// reinits.
   llvm::SmallMapVector<SILInstruction *, TypeTreeLeafTypeRange, 4> reinitInsts;
+
+  /// An "inout terminator use" is an implicit liveness use of the entire
+  /// value placed on a terminator. We use this set both to add liveness for
+  /// the terminator user and, later while emitting diagnostics, to quickly
+  /// identify that a liveness use is a terminator user so we can emit a
+  /// specific diagnostic message.
   SmallSetVector<SILInstruction *, 2> inoutTermUsers;
 
   /// We add debug_values to liveness late after we diagnose, but before we
@@ -1251,6 +1307,57 @@ struct CopiedLoadBorrowEliminationVisitor : public AccessUseVisitor {
 }
 
 } // namespace
 
+//===----------------------------------------------------------------------===//
+// MARK: DestructureThroughDeinit Checking
+//===----------------------------------------------------------------------===//
+
+static void
+checkForDestructureThroughDeinit(MarkMustCheckInst *rootAddress, Operand *use,
+                                 TypeTreeLeafTypeRange usedBits,
+                                 DiagnosticEmitter &diagnosticEmitter) {
+  LLVM_DEBUG(llvm::dbgs() << "    DestructureNeedingUse: " << *use->getUser());
+
+  SILFunction *fn = rootAddress->getFunction();
+  SILModule &mod = fn->getModule();
+
+  // We walk down from our ancestor to our projection, emitting an error if any
+  // of our types have a deinit.
+  TypeOffsetSizePair pair(usedBits);
+  auto targetType = use->get()->getType();
+  auto iterType = rootAddress->getType();
+  TypeOffsetSizePair iterPair(iterType, fn);
+
+  while (iterType != targetType) {
+    // If we have a nominal type as our parent type, see if it has a
+    // deinit.
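+    //
+    // For example (an illustrative sketch, not code from this patch): given
+    //
+    //   @_moveOnly struct S { var k: MoveOnlyKlass; deinit {} }
+    //
+    // a consume of `s.k` walks from S down towards MoveOnlyKlass and stops at
+    // S itself: S has a user defined deinit, so destructuring through it
+    // would skip that deinit, and we must diagnose instead.
+    //
+    // 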
We know that it must be non-copyable since copyable types + // cannot contain non-copyable types and that our parent root type must be + // an enum, tuple, or struct. + if (auto *nom = iterType.getNominalOrBoundGenericNominal()) { + if (mod.lookUpMoveOnlyDeinitFunction(nom)) { + // If we find one, emit an error since we are going to have to extract + // through the deinit. Emit a nice error saying what it is. Since we + // are emitting an error, we do a bit more work and construct the + // actual projection string. + SmallString<128> pathString; + auto rootType = rootAddress->getType(); + if (iterType != rootType) { + llvm::raw_svector_ostream os(pathString); + pair.constructPathString(iterType, {rootType, fn}, rootType, fn, os); + } + + diagnosticEmitter.emitCannotDestructureDeinitNominalError( + rootAddress, pathString, nom, use->getUser()); + break; + } + } + + // Otherwise, walk one level towards our child type. We unconditionally + // unwrap since we should never fail here due to earlier checking. + std::tie(iterPair, iterType) = + *pair.walkOneLevelTowardsChild(iterPair, iterType, fn); + } +} + //===----------------------------------------------------------------------===// // MARK: GatherLexicalLifetimeUseVisitor //===----------------------------------------------------------------------===// @@ -1446,6 +1553,13 @@ bool GatherUsesVisitor::visitUse(Operand *op, AccessUseType useTy) { if (!leafRange) return false; + // TODO: Add borrow checking here like below. + + // TODO: Add destructure deinit checking here once address only checking is + // completely brought up. + + // TODO: Add check here that we don't error on trivial/copyable types. + if (copyAddr->isTakeOfSrc()) { LLVM_DEBUG(llvm::dbgs() << "Found take: " << *user); useState.takeInsts.insert({user, *leafRange}); @@ -1477,151 +1591,160 @@ bool GatherUsesVisitor::visitUse(Operand *op, AccessUseType useTy) { return true; } - if (li->getOwnershipQualifier() == LoadOwnershipQualifier::Copy || - li->getOwnershipQualifier() == LoadOwnershipQualifier::Take) { - - OSSACanonicalizer::LivenessState livenessState(moveChecker.canonicalizer, - li); + // We must have a load [take] or load [copy] here since we are in OSSA. + OSSACanonicalizer::LivenessState livenessState(moveChecker.canonicalizer, + li); + + // Before we do anything, run the borrow to destructure transform in case + // we have a switch_enum user. + unsigned numDiagnostics = + moveChecker.diagnosticEmitter.getDiagnosticCount(); + BorrowToDestructureTransform borrowToDestructure( + moveChecker.allocator, markedValue, li, moveChecker.diagnosticEmitter, + moveChecker.poa); + if (!borrowToDestructure.transform()) { + assert(moveChecker.diagnosticEmitter + .didEmitCheckerDoesntUnderstandDiagnostic()); + LLVM_DEBUG(llvm::dbgs() + << "Failed to perform borrow to destructure transform!\n"); + emittedEarlyDiagnostic = true; + return false; + } - // Before we do anything, run the borrow to destructure transform in case - // we have a switch_enum user. 
- unsigned numDiagnostics = - moveChecker.diagnosticEmitter.getDiagnosticCount(); - BorrowToDestructureTransform borrowToDestructure( - moveChecker.allocator, markedValue, li, moveChecker.diagnosticEmitter, - moveChecker.poa); - if (!borrowToDestructure.transform()) { - assert(moveChecker.diagnosticEmitter - .didEmitCheckerDoesntUnderstandDiagnostic()); - LLVM_DEBUG(llvm::dbgs() - << "Failed to perform borrow to destructure transform!\n"); - emittedEarlyDiagnostic = true; - return false; - } + // If we emitted an error diagnostic, do not transform further and instead + // mark that we emitted an early diagnostic and return true. + if (numDiagnostics != moveChecker.diagnosticEmitter.getDiagnosticCount()) { + LLVM_DEBUG(llvm::dbgs() << "Emitting borrow to destructure error!\n"); + emittedEarlyDiagnostic = true; + return true; + } - // If we emitted an error diagnostic, do not transform further and instead - // mark that we emitted an early diagnostic and return true. - if (numDiagnostics != - moveChecker.diagnosticEmitter.getDiagnosticCount()) { - LLVM_DEBUG(llvm::dbgs() << "Emitting borrow to destructure error!\n"); - emittedEarlyDiagnostic = true; - return true; - } + // Now, validate that what we will transform into a take isn't a take that + // would invalidate a field that has a deinit. + auto leafRange = TypeTreeLeafTypeRange::get(op->get(), getRootAddress()); + if (!leafRange) { + LLVM_DEBUG(llvm::dbgs() + << "Failed to compute leaf range for: " << *op->get()); + return false; + } - // Canonicalize the lifetime of the load [take], load [copy]. - LLVM_DEBUG(llvm::dbgs() << "Running copy propagation!\n"); - moveChecker.changed |= moveChecker.canonicalizer.canonicalize(); - - // If we are asked to perform no_consume_or_assign checking or - // assignable_but_not_consumable checking, if we found any consumes of our - // load, then we need to emit an error. - auto checkKind = markedValue->getCheckKind(); - if (checkKind != MarkMustCheckInst::CheckKind::ConsumableAndAssignable) { - if (moveChecker.canonicalizer.foundAnyConsumingUses()) { - LLVM_DEBUG(llvm::dbgs() - << "Found mark must check [nocopy] error: " << *user); - auto *fArg = dyn_cast( - stripAccessMarkers(markedValue->getOperand())); - if (fArg && fArg->isClosureCapture() && fArg->getType().isAddress()) { - moveChecker.diagnosticEmitter.emitPromotedBoxArgumentError( - markedValue, fArg); - } else { - moveChecker.diagnosticEmitter - .emitAddressEscapingClosureCaptureLoadedAndConsumed( - markedValue); - } - emittedEarlyDiagnostic = true; - return true; - } + checkForDestructureThroughDeinit(markedValue, op, *leafRange, + diagnosticEmitter); - // If set, this will tell the checker that we can change this load into - // a load_borrow. - auto leafRange = - TypeTreeLeafTypeRange::get(op->get(), getRootAddress()); - if (!leafRange) - return false; + // If we emitted an error diagnostic, do not transform further and instead + // mark that we emitted an early diagnostic and return true. + if (numDiagnostics != moveChecker.diagnosticEmitter.getDiagnosticCount()) { + LLVM_DEBUG(llvm::dbgs() + << "Emitting destructure through deinit error!\n"); + emittedEarlyDiagnostic = true; + return true; + } - LLVM_DEBUG(llvm::dbgs() << "Found potential borrow: " << *user); + // Canonicalize the lifetime of the load [take], load [copy]. 
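+  // (Illustrative sketch, not SIL from this patch: canonicalization rewrites,
+  // e.g.,
+  //
+  //   %1 = load [copy] %addr            %1 = load [copy] %addr
+  //   %2 = copy_value %1          =>    consume(%1)
+  //   destroy_value %1
+  //   consume(%2)
+  //
+  // so the checks below see a minimal set of copies and consumes.)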
+ LLVM_DEBUG(llvm::dbgs() << "Running copy propagation!\n"); + moveChecker.changed |= moveChecker.canonicalizer.canonicalize(); - if (checkForExclusivityHazards(li)) { - LLVM_DEBUG(llvm::dbgs() << "Found exclusivity violation?!\n"); - emittedEarlyDiagnostic = true; - return true; + // If we are asked to perform no_consume_or_assign checking or + // assignable_but_not_consumable checking, if we found any consumes of our + // load, then we need to emit an error. + auto checkKind = markedValue->getCheckKind(); + if (checkKind != MarkMustCheckInst::CheckKind::ConsumableAndAssignable) { + if (moveChecker.canonicalizer.foundAnyConsumingUses()) { + LLVM_DEBUG(llvm::dbgs() + << "Found mark must check [nocopy] error: " << *user); + auto *fArg = dyn_cast( + stripAccessMarkers(markedValue->getOperand())); + if (fArg && fArg->isClosureCapture() && fArg->getType().isAddress()) { + moveChecker.diagnosticEmitter.emitPromotedBoxArgumentError( + markedValue, fArg); + } else { + moveChecker.diagnosticEmitter + .emitAddressEscapingClosureCaptureLoadedAndConsumed(markedValue); } + emittedEarlyDiagnostic = true; + return true; + } - useState.borrows.insert({user, *leafRange}); + // If set, this will tell the checker that we can change this load into + // a load_borrow. + auto leafRange = TypeTreeLeafTypeRange::get(op->get(), getRootAddress()); + if (!leafRange) + return false; - // If we had a load [copy], borrow then we know that all of its destroys - // must have been destroy_value. So we can just gather up those - // destroy_value and use then to create liveness to ensure that our - // value is alive over the entire borrow scope we are going to create. - LLVM_DEBUG(llvm::dbgs() << "Adding destroys from load as liveness uses " - "since they will become end_borrows.\n"); - for (auto *consumeUse : li->getConsumingUses()) { - auto *dvi = cast(consumeUse->getUser()); - useState.livenessUses.insert({dvi, *leafRange}); - } + LLVM_DEBUG(llvm::dbgs() << "Found potential borrow: " << *user); + if (checkForExclusivityHazards(li)) { + LLVM_DEBUG(llvm::dbgs() << "Found exclusivity violation?!\n"); + emittedEarlyDiagnostic = true; return true; } - // First check if we had any consuming uses that actually needed a - // copy. This will always be an error and we allow the user to recompile - // and eliminate the error. This just allows us to rely on invariants - // later. - if (moveChecker.canonicalizer.foundConsumingUseRequiringCopy()) { - LLVM_DEBUG(llvm::dbgs() - << "Found that load at object level requires copies!\n"); - // If we failed to understand how to perform the check or did not find - // any targets... continue. In the former case we want to fail with a - // checker did not understand diagnostic later and in the former, we - // succeeded. - // Otherwise, emit the diagnostic. - moveChecker.diagnosticEmitter.emitObjectOwnedDiagnostic(markedValue); - emittedEarlyDiagnostic = true; - LLVM_DEBUG(llvm::dbgs() << "Emitted early object level diagnostic.\n"); - return true; + useState.borrows.insert({user, *leafRange}); + + // If we had a load [copy], borrow then we know that all of its destroys + // must have been destroy_value. So we can just gather up those + // destroy_value and use then to create liveness to ensure that our + // value is alive over the entire borrow scope we are going to create. 
+ LLVM_DEBUG(llvm::dbgs() << "Adding destroys from load as liveness uses " + "since they will become end_borrows.\n"); + for (auto *consumeUse : li->getConsumingUses()) { + auto *dvi = cast(consumeUse->getUser()); + useState.livenessUses.insert({dvi, *leafRange}); } - // Then if we had any final consuming uses, mark that this liveness use is - // a take/copy and if not, mark this as a borrow. - auto leafRange = TypeTreeLeafTypeRange::get(op->get(), getRootAddress()); - if (!leafRange) - return false; + return true; + } - if (!moveChecker.canonicalizer.foundFinalConsumingUses()) { - LLVM_DEBUG(llvm::dbgs() << "Found potential borrow inst: " << *user); - if (checkForExclusivityHazards(li)) { - LLVM_DEBUG(llvm::dbgs() << "Found exclusivity violation?!\n"); - emittedEarlyDiagnostic = true; - return true; - } + // First check if we had any consuming uses that actually needed a + // copy. This will always be an error and we allow the user to recompile + // and eliminate the error. This just allows us to rely on invariants + // later. + if (moveChecker.canonicalizer.foundConsumingUseRequiringCopy()) { + LLVM_DEBUG(llvm::dbgs() + << "Found that load at object level requires copies!\n"); + // If we failed to understand how to perform the check or did not find + // any targets... continue. In the former case we want to fail with a + // checker did not understand diagnostic later and in the former, we + // succeeded. + // Otherwise, emit the diagnostic. + moveChecker.diagnosticEmitter.emitObjectOwnedDiagnostic(markedValue); + emittedEarlyDiagnostic = true; + LLVM_DEBUG(llvm::dbgs() << "Emitted early object level diagnostic.\n"); + return true; + } - useState.borrows.insert({user, *leafRange}); - // If we had a load [copy], borrow then we know that all of its destroys - // must have been destroy_value. So we can just gather up those - // destroy_value and use then to create liveness to ensure that our - // value is alive over the entire borrow scope we are going to create. - LLVM_DEBUG(llvm::dbgs() << "Adding destroys from load as liveness uses " - "since they will become end_borrows.\n"); - for (auto *consumeUse : li->getConsumingUses()) { - auto *dvi = cast(consumeUse->getUser()); - useState.livenessUses.insert({dvi, *leafRange}); - } + if (!moveChecker.canonicalizer.foundFinalConsumingUses()) { + LLVM_DEBUG(llvm::dbgs() << "Found potential borrow inst: " << *user); + if (checkForExclusivityHazards(li)) { + LLVM_DEBUG(llvm::dbgs() << "Found exclusivity violation?!\n"); + emittedEarlyDiagnostic = true; + return true; + } + + useState.borrows.insert({user, *leafRange}); + // If we had a load [copy], borrow then we know that all of its destroys + // must have been destroy_value. So we can just gather up those + // destroy_value and use then to create liveness to ensure that our + // value is alive over the entire borrow scope we are going to create. + LLVM_DEBUG(llvm::dbgs() << "Adding destroys from load as liveness uses " + "since they will become end_borrows.\n"); + for (auto *consumeUse : li->getConsumingUses()) { + auto *dvi = cast(consumeUse->getUser()); + useState.livenessUses.insert({dvi, *leafRange}); + } + } else { + // If we had a load [copy], store this into the copy list. These are the + // things that we must merge into destroy_addr or reinits after we are + // done checking. The load [take] are already complete and good to go. 
+    if (li->getOwnershipQualifier() == LoadOwnershipQualifier::Take) {
+      LLVM_DEBUG(llvm::dbgs() << "Found take inst: " << *user);
+      useState.takeInsts.insert({user, *leafRange});
+    } else {
-      // If we had a load [copy], store this into the copy list. These are the
-      // things that we must merge into destroy_addr or reinits after we are
-      // done checking. The load [take] are already complete and good to go.
-      if (li->getOwnershipQualifier() == LoadOwnershipQualifier::Take) {
-        LLVM_DEBUG(llvm::dbgs() << "Found take inst: " << *user);
-        useState.takeInsts.insert({user, *leafRange});
-      } else {
-        LLVM_DEBUG(llvm::dbgs() << "Found copy inst: " << *user);
-        useState.copyInsts.insert({user, *leafRange});
-      }
+      LLVM_DEBUG(llvm::dbgs() << "Found copy inst: " << *user);
+      useState.copyInsts.insert({user, *leafRange});
     }
-    return true;
   }
+  return true;
 }
 
 // Now that we have handled or loadTakeOrCopy, we need to now track our
@@ -2300,8 +2423,9 @@ bool MoveOnlyAddressCheckerPImpl::performSingleCheck(
     return false;
   }
 
-  // Before we do anything, convert any load_borrow + copy_value into load
-  // [copy] + begin_borrow for further processing.
+  // Before we do anything, canonicalize load_borrow + copy_value into load
+  // [copy] + begin_borrow for further processing. This just eliminates a case
+  // that the checker doesn't need to know about.
   {
     CopiedLoadBorrowEliminationVisitor copiedLoadBorrowEliminator(fn);
     if (!visitAccessPathBaseUses(copiedLoadBorrowEliminator, accessPathWithBase,
@@ -2341,6 +2465,10 @@ bool MoveOnlyAddressCheckerPImpl::performSingleCheck(
   if (diagCount != diagnosticEmitter.getDiagnosticCount())
     return true;
 
+  // Then check if we emitted an error. If we did, return true.
+  if (diagCount != diagnosticEmitter.getDiagnosticCount())
+    return true;
+
   //===---
   // Liveness Checking
   //
diff --git a/lib/SILOptimizer/Mandatory/MoveOnlyBorrowToDestructureUtils.cpp b/lib/SILOptimizer/Mandatory/MoveOnlyBorrowToDestructureUtils.cpp
index 5a9dda52fae42..c578f0aaba732 100644
--- a/lib/SILOptimizer/Mandatory/MoveOnlyBorrowToDestructureUtils.cpp
+++ b/lib/SILOptimizer/Mandatory/MoveOnlyBorrowToDestructureUtils.cpp
@@ -28,6 +28,7 @@
 #include "MoveOnlyBorrowToDestructureUtils.h"
 #include "MoveOnlyDiagnostics.h"
 #include "MoveOnlyObjectCheckerUtils.h"
+#include "MoveOnlyTypeUtils.h"
 
 #include "swift/Basic/BlotSetVector.h"
 #include "swift/Basic/Defer.h"
@@ -544,236 +545,6 @@ void Implementation::checkDestructureUsesOnBoundary() const {
   }
 }
 
-static StructDecl *getFullyReferenceableStruct(SILType ktypeTy) {
-  auto structDecl = ktypeTy.getStructOrBoundGenericStruct();
-  if (!structDecl || structDecl->hasUnreferenceableStorage())
-    return nullptr;
-  return structDecl;
-}
-
-namespace {
-
-struct TypeOffsetSizePair {
-  SubElementOffset startOffset = 0;
-  TypeSubElementCount size = 0;
-
-  TypeOffsetSizePair() : startOffset(0), size(0) {}
-  TypeOffsetSizePair(SILType baseType, SILFunction *fn)
-      : startOffset(0), size(baseType, fn) {}
-  TypeOffsetSizePair(SubElementOffset offset, TypeSubElementCount size)
-      : startOffset(offset), size(size) {}
-  TypeOffsetSizePair(SILValue projection, SILValue base)
-      : startOffset(*SubElementOffset::compute(projection, base)),
-        size(TypeSubElementCount(projection)) {}
-
-  IntRange<SubElementOffset> getRange() const {
-    return range(startOffset, getEndOffset());
-  }
-
-  SubElementOffset getEndOffset() const {
-    return SubElementOffset(startOffset + size);
-  }
-
-  bool operator==(const TypeOffsetSizePair &other) const {
-    return startOffset == other.startOffset && size == other.size;
-  }
-
-  
bool operator!=(const TypeOffsetSizePair &other) const { - return !(*this == other); - } - - /// Given an ancestor offset \p ancestorOffset and a type called \p - /// ancestorType, walk one level towards this current type which is assumed to - /// be a child type of \p ancestorType. - Optional> - walkOneLevelTowardsChild(TypeOffsetSizePair ancestorOffsetSize, - SILType ancestorType, SILFunction *fn) const { - assert(ancestorOffsetSize.size >= size && - "Too large to be a child of ancestorType"); - assert((ancestorOffsetSize.startOffset <= startOffset && - startOffset < - (ancestorOffsetSize.startOffset + ancestorOffsetSize.size)) && - "Not within the offset range of ancestor"); - - if (auto tupleType = ancestorType.getAs()) { - // Before we do anything, see if we have a single element tuple. If we do, - // just return that. - if (tupleType->getNumElements() == 1) { - return {{ancestorOffsetSize, ancestorType.getTupleElementType(0)}}; - } - - assert(ancestorOffsetSize.size > size && - "Too large to be a child of ancestorType"); - - unsigned childOffset = ancestorOffsetSize.startOffset; - - for (auto index : indices(tupleType->getElementTypes())) { - SILType newType = ancestorType.getTupleElementType(index); - unsigned newSize = TypeSubElementCount(newType, fn); - - // childOffset + size(tupleChild) is the offset of the next tuple - // element. If our target offset is less than that, then we know that - // the target type must be a descendent of this tuple element type. - if (childOffset + newSize > startOffset) { - return {{{childOffset, newSize}, newType}}; - } - - // Otherwise, add the new size of this field to iterOffset so we visit - // our sibling type next. - childOffset += newSize; - } - - // At this point, we know that our type is not a subtype of this - // type. Some sort of logic error occurred. - llvm_unreachable("Not a child of this type?!"); - } - - if (auto *structDecl = getFullyReferenceableStruct(ancestorType)) { - // Before we do anything, see if we have a single element struct. If we - // do, just return that. - auto storedProperties = structDecl->getStoredProperties(); - if (storedProperties.size() == 1) { - return {{ancestorOffsetSize, - ancestorType.getFieldType(storedProperties[0], fn)}}; - } - - assert(ancestorOffsetSize.size > size && - "Too large to be a child of ancestorType"); - - unsigned childOffset = ancestorOffsetSize.startOffset; - for (auto *fieldDecl : storedProperties) { - SILType newType = ancestorType.getFieldType(fieldDecl, fn); - unsigned newSize = TypeSubElementCount(newType, fn); - - // iterOffset + size(tupleChild) is the offset of the next tuple - // element. If our target offset is less than that, then we know that - // the target type must be a child of this tuple element type. - if (childOffset + newSize > startOffset) { - return {{{childOffset, newSize}, newType}}; - } - - // Otherwise, add the new size of this field to iterOffset so we visit - // our sibling type next. - childOffset += newSize; - } - - // At this point, we know that our type is not a subtype of this - // type. Some sort of logic error occurred. - llvm_unreachable("Not a child of this type?!"); - } - - if (auto *enumDecl = ancestorType.getEnumOrBoundGenericEnum()) { - llvm_unreachable("Cannot find child type of enum!\n"); - } - - llvm_unreachable("Hit a leaf type?! 
Should have handled it earlier"); - } - - /// Given an ancestor offset \p ancestorOffset and a type called \p - /// ancestorType, walk one level towards this current type inserting on value, - /// the relevant projection. - Optional> - walkOneLevelTowardsChild(SILBuilderWithScope &builder, SILLocation loc, - TypeOffsetSizePair ancestorOffsetSize, - SILValue ancestorValue) const { - auto *fn = ancestorValue->getFunction(); - SILType ancestorType = ancestorValue->getType(); - - assert(ancestorOffsetSize.size >= size && - "Too large to be a child of ancestorType"); - assert((ancestorOffsetSize.startOffset <= startOffset && - startOffset < - (ancestorOffsetSize.startOffset + ancestorOffsetSize.size)) && - "Not within the offset range of ancestor"); - if (auto tupleType = ancestorType.getAs()) { - // Before we do anything, see if we have a single element tuple. If we do, - // just return that. - if (tupleType->getNumElements() == 1) { - auto *newValue = builder.createTupleExtract(loc, ancestorValue, 0); - return {{ancestorOffsetSize, newValue}}; - } - - assert(ancestorOffsetSize.size > size && - "Too large to be a child of ancestorType"); - - unsigned childOffset = ancestorOffsetSize.startOffset; - - for (auto index : indices(tupleType->getElementTypes())) { - SILType newType = ancestorType.getTupleElementType(index); - unsigned newSize = TypeSubElementCount(newType, fn); - - // childOffset + size(tupleChild) is the offset of the next tuple - // element. If our target offset is less than that, then we know that - // the target type must be a descendent of this tuple element type. - if (childOffset + newSize > startOffset) { - auto *newValue = - builder.createTupleExtract(loc, ancestorValue, index); - return {{{childOffset, newSize}, newValue}}; - } - - // Otherwise, add the new size of this field to iterOffset so we visit - // our sibling type next. - childOffset += newSize; - } - - // At this point, we know that our type is not a subtype of this - // type. Some sort of logic error occurred. - llvm_unreachable("Not a child of this type?!"); - } - - if (auto *structDecl = getFullyReferenceableStruct(ancestorType)) { - // Before we do anything, see if we have a single element struct. If we - // do, just return that. - auto storedProperties = structDecl->getStoredProperties(); - if (storedProperties.size() == 1) { - auto *newValue = builder.createStructExtract(loc, ancestorValue, - storedProperties[0]); - return {{ancestorOffsetSize, newValue}}; - } - - assert(ancestorOffsetSize.size > size && - "Too large to be a child of ancestorType"); - - unsigned childOffset = ancestorOffsetSize.startOffset; - for (auto *fieldDecl : structDecl->getStoredProperties()) { - SILType newType = ancestorType.getFieldType(fieldDecl, fn); - unsigned newSize = TypeSubElementCount(newType, fn); - - // iterOffset + size(tupleChild) is the offset of the next tuple - // element. If our target offset is less than that, then we know that - // the target type must be a child of this tuple element type. - if (childOffset + newSize > startOffset) { - auto *newValue = - builder.createStructExtract(loc, ancestorValue, fieldDecl); - return {{{childOffset, newSize}, newValue}}; - } - - // Otherwise, add the new size of this field to iterOffset so we visit - // our sibling type next. - childOffset += newSize; - } - - // At this point, we know that our type is not a subtype of this - // type. Some sort of logic error occurred. 
- llvm_unreachable("Not a child of this type?!"); - } - - if (auto *enumDecl = ancestorType.getEnumOrBoundGenericEnum()) { - llvm_unreachable("Cannot find child type of enum!\n"); - } - - llvm_unreachable("Hit a leaf type?! Should have handled it earlier"); - } -}; - -llvm::raw_ostream &operator<<(llvm::raw_ostream &os, - const TypeOffsetSizePair &other) { - return os << "(startOffset: " << other.startOffset << ", size: " << other.size - << ")"; -} - -} // anonymous namespace - #ifndef NDEBUG static void dumpSmallestTypeAvailable( SmallVectorImpl>> diff --git a/lib/SILOptimizer/Mandatory/MoveOnlyDiagnostics.cpp b/lib/SILOptimizer/Mandatory/MoveOnlyDiagnostics.cpp index 2f5c1db7a5ec4..8db7db4fdf0d4 100644 --- a/lib/SILOptimizer/Mandatory/MoveOnlyDiagnostics.cpp +++ b/lib/SILOptimizer/Mandatory/MoveOnlyDiagnostics.cpp @@ -750,3 +750,28 @@ void DiagnosticEmitter::emitPromotedBoxArgumentError( diagnose(astContext, user, diag::sil_moveonlychecker_consuming_use_here); } } + +void DiagnosticEmitter::emitCannotDestructureDeinitNominalError( + MarkMustCheckInst *markedValue, StringRef pathString, + NominalTypeDecl *deinitedNominal, SILInstruction *consumingUser) { + auto &astContext = fn->getASTContext(); + SmallString<64> varName; + getVariableNameForValue(markedValue, varName); + + registerDiagnosticEmitted(markedValue); + + if (pathString.empty()) { + diagnose( + astContext, markedValue, + diag::sil_moveonlychecker_cannot_destructure_deinit_nominal_type_self, + varName); + } else { + diagnose( + astContext, markedValue, + diag::sil_moveonlychecker_cannot_destructure_deinit_nominal_type_field, + varName, varName, pathString.drop_front(), + deinitedNominal->getBaseName()); + } + diagnose(astContext, consumingUser, + diag::sil_moveonlychecker_consuming_use_here); +} diff --git a/lib/SILOptimizer/Mandatory/MoveOnlyDiagnostics.h b/lib/SILOptimizer/Mandatory/MoveOnlyDiagnostics.h index d704aed994c6d..587a28e9bf9b6 100644 --- a/lib/SILOptimizer/Mandatory/MoveOnlyDiagnostics.h +++ b/lib/SILOptimizer/Mandatory/MoveOnlyDiagnostics.h @@ -111,6 +111,11 @@ class DiagnosticEmitter { void emitPromotedBoxArgumentError(MarkMustCheckInst *markedValue, SILFunctionArgument *arg); + void emitCannotDestructureDeinitNominalError(MarkMustCheckInst *markedValue, + StringRef pathString, + NominalTypeDecl *deinitedNominal, + SILInstruction *consumingUser); + private: /// Emit diagnostics for the final consuming uses and consuming uses needing /// copy. If filter is non-null, allow for the caller to pre-process operands diff --git a/lib/SILOptimizer/Mandatory/MoveOnlyTypeUtils.cpp b/lib/SILOptimizer/Mandatory/MoveOnlyTypeUtils.cpp new file mode 100644 index 0000000000000..4a6d6592a9347 --- /dev/null +++ b/lib/SILOptimizer/Mandatory/MoveOnlyTypeUtils.cpp @@ -0,0 +1,313 @@ +//===--- MoveOnlyTypeUtils.cpp --------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2022 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "MoveOnlyTypeUtils.h" + +using namespace swift; +using namespace swift::siloptimizer; + +static StructDecl *getFullyReferenceableStruct(SILType ktypeTy) { + auto structDecl = ktypeTy.getStructOrBoundGenericStruct(); + if (!structDecl || structDecl->hasUnreferenceableStorage()) + return nullptr; + return structDecl; +} + +Optional> +TypeOffsetSizePair::walkOneLevelTowardsChild( + TypeOffsetSizePair ancestorOffsetSize, SILType ancestorType, + SILFunction *fn) const { + assert(ancestorOffsetSize.size >= size && + "Too large to be a child of ancestorType"); + assert((ancestorOffsetSize.startOffset <= startOffset && + startOffset < + (ancestorOffsetSize.startOffset + ancestorOffsetSize.size)) && + "Not within the offset range of ancestor"); + + if (auto tupleType = ancestorType.getAs()) { + // Before we do anything, see if we have a single element tuple. If we do, + // just return that. + if (tupleType->getNumElements() == 1) { + return {{ancestorOffsetSize, ancestorType.getTupleElementType(0)}}; + } + + assert(ancestorOffsetSize.size > size && + "Too large to be a child of ancestorType"); + + unsigned childOffset = ancestorOffsetSize.startOffset; + + for (auto index : indices(tupleType->getElementTypes())) { + SILType newType = ancestorType.getTupleElementType(index); + unsigned newSize = TypeSubElementCount(newType, fn); + + // childOffset + size(tupleChild) is the offset of the next tuple + // element. If our target offset is less than that, then we know that + // the target type must be a descendent of this tuple element type. + if (childOffset + newSize > startOffset) { + return {{{childOffset, newSize}, newType}}; + } + + // Otherwise, add the new size of this field to iterOffset so we visit + // our sibling type next. + childOffset += newSize; + } + + // At this point, we know that our type is not a subtype of this + // type. Some sort of logic error occurred. + llvm_unreachable("Not a child of this type?!"); + } + + if (auto *structDecl = getFullyReferenceableStruct(ancestorType)) { + // Before we do anything, see if we have a single element struct. If we + // do, just return that. + auto storedProperties = structDecl->getStoredProperties(); + if (storedProperties.size() == 1) { + return {{ancestorOffsetSize, + ancestorType.getFieldType(storedProperties[0], fn)}}; + } + + assert(ancestorOffsetSize.size > size && + "Too large to be a child of ancestorType"); + + unsigned childOffset = ancestorOffsetSize.startOffset; + for (auto *fieldDecl : storedProperties) { + SILType newType = ancestorType.getFieldType(fieldDecl, fn); + unsigned newSize = TypeSubElementCount(newType, fn); + + // iterOffset + size(tupleChild) is the offset of the next tuple + // element. If our target offset is less than that, then we know that + // the target type must be a child of this tuple element type. + if (childOffset + newSize > startOffset) { + return {{{childOffset, newSize}, newType}}; + } + + // Otherwise, add the new size of this field to iterOffset so we visit + // our sibling type next. + childOffset += newSize; + } + + // At this point, we know that our type is not a subtype of this + // type. Some sort of logic error occurred. 
+ llvm_unreachable("Not a child of this type?!"); + } + + if (auto *enumDecl = ancestorType.getEnumOrBoundGenericEnum()) { + llvm_unreachable("Cannot find child type of enum!\n"); + } + + llvm_unreachable("Hit a leaf type?! Should have handled it earlier"); +} + +/// Given an ancestor offset \p ancestorOffset and a type called \p +/// ancestorType, walk one level towards this current type inserting on value, +/// the relevant projection. +Optional> +TypeOffsetSizePair::walkOneLevelTowardsChild( + SILBuilderWithScope &builder, SILLocation loc, + TypeOffsetSizePair ancestorOffsetSize, SILValue ancestorValue) const { + auto *fn = ancestorValue->getFunction(); + SILType ancestorType = ancestorValue->getType(); + + assert(ancestorOffsetSize.size >= size && + "Too large to be a child of ancestorType"); + assert((ancestorOffsetSize.startOffset <= startOffset && + startOffset < + (ancestorOffsetSize.startOffset + ancestorOffsetSize.size)) && + "Not within the offset range of ancestor"); + if (auto tupleType = ancestorType.getAs()) { + // Before we do anything, see if we have a single element tuple. If we do, + // just return that. + if (tupleType->getNumElements() == 1) { + auto *newValue = builder.createTupleExtract(loc, ancestorValue, 0); + return {{ancestorOffsetSize, newValue}}; + } + + assert(ancestorOffsetSize.size > size && + "Too large to be a child of ancestorType"); + + unsigned childOffset = ancestorOffsetSize.startOffset; + + for (auto index : indices(tupleType->getElementTypes())) { + SILType newType = ancestorType.getTupleElementType(index); + unsigned newSize = TypeSubElementCount(newType, fn); + + // childOffset + size(tupleChild) is the offset of the next tuple + // element. If our target offset is less than that, then we know that + // the target type must be a descendent of this tuple element type. + if (childOffset + newSize > startOffset) { + auto *newValue = builder.createTupleExtract(loc, ancestorValue, index); + return {{{childOffset, newSize}, newValue}}; + } + + // Otherwise, add the new size of this field to iterOffset so we visit + // our sibling type next. + childOffset += newSize; + } + + // At this point, we know that our type is not a subtype of this + // type. Some sort of logic error occurred. + llvm_unreachable("Not a child of this type?!"); + } + + if (auto *structDecl = getFullyReferenceableStruct(ancestorType)) { + // Before we do anything, see if we have a single element struct. If we + // do, just return that. + auto storedProperties = structDecl->getStoredProperties(); + if (storedProperties.size() == 1) { + auto *newValue = + builder.createStructExtract(loc, ancestorValue, storedProperties[0]); + return {{ancestorOffsetSize, newValue}}; + } + + assert(ancestorOffsetSize.size > size && + "Too large to be a child of ancestorType"); + + unsigned childOffset = ancestorOffsetSize.startOffset; + for (auto *fieldDecl : structDecl->getStoredProperties()) { + SILType newType = ancestorType.getFieldType(fieldDecl, fn); + unsigned newSize = TypeSubElementCount(newType, fn); + + // iterOffset + size(tupleChild) is the offset of the next tuple + // element. If our target offset is less than that, then we know that + // the target type must be a child of this tuple element type. + if (childOffset + newSize > startOffset) { + auto *newValue = + builder.createStructExtract(loc, ancestorValue, fieldDecl); + return {{{childOffset, newSize}, newValue}}; + } + + // Otherwise, add the new size of this field to iterOffset so we visit + // our sibling type next. 
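+      // (Worked example, illustrative: if the struct's two fields have
+      // sub-element sizes 2 and 3, they start at offsets 0 and 2. For a
+      // target startOffset of 3, the first field fails the check above
+      // (0 + 2 > 3 is false) while the second passes (2 + 3 > 3), so we
+      // project into the field at offset 2 and continue walking from there.)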
+ childOffset += newSize; + } + + // At this point, we know that our type is not a subtype of this + // type. Some sort of logic error occurred. + llvm_unreachable("Not a child of this type?!"); + } + + if (auto *enumDecl = ancestorType.getEnumOrBoundGenericEnum()) { + llvm_unreachable("Cannot find child type of enum!\n"); + } + + llvm_unreachable("Hit a leaf type?! Should have handled it earlier"); +} + +/// Given an ancestor offset \p ancestorOffset and a type called \p +/// ancestorType, walk one level towards this current type which is assumed to +/// be a child type of \p ancestorType. +void TypeOffsetSizePair::constructPathString( + SILType targetType, TypeOffsetSizePair ancestorOffsetSize, + SILType ancestorType, SILFunction *fn, llvm::raw_ostream &os) const { + TypeOffsetSizePair iterPair = ancestorOffsetSize; + SILType iterType = ancestorType; + + do { + assert(iterPair.size >= size && "Too large to be a child of iterType"); + assert((iterPair.startOffset <= startOffset && + startOffset < (iterPair.startOffset + iterPair.size)) && + "Not within the offset range of ancestor"); + + if (auto tupleType = iterType.getAs()) { + // Before we do anything, see if we have a single element tuple. If we + // do, just return that. + if (tupleType->getNumElements() == 1) { + os << ".0"; + iterType = iterType.getTupleElementType(0); + continue; + } + + assert(iterPair.size > size && "Too large to be a child of iterType"); + + unsigned childOffset = iterPair.startOffset; + + bool foundValue = false; + for (auto index : indices(tupleType->getElementTypes())) { + SILType newType = iterType.getTupleElementType(index); + unsigned newSize = TypeSubElementCount(newType, fn); + + // childOffset + size(tupleChild) is the offset of the next tuple + // element. If our target offset is less than that, then we know that + // the target type must be a descendent of this tuple element type. + if (childOffset + newSize > startOffset) { + os << '.'; + os << index; + iterPair = {childOffset, newSize}; + iterType = newType; + foundValue = true; + break; + } + + // Otherwise, add the new size of this field to iterOffset so we visit + // our sibling type next. + childOffset += newSize; + } + + if (foundValue) + continue; + + // At this point, we know that our type is not a subtype of this + // type. Some sort of logic error occurred. + llvm_unreachable("Not a child of this type?!"); + } + + if (auto *structDecl = getFullyReferenceableStruct(iterType)) { + // Before we do anything, see if we have a single element struct. If we + // do, just return that. + auto storedProperties = structDecl->getStoredProperties(); + if (storedProperties.size() == 1) { + os << '.'; + os << storedProperties[0]->getBaseName().userFacingName(); + iterType = iterType.getFieldType(storedProperties[0], fn); + continue; + } + + assert(iterPair.size > size && "Too large to be a child of iterType"); + + unsigned childOffset = iterPair.startOffset; + bool foundValue = false; + for (auto *fieldDecl : storedProperties) { + SILType newType = iterType.getFieldType(fieldDecl, fn); + unsigned newSize = TypeSubElementCount(newType, fn); + + // iterOffset + size(tupleChild) is the offset of the next tuple + // element. If our target offset is less than that, then we know that + // the target type must be a child of this tuple element type. 
+ if (childOffset + newSize > startOffset) { + os << '.'; + os << fieldDecl->getBaseName().userFacingName(); + iterPair = {childOffset, newSize}; + iterType = newType; + foundValue = true; + break; + } + + // Otherwise, add the new size of this field to iterOffset so we visit + // our sibling type next. + childOffset += newSize; + } + + if (foundValue) + continue; + + // At this point, we know that our type is not a subtype of this + // type. Some sort of logic error occurred. + llvm_unreachable("Not a child of this type?!"); + } + + if (auto *enumDecl = iterType.getEnumOrBoundGenericEnum()) { + llvm_unreachable("Cannot find child type of enum!\n"); + } + + llvm_unreachable("Hit a leaf type?! Should have handled it earlier"); + } while (iterType != targetType); +} diff --git a/lib/SILOptimizer/Mandatory/MoveOnlyTypeUtils.h b/lib/SILOptimizer/Mandatory/MoveOnlyTypeUtils.h new file mode 100644 index 0000000000000..136c503ca40a6 --- /dev/null +++ b/lib/SILOptimizer/Mandatory/MoveOnlyTypeUtils.h @@ -0,0 +1,93 @@ +//===--- MoveOnlyTypeUtils.h ----------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2022 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// +/// This file contains utilities for manipulating types as used by the move +/// checker. +/// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_SILOPTIMIZER_MANDATORY_MOVEONLYTYPEUTILS_H +#define SWIFT_SILOPTIMIZER_MANDATORY_MOVEONLYTYPEUTILS_H + +#include "swift/SIL/FieldSensitivePrunedLiveness.h" +#include "swift/SIL/SILBuilder.h" + +namespace swift { +namespace siloptimizer { + +struct TypeOffsetSizePair { + SubElementOffset startOffset = 0; + TypeSubElementCount size = 0; + + TypeOffsetSizePair() : startOffset(0), size(0) {} + TypeOffsetSizePair(SILType baseType, SILFunction *fn) + : startOffset(0), size(baseType, fn) {} + TypeOffsetSizePair(SubElementOffset offset, TypeSubElementCount size) + : startOffset(offset), size(size) {} + TypeOffsetSizePair(SILValue projection, SILValue base) + : startOffset(*SubElementOffset::compute(projection, base)), + size(TypeSubElementCount(projection)) {} + TypeOffsetSizePair(TypeTreeLeafTypeRange leafTypeRange) + : startOffset(leafTypeRange.startEltOffset), size(leafTypeRange.size()) {} + + IntRange getRange() const { + return range(startOffset, getEndOffset()); + } + + SubElementOffset getEndOffset() const { + return SubElementOffset(startOffset + size); + } + + bool operator==(const TypeOffsetSizePair &other) const { + return startOffset == other.startOffset && size == other.size; + } + + bool operator!=(const TypeOffsetSizePair &other) const { + return !(*this == other); + } + + /// Given an ancestor offset \p ancestorOffset and a type called \p + /// ancestorType, walk one level towards this current type which is assumed to + /// be a child type of \p ancestorType. 
+  Optional<std::pair<TypeOffsetSizePair, SILType>>
+  walkOneLevelTowardsChild(TypeOffsetSizePair ancestorOffsetSize,
+                           SILType ancestorType, SILFunction *fn) const;
+
+  /// Given an ancestor offset \p ancestorOffset and a type called \p
+  /// ancestorType, walk one level towards this current type, inserting the
+  /// relevant projection of the value.
+  Optional<std::pair<TypeOffsetSizePair, SILValue>>
+  walkOneLevelTowardsChild(SILBuilderWithScope &builder, SILLocation loc,
+                           TypeOffsetSizePair ancestorOffsetSize,
+                           SILValue ancestorValue) const;
+
+  /// Given an ancestor offset \p ancestorOffset and a type called \p
+  /// ancestorType, print to \p os the projection path from the ancestor down
+  /// to this current type (e.g. ".pair.0").
+  void constructPathString(SILType targetType,
+                           TypeOffsetSizePair ancestorOffsetSize,
+                           SILType ancestorType, SILFunction *fn,
+                           llvm::raw_ostream &os) const;
+};
+
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &os,
+                                     const TypeOffsetSizePair &other) {
+  return os << "(startOffset: " << other.startOffset
+            << ", size: " << other.size << ")";
+}
+
+} // namespace siloptimizer
+} // namespace swift
+
+#endif
diff --git a/test/SILOptimizer/moveonly_addresschecker_destructure_through_deinit_diagnostics.swift b/test/SILOptimizer/moveonly_addresschecker_destructure_through_deinit_diagnostics.swift
new file mode 100644
index 0000000000000..b1af855fc83d1
--- /dev/null
+++ b/test/SILOptimizer/moveonly_addresschecker_destructure_through_deinit_diagnostics.swift
@@ -0,0 +1,145 @@
+// RUN: %target-swift-emit-sil -sil-verify-all -verify -enable-experimental-feature MoveOnlyClasses %s
+
+// This test validates that we properly emit errors if we partially invalidate
+// through a type with a deinit.
+
+//////////////////
+// Declarations //
+//////////////////
+
+class Klass {}
+
+@_moveOnly
+class MoveOnlyKlass {
+  var value: Int = 0
+}
+
+@_moveOnly
+struct KlassPair {
+  var lhs: Klass
+  var rhs: MoveOnlyKlass
+}
+
+@_moveOnly
+struct AggStruct {
+  var pair: KlassPair
+}
+
+@_moveOnly
+struct KlassPair2 {
+  var lhs: MoveOnlyKlass
+  var rhs: MoveOnlyKlass
+}
+
+@_moveOnly
+struct AggStruct2 {
+  var lhs: MoveOnlyKlass
+  var pair: KlassPair2
+  var rhs: MoveOnlyKlass
+}
+
+@_moveOnly
+struct SingleIntContainingStruct {
+  var value: Int = 0
+}
+
+func consume(_ x: consuming MoveOnlyKlass) {}
+func consume(_ x: consuming Klass) {}
+
+////////////////////
+// Test Top Level //
+////////////////////
+
+@_moveOnly
+struct DeinitStruct {
+  var first: Klass
+  var second: (Klass, Klass)
+  var third: KlassPair
+  var fourth: (MoveOnlyKlass, MoveOnlyKlass)
+  var fifth: MoveOnlyKlass
+
+  deinit {}
+}
+
+func testConsumeCopyable(_ x: consuming DeinitStruct) {
+  consume(x.first)
+  consume(x.second.0)
+  consume(x.third.lhs)
+}
+
+func testConsumeNonCopyable1(_ x: consuming DeinitStruct) {
+  // expected-error @-1 {{Cannot partially consume 'x' since it has a user defined deinit}}
+  consume(x.third.rhs) // expected-note {{consuming use here}}
+}
+
+func testConsumeNonCopyable2(_ x: consuming DeinitStruct) {
+  // expected-error @-1 {{Cannot partially consume 'x' since it has a user defined deinit}}
+  consume(x.fourth.0) // expected-note {{consuming use here}}
+}
+
+func testConsumeNonCopyable3(_ x: consuming DeinitStruct) {
+  // expected-error @-1 {{Cannot partially consume 'x' since it has a user defined deinit}}
+  consume(x.fourth.1) // expected-note {{consuming use here}}
+}
+
+func testConsumeNonCopyable4(_ x: consuming DeinitStruct) {
+  // expected-error @-1 {{Cannot partially consume 'x' since it has a user defined deinit}}
+  consume(x.fifth) // 
expected-note {{consuming use here}} +} + +///////////////// +// Test Fields // +///////////////// + +@_moveOnly +struct StructContainDeinitStruct { + var first: DeinitStruct + var second: (DeinitStruct, DeinitStruct) + var third: Klass + var fourth: (Klass, Klass) + var fifth: MoveOnlyKlass + var sixth: (MoveOnlyKlass, MoveOnlyKlass) +} + +func testStructContainDeinitStructConsumeCopyable1(_ x: consuming StructContainDeinitStruct) { + consume(x.first.first) + consume(x.first.second.0) + consume(x.first.third.lhs) + consume(x.second.0.first) + consume(x.second.1.second.0) + consume(x.second.0.third.lhs) + consume(x.sixth.0) +} + + +func testStructContainStructContainDeinitStructConsumeNonCopyable1(_ x: consuming StructContainDeinitStruct) { + // expected-error @-1 {{Cannot partially consume 'x' since it contains field 'x.first' whose type 'DeinitStruct' has a user defined deinit}} + consume(x.first.third.rhs) // expected-note {{consuming use here}} +} + +func testStructContainStructContainDeinitStructConsumeNonCopyable1a(_ x: consuming StructContainDeinitStruct) { + // expected-error @-1 {{Cannot partially consume 'x' since it contains field 'x.second.0' whose type 'DeinitStruct' has a user defined deinit}} + consume(x.second.0.third.rhs) // expected-note {{consuming use here}} +} + +func testStructContainStructContainDeinitStructConsumeNonCopyable2(_ x: consuming StructContainDeinitStruct) { + // expected-error @-1 {{Cannot partially consume 'x' since it contains field 'x.first' whose type 'DeinitStruct' has a user defined deinit}} + consume(x.first.fourth.0) // expected-note {{consuming use here}} +} + +func testStructContainStructContainDeinitStructConsumeNonCopyable2a(_ x: consuming StructContainDeinitStruct) { + // expected-error @-1 {{Cannot partially consume 'x' since it contains field 'x.second.1' whose type 'DeinitStruct' has a user defined deinit}} + consume(x.second.1.fourth.0) // expected-note {{consuming use here}} +} + +func testStructContainStructContainDeinitStructConsumeNonCopyable3(_ x: consuming StructContainDeinitStruct) { + // expected-error @-1 {{Cannot partially consume 'x' since it contains field 'x.first' whose type 'DeinitStruct' has a user defined deinit}} + consume(x.first.fourth.1) // expected-note {{consuming use here}} +} + + +func testStructContainStructContainDeinitStructConsumeNonCopyable4(_ x: consuming StructContainDeinitStruct) { + // expected-error @-1 {{Cannot partially consume 'x' since it contains field 'x.first' whose type 'DeinitStruct' has a user defined deinit}} + consume(x.first.fifth) // expected-note {{consuming use here}} +}
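+
+// A contrasting sketch (the consumeWhole helper below is hypothetical and not
+// exercised above): consuming the *entire* value remains legal even though
+// DeinitStruct has a user defined deinit; only partial consumption is
+// rejected, since it would have to destructure through the deinit.
+func consumeWhole(_ x: consuming DeinitStruct) {}
+
+func testConsumeWholeValue(_ x: consuming DeinitStruct) {
+  consumeWhole(x) // ok: the whole value is consumed, so the deinit still runs
+}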