From 6b74f511d0f05e7a3a5b2f0a7d1efca57c43b192 Mon Sep 17 00:00:00 2001 From: Arnold Schwaighofer Date: Thu, 28 Sep 2023 10:09:30 -0700 Subject: [PATCH] Preliminary IRGen support for typed throws Typed errors are returned indirectly in this version. No support for non-loadable typed errors --- include/swift/SIL/SILFunctionConventions.h | 5 + lib/IRGen/EntryPointArgumentEmission.h | 1 + lib/IRGen/GenBuiltin.cpp | 2 +- lib/IRGen/GenCall.cpp | 143 +++++++--- lib/IRGen/GenFunc.cpp | 12 +- lib/IRGen/IRGenFunction.h | 13 +- lib/IRGen/IRGenSIL.cpp | 114 ++++++-- test/IRGen/pack_metadata_marker_inserter.sil | 9 +- test/IRGen/typed_throws.sil | 274 +++++++++++++++++++ 9 files changed, 503 insertions(+), 70 deletions(-) create mode 100644 test/IRGen/typed_throws.sil diff --git a/include/swift/SIL/SILFunctionConventions.h b/include/swift/SIL/SILFunctionConventions.h index 9b410ba6c6215..74dee4f28d435 100644 --- a/include/swift/SIL/SILFunctionConventions.h +++ b/include/swift/SIL/SILFunctionConventions.h @@ -214,6 +214,11 @@ class SILFunctionConventions { return getSILType(funcTy->getErrorResult(), context); } + bool isTypedError() const { + return !funcTy->getErrorResult() + .getInterfaceType()->isExistentialWithError(); + } + /// Returns an array of result info. /// Provides convenient access to the underlying SILFunctionType. 
ArrayRef getResults() const { diff --git a/lib/IRGen/EntryPointArgumentEmission.h b/lib/IRGen/EntryPointArgumentEmission.h index db77c0218d728..d397e37e3bb14 100644 --- a/lib/IRGen/EntryPointArgumentEmission.h +++ b/lib/IRGen/EntryPointArgumentEmission.h @@ -45,6 +45,7 @@ class NativeCCEntryPointArgumentEmission public: virtual void mapAsyncParameters() = 0; virtual llvm::Value *getCallerErrorResultArgument() = 0; + virtual llvm::Value *getCallerTypedErrorResultArgument() = 0; virtual llvm::Value *getContext() = 0; virtual Explosion getArgumentExplosion(unsigned index, unsigned size) = 0; virtual llvm::Value *getSelfWitnessTable() = 0; diff --git a/lib/IRGen/GenBuiltin.cpp b/lib/IRGen/GenBuiltin.cpp index 60fd2bcff2d18..1ee9b3499774a 100644 --- a/lib/IRGen/GenBuiltin.cpp +++ b/lib/IRGen/GenBuiltin.cpp @@ -581,7 +581,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin, auto error = args.claimNext(); auto errorTy = IGF.IGM.Context.getErrorExistentialType(); auto errorBuffer = IGF.getCalleeErrorResultSlot( - SILType::getPrimitiveObjectType(errorTy)); + SILType::getPrimitiveObjectType(errorTy), false); IGF.Builder.CreateStore(error, errorBuffer); auto context = llvm::UndefValue::get(IGF.IGM.Int8PtrTy); diff --git a/lib/IRGen/GenCall.cpp b/lib/IRGen/GenCall.cpp index 003fa224245a2..53d99428bc561 100644 --- a/lib/IRGen/GenCall.cpp +++ b/lib/IRGen/GenCall.cpp @@ -517,6 +517,9 @@ namespace { void expandCoroutineResult(bool forContinuation); void expandCoroutineContinuationParameters(); + + void addIndirectThrowingResult(); + llvm::Type *getErrorRegisterType(); }; } // end anonymous namespace } // end namespace irgen @@ -1850,12 +1853,16 @@ void SignatureExpansion::expandParameters( if (FnType->hasErrorResult()) { if (claimError()) IGM.addSwiftErrorAttributes(Attrs, ParamIRTypes.size()); - llvm::Type *errorType = - IGM.getStorageType(getSILFuncConventions().getSILType( - FnType->getErrorResult(), IGM.getMaximalTypeExpansionContext())); + 
llvm::Type *errorType = getErrorRegisterType(); ParamIRTypes.push_back(errorType->getPointerTo()); if (recordedABIDetails) recordedABIDetails->hasErrorResult = true; + if (getSILFuncConventions().isTypedError()) { + ParamIRTypes.push_back( + IGM.getStorageType(getSILFuncConventions().getSILType( + FnType->getErrorResult(), IGM.getMaximalTypeExpansionContext()) + )->getPointerTo()); + } } // Witness methods have some extra parameter types. @@ -1903,6 +1910,14 @@ void SignatureExpansion::expandCoroutineContinuationType() { expandCoroutineContinuationParameters(); } +llvm::Type *SignatureExpansion::getErrorRegisterType() { + if (getSILFuncConventions().isTypedError()) + return IGM.Int8PtrTy; + + return IGM.getStorageType(getSILFuncConventions().getSILType( + FnType->getErrorResult(), IGM.getMaximalTypeExpansionContext())); +} + void SignatureExpansion::expandAsyncReturnType() { // Build up the signature of the return continuation function. // void (AsyncTask *, ExecutorRef, AsyncContext *, DirectResult0, ..., @@ -1914,9 +1929,7 @@ void SignatureExpansion::expandAsyncReturnType() { auto addErrorResult = [&]() { // Add the error pointer at the end. 
if (FnType->hasErrorResult()) { - llvm::Type *errorType = - IGM.getStorageType(getSILFuncConventions().getSILType( - FnType->getErrorResult(), IGM.getMaximalTypeExpansionContext())); + llvm::Type *errorType = getErrorRegisterType(); claimSelf(); auto selfIdx = ParamIRTypes.size(); IGM.addSwiftSelfAttributes(Attrs, selfIdx); @@ -1943,6 +1956,17 @@ void SignatureExpansion::expandAsyncReturnType() { addErrorResult(); } +void SignatureExpansion::addIndirectThrowingResult() { + if (getSILFuncConventions().funcTy->hasErrorResult() && + getSILFuncConventions().isTypedError()) { + auto resultType = getSILFuncConventions().getSILErrorType( + IGM.getMaximalTypeExpansionContext()); + const TypeInfo &resultTI = cast(IGM.getTypeInfo(resultType)); + auto storageTy = resultTI.getStorageType(); + ParamIRTypes.push_back(storageTy->getPointerTo()); + } + +} void SignatureExpansion::expandAsyncEntryType() { ResultIRType = IGM.VoidTy; @@ -2029,6 +2053,8 @@ void SignatureExpansion::expandAsyncEntryType() { } } + addIndirectThrowingResult(); + // For now we continue to store the error result in the context to be able to // reuse non throwing functions. @@ -2049,7 +2075,7 @@ void SignatureExpansion::expandAsyncAwaitType() { auto addErrorResult = [&]() { if (FnType->hasErrorResult()) { - llvm::Type *errorType = + llvm::Type *errorType = getErrorRegisterType(); IGM.getStorageType(getSILFuncConventions().getSILType( FnType->getErrorResult(), IGM.getMaximalTypeExpansionContext())); auto selfIdx = components.size(); @@ -2376,9 +2402,16 @@ class SyncCallEmission final : public CallEmission { // don't need to do anything extra here. 
SILFunctionConventions fnConv(fnType, IGF.getSILModule()); Address errorResultSlot = IGF.getCalleeErrorResultSlot( - fnConv.getSILErrorType(IGF.IGM.getMaximalTypeExpansionContext())); + fnConv.getSILErrorType(IGF.IGM.getMaximalTypeExpansionContext()), + fnConv.isTypedError()); assert(LastArgWritten > 0); + if (fnConv.isTypedError()) { + // Return the error indirectly. + auto buf = IGF.getCalleeTypedErrorResultSlot( + fnConv.getSILErrorType(IGF.IGM.getMaximalTypeExpansionContext())); + Args[--LastArgWritten] = buf.getAddress(); + } Args[--LastArgWritten] = errorResultSlot.getAddress(); addParamAttribute(LastArgWritten, llvm::Attribute::NoCapture); IGF.IGM.addSwiftErrorAttributes(CurCallee.getMutableAttributes(), @@ -2575,7 +2608,10 @@ class SyncCallEmission final : public CallEmission { out = nativeSchema.mapFromNative(IGF.IGM, IGF, nativeExplosion, resultType); } Address getCalleeErrorSlot(SILType errorType, bool isCalleeAsync) override { - return IGF.getCalleeErrorResultSlot(errorType); + SILFunctionConventions fnConv(getCallee().getOrigFunctionType(), + IGF.getSILModule()); + + return IGF.getCalleeErrorResultSlot(errorType, fnConv.isTypedError()); }; llvm::Value *getResumeFunctionPointer() override { @@ -2674,6 +2710,18 @@ class AsyncCallEmission final : public CallEmission { } } + // Add the indirect typed error result if we have one. + SILFunctionConventions fnConv(fnType, IGF.getSILModule()); + if (fnType->hasErrorResult() && fnConv.isTypedError()) { + // The invariant is that this is always zero-initialized, so we + // don't need to do anything extra here. + assert(LastArgWritten > 0); + // Return the error indirectly. + auto buf = IGF.getCalleeTypedErrorResultSlot( + fnConv.getSILErrorType(IGF.IGM.getMaximalTypeExpansionContext())); + Args[--LastArgWritten] = buf.getAddress(); + } + llvm::Value *contextPtr = CurCallee.getSwiftContext(); // Add the data pointer if we have one. 
if (contextPtr) { @@ -2855,7 +2903,8 @@ class AsyncCallEmission final : public CallEmission { if (resultTys.size() == 1) { result = Builder.CreateExtractValue(result, numAsyncContextParams); if (hasError) { - Address errorAddr = IGF.getCalleeErrorResultSlot(errorType); + Address errorAddr = IGF.getCalleeErrorResultSlot(errorType, + substConv.isTypedError()); Builder.CreateStore(result, errorAddr); return; } @@ -2863,7 +2912,7 @@ class AsyncCallEmission final : public CallEmission { auto tmp = result; result = Builder.CreateExtractValue(result, numAsyncContextParams); auto errorResult = Builder.CreateExtractValue(tmp, numAsyncContextParams + 1); - Address errorAddr = IGF.getCalleeErrorResultSlot(errorType); + Address errorAddr = IGF.getCalleeErrorResultSlot(errorType, substConv.isTypedError()); Builder.CreateStore(errorResult, errorAddr); } else { auto directResultTys = hasError ? resultTys.drop_back() : resultTys; @@ -2877,7 +2926,7 @@ class AsyncCallEmission final : public CallEmission { if (hasError) { auto errorResult = Builder.CreateExtractValue( result, numAsyncContextParams + directResultTys.size()); - Address errorAddr = IGF.getCalleeErrorResultSlot(errorType); + Address errorAddr = IGF.getCalleeErrorResultSlot(errorType, substConv.isTypedError()); Builder.CreateStore(errorResult, errorAddr); } result = resultAgg; @@ -2915,7 +2964,9 @@ class AsyncCallEmission final : public CallEmission { out = nativeSchema.mapFromNative(IGF.IGM, IGF, nativeExplosion, resultType); } Address getCalleeErrorSlot(SILType errorType, bool isCalleeAsync) override { - return IGF.getCalleeErrorResultSlot(errorType); + SILFunctionConventions fnConv(getCallee().getOrigFunctionType(), + IGF.getSILModule()); + return IGF.getCalleeErrorResultSlot(errorType, fnConv.isTypedError()); } llvm::CallBase *createCall(const FunctionPointer &fn, @@ -3050,7 +3101,8 @@ void CallEmission::emitToUnmappedMemory(Address result) { errorType = 
substConv.getSILErrorType(IGM.getMaximalTypeExpansionContext()); auto result = Builder.CreateExtractValue(call, numAsyncContextParams); - Address errorAddr = IGF.getCalleeErrorResultSlot(errorType); + Address errorAddr = IGF.getCalleeErrorResultSlot(errorType, + substConv.isTypedError()); Builder.CreateStore(result, errorAddr); } } @@ -4604,17 +4656,21 @@ Explosion IRGenFunction::collectParameters() { params.add(&*i); return params; } - -Address IRGenFunction::createErrorResultSlot(SILType errorType, bool isAsync) { +Address IRGenFunction::createErrorResultSlot(SILType errorType, bool isAsync, + bool setSwiftErrorFlag, + bool isTypedError) { auto &errorTI = cast(getTypeInfo(errorType)); IRBuilder builder(IGM.getLLVMContext(), IGM.DebugInfo != nullptr); builder.SetInsertPoint(AllocaIP->getParent(), AllocaIP->getIterator()); - + auto errorStorageType = isTypedError ? IGM.Int8PtrTy : + errorTI.getStorageType(); + auto errorAlignment = isTypedError ? IGM.getPointerAlignment() : + errorTI.getFixedAlignment(); // Create the alloca. We don't use allocateStack because we're // not allocating this in stack order. - auto addr = createAlloca(errorTI.getStorageType(), - errorTI.getFixedAlignment(), "swifterror"); + auto addr = createAlloca(errorStorageType, + errorAlignment, "swifterror"); if (!isAsync) { builder.SetInsertPoint(getEarliestInsertionPoint()->getParent(), @@ -4628,36 +4684,43 @@ Address IRGenFunction::createErrorResultSlot(SILType errorType, bool isAsync) { // The slot for async callees cannot be annotated swifterror because those // errors are never passed in registers but rather are always passed // indirectly in the async context. - if (IGM.ShouldUseSwiftError && !isAsync) + if (IGM.ShouldUseSwiftError && !isAsync && setSwiftErrorFlag) cast(addr.getAddress())->setSwiftError(true); // Initialize at the alloca point. 
- auto nullError = llvm::ConstantPointerNull::get( - cast(errorTI.getStorageType())); - builder.CreateStore(nullError, addr); + if (setSwiftErrorFlag) { + auto nullError = llvm::ConstantPointerNull::get( + cast(errorStorageType)); + builder.CreateStore(nullError, addr); + } return addr; } /// Fetch the error result slot. -Address IRGenFunction::getCalleeErrorResultSlot(SILType errorType) { +Address IRGenFunction::getCalleeErrorResultSlot(SILType errorType, + bool isTypedError) { if (!CalleeErrorResultSlot.isValid()) { - CalleeErrorResultSlot = createErrorResultSlot(errorType, /*isAsync=*/false); + CalleeErrorResultSlot = createErrorResultSlot(errorType, /*isAsync=*/false, + /*setSwiftError*/true, + isTypedError); } return CalleeErrorResultSlot; } -/// Fetch the error result slot. -Address IRGenFunction::getAsyncCalleeErrorResultSlot(SILType errorType) { - assert(isAsync() && - "throwing async functions must be called from async functions"); - if (!AsyncCalleeErrorResultSlot.isValid()) { - AsyncCalleeErrorResultSlot = - createErrorResultSlot(errorType, /*isAsync=*/true); +Address IRGenFunction::getCalleeTypedErrorResultSlot(SILType errorType) { + + auto &errorTI = cast(getTypeInfo(errorType)); + if (!CalleeTypedErrorResultSlot.isValid() || + CalleeTypedErrorResultSlot.getElementType() != errorTI.getStorageType()) { + CalleeTypedErrorResultSlot = + createErrorResultSlot(errorType, /*isAsync=*/false, + /*setSwiftErrorFlag*/false); } - return AsyncCalleeErrorResultSlot; + return CalleeTypedErrorResultSlot; } + /// Fetch the error result slot received from the caller. Address IRGenFunction::getCallerErrorResultSlot() { assert(CallerErrorResultSlot.isValid() && "no error result slot!"); @@ -4679,6 +4742,20 @@ void IRGenFunction::setCallerErrorResultSlot(Address address) { } } +// Set the error result slot for a typed throw for the current function. +// This should only be done in the prologue. 
+void IRGenFunction::setCallerTypedErrorResultSlot(Address address) { + assert(!CallerTypedErrorResultSlot.isValid() && + "already have a caller error result slot!"); + assert(isa(address.getAddress()->getType())); + CallerTypedErrorResultSlot = address; +} + +Address IRGenFunction::getCallerTypedErrorResultSlot() { + assert(CallerTypedErrorResultSlot.isValid() && "no error result slot!"); + assert(isa(CallerTypedErrorResultSlot.getAddress())); + return CallerTypedErrorResultSlot; +} /// Emit the basic block that 'return' should branch to and insert it into /// the current function. This creates a second /// insertion point that most blocks should be inserted before. diff --git a/lib/IRGen/GenFunc.cpp b/lib/IRGen/GenFunc.cpp index 706ce5e9021f3..16cb84433980a 100644 --- a/lib/IRGen/GenFunc.cpp +++ b/lib/IRGen/GenFunc.cpp @@ -1118,6 +1118,10 @@ class SyncPartialApplicationForwarderEmission void forwardErrorResult() override { llvm::Value *errorResultPtr = origParams.claimNext(); args.add(errorResultPtr); + if (origConv.isTypedError()) { + auto *typedErrorResultPtr = origParams.claimNext(); + args.add(typedErrorResultPtr); + } } llvm::CallInst *createCall(FunctionPointer &fnPtr) override { return subIGF.Builder.CreateCall(fnPtr, args.claimAll()); @@ -1289,8 +1293,12 @@ class AsyncPartialApplicationForwarderEmission } void forwardErrorResult() override { - // Nothing to do here. The error result pointer is already in the - // appropriate position. + // The error result pointer is already in the appropriate position but the + // type error address is not. 
+ if (origConv.isTypedError()) { + auto *typedErrorResultPtr = origParams.claimNext(); + args.add(typedErrorResultPtr); + } } llvm::CallInst *createCall(FunctionPointer &fnPtr) override { PointerAuthInfo newAuthInfo = diff --git a/lib/IRGen/IRGenFunction.h b/lib/IRGen/IRGenFunction.h index a1f200334c710..c2ff49fd352b2 100644 --- a/lib/IRGen/IRGenFunction.h +++ b/lib/IRGen/IRGenFunction.h @@ -90,7 +90,7 @@ class IRGenFunction { friend class Scope; - Address createErrorResultSlot(SILType errorType, bool isAsync); + Address createErrorResultSlot(SILType errorType, bool isAsync, bool setSwiftErrorFlag = true, bool isTypedError = false); //--- Function prologue and epilogue //------------------------------------------- @@ -122,14 +122,19 @@ class IRGenFunction { /// /// For async functions, this is different from the caller result slot because /// that is a gep into the %swift.context. - Address getCalleeErrorResultSlot(SILType errorType); - Address getAsyncCalleeErrorResultSlot(SILType errorType); + Address getCalleeErrorResultSlot(SILType errorType, + bool isTypedError); /// Return the error result slot provided by the caller. Address getCallerErrorResultSlot(); /// Set the error result slot for the current function. void setCallerErrorResultSlot(Address address); + /// Set the error result slot for a typed throw for the current function. + void setCallerTypedErrorResultSlot(Address address); + + Address getCallerTypedErrorResultSlot(); + Address getCalleeTypedErrorResultSlot(SILType errorType); /// Are we currently emitting a coroutine? 
bool isCoroutine() { @@ -198,6 +203,8 @@ class IRGenFunction { Address CalleeErrorResultSlot; Address AsyncCalleeErrorResultSlot; Address CallerErrorResultSlot; + Address CallerTypedErrorResultSlot; + Address CalleeTypedErrorResultSlot; llvm::Value *CoroutineHandle = nullptr; llvm::Value *AsyncCoroutineCurrentResume = nullptr; llvm::Value *AsyncCoroutineCurrentContinuationContext = nullptr; diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp index d4a80a28683c9..a22470eb7cff5 100644 --- a/lib/IRGen/IRGenSIL.cpp +++ b/lib/IRGen/IRGenSIL.cpp @@ -1611,6 +1611,9 @@ class SyncNativeCCEntryPointArgumentEmission final llvm::Value *getCallerErrorResultArgument() override { return allParamValues.takeLast(); } + llvm::Value *getCallerTypedErrorResultArgument() override { + return allParamValues.takeLast(); + } void mapAsyncParameters() override{/* nothing to map*/}; llvm::Value *getContext() override { return allParamValues.takeLast(); } Explosion getArgumentExplosion(unsigned index, unsigned size) override { @@ -1705,6 +1708,9 @@ class AsyncNativeCCEntryPointArgumentEmission final llvm::Value *getCallerErrorResultArgument() override { llvm_unreachable("should not be used"); } + llvm::Value *getCallerTypedErrorResultArgument() override { + return allParamValues.takeLast(); + } llvm::Value *getContext() override { return allParamValues.takeLast(); } Explosion getArgumentExplosion(unsigned index, unsigned size) override { assert(size > 0); @@ -2136,6 +2142,7 @@ static void emitEntryPointArgumentsNativeCC(IRGenSILFunction &IGF, break; } + SILFunctionConventions fnConv(funcTy, IGF.getSILModule()); if (funcTy->isAsync()) { emitAsyncFunctionEntry(IGF, getAsyncContextLayout(IGF.IGM, IGF.CurSILFn), LinkEntity::forSILFunction(IGF.CurSILFn), @@ -2148,17 +2155,26 @@ static void emitEntryPointArgumentsNativeCC(IRGenSILFunction &IGF, // Remap the entry block. 
IGF.LoweredBBs[&*IGF.CurSILFn->begin()] = LoweredBB(IGF.Builder.GetInsertBlock(), {}); } - } + } // Bind the error result by popping it off the parameter list. - if (funcTy->hasErrorResult() && !funcTy->isAsync()) { - SILFunctionConventions fnConv(funcTy, IGF.getSILModule()); + if (funcTy->hasErrorResult()) { auto errorType = fnConv.getSILErrorType(IGF.IGM.getMaximalTypeExpansionContext()); auto &errorTI = cast(IGF.getTypeInfo(errorType)); - IGF.setCallerErrorResultSlot( + bool isTypedError = fnConv.isTypedError(); + if (isTypedError) { + IGF.setCallerTypedErrorResultSlot(Address( + emission->getCallerTypedErrorResultArgument(), + errorTI.getStorageType(), + errorTI.getFixedAlignment())); + } + if (!funcTy->isAsync()) { + IGF.setCallerErrorResultSlot( Address(emission->getCallerErrorResultArgument(), - errorTI.getStorageType(), IGF.IGM.getPointerAlignment())); + isTypedError ? IGF.IGM.Int8PtrTy : errorTI.getStorageType(), + IGF.IGM.getPointerAlignment())); + } } SILFunctionConventions conv(funcTy, IGF.getSILModule()); @@ -3736,10 +3752,17 @@ void IRGenSILFunction::visitFullApplySite(FullApplySite site) { // See below. Builder.CreateStore(nullError, calleeErrorSlot); } + auto hasTypedError = substConv.isTypedError(); + llvm::BasicBlock *typedErrorLoadBB = nullptr; + if (hasTypedError) { + typedErrorLoadBB = createBasicBlock("typed.error.load"); + } // If the error value is non-null, branch to the error destination. auto hasError = Builder.CreateICmpNE(errorValue, nullError); - Builder.CreateCondBr(hasError, errorDest.bb, normalDest.bb); + Builder.CreateCondBr(hasError, + typedErrorLoadBB ? typedErrorLoadBB : errorDest.bb, + normalDest.bb); // Set up the PHI nodes on the normal edge. unsigned firstIndex = 0; @@ -3747,8 +3770,19 @@ void IRGenSILFunction::visitFullApplySite(FullApplySite site) { assert(firstIndex == normalDest.phis.size()); // Set up the PHI nodes on the error edge. 
- assert(errorDest.phis.size() == 1); - errorDest.phis[0]->addIncoming(errorValue, Builder.GetInsertBlock()); + if (!typedErrorLoadBB) { + assert(errorDest.phis.size() == 1); + errorDest.phis[0]->addIncoming(errorValue, Builder.GetInsertBlock()); + } else { + Builder.emitBlock(typedErrorLoadBB); + auto &ti = cast(IGM.getTypeInfo(errorType)); + Explosion errorValue; + ti.loadAsTake(*this, getCalleeTypedErrorResultSlot(errorType), errorValue); + for (unsigned i = 0, e = errorDest.phis.size(); i != e; ++i) { + errorDest.phis[i]->addIncoming(errorValue.claimNext(), Builder.GetInsertBlock()); + } + Builder.CreateBr(errorDest.bb); + } if (tryApplyInst->getErrorBB()->getSinglePredecessorBlock()) { // Zeroing out the error slot only in the error block increases the chance @@ -4106,6 +4140,17 @@ static void emitReturnInst(IRGenSILFunction &IGF, SILFunctionConventions conv(IGF.CurSILFn->getLoweredFunctionType(), IGF.getSILModule()); + auto getNullErrorValue = [&] () -> llvm::Value* { + if (!conv.isTypedError()) { + auto errorResultType = IGF.CurSILFn->mapTypeIntoContext( + conv.getSILErrorType(IGF.IGM.getMaximalTypeExpansionContext())); + auto errorType = + cast(IGF.IGM.getStorageType(errorResultType)); + return llvm::ConstantPointerNull::get(errorType); + } + return llvm::ConstantPointerNull::get(IGF.IGM.Int8PtrTy); + }; + // The invariant on the out-parameter is that it's always zeroed, so // there's nothing to do here. 
@@ -4121,11 +4166,7 @@ static void emitReturnInst(IRGenSILFunction &IGF, } else { if (fnType->hasErrorResult()) { SmallVector nativeResultsStorage; - auto errorResultType = IGF.CurSILFn->mapTypeIntoContext( - conv.getSILErrorType(IGF.IGM.getMaximalTypeExpansionContext())); - auto errorType = - cast(IGF.IGM.getStorageType(errorResultType)); - nativeResultsStorage.push_back(llvm::ConstantPointerNull::get(errorType)); + nativeResultsStorage.push_back(getNullErrorValue()); return emitAsyncReturn(IGF, asyncLayout, fnType, llvm::Optional>( nativeResultsStorage)); @@ -4143,12 +4184,7 @@ static void emitReturnInst(IRGenSILFunction &IGF, auto asyncLayout = getAsyncContextLayout(IGF); Explosion error; if (fnType->hasErrorResult()) { - SmallVector nativeResultsStorage; - auto errorResultType = IGF.CurSILFn->mapTypeIntoContext( - conv.getSILErrorType(IGF.IGM.getMaximalTypeExpansionContext())); - auto errorType = - cast(IGF.IGM.getStorageType(errorResultType)); - error.add(llvm::ConstantPointerNull::get(errorType)); + error.add(getNullErrorValue()); } emitAsyncReturn(IGF, asyncLayout, funcResultType, fnType, result, error); } else { @@ -4180,24 +4216,44 @@ void IRGenSILFunction::visitReturnInst(swift::ReturnInst *i) { } void IRGenSILFunction::visitThrowInst(swift::ThrowInst *i) { - // Store the exception to the error slot. 
- llvm::Value *exn = getLoweredSingletonExplosion(i->getOperand()); - + SILFunctionConventions conv(CurSILFn->getLoweredFunctionType(), + getSILModule()); if (!isAsync()) { - Builder.CreateStore(exn, getCallerErrorResultSlot()); + Explosion errorResult = getLoweredExplosion(i->getOperand()); + if (conv.isTypedError()) { + auto &ti = cast(IGM.getTypeInfo(conv.getSILErrorType( + IGM.getMaximalTypeExpansionContext()))); + llvm::Constant *flag = llvm::ConstantInt::get(IGM.IntPtrTy, 1); + flag = llvm::ConstantExpr::getIntToPtr(flag, IGM.Int8PtrTy); + Builder.CreateStore(flag, + getCallerErrorResultSlot()); + ti.initialize(*this, errorResult, getCallerTypedErrorResultSlot(), false); + } else { + Builder.CreateStore(errorResult.claimNext(), getCallerErrorResultSlot()); + } // Async functions just return to the continuation. } else if (isAsync()) { + // Store the exception to the error slot. + auto exn = getLoweredExplosion(i->getOperand()); + auto layout = getAsyncContextLayout(*this); - SILFunctionConventions conv(CurSILFn->getLoweredFunctionType(), - getSILModule()); auto funcResultType = CurSILFn->mapTypeIntoContext( conv.getSILResultType(IGM.getMaximalTypeExpansionContext())); + if (conv.isTypedError()) { + auto &ti = cast(IGM.getTypeInfo(conv.getSILErrorType( + IGM.getMaximalTypeExpansionContext()))); + ti.initialize(*this, exn, getCallerTypedErrorResultSlot(), false); + llvm::Constant *flag = llvm::ConstantInt::get(IGM.IntPtrTy, 1); + flag = llvm::ConstantExpr::getIntToPtr(flag, IGM.Int8PtrTy); + assert(exn.empty() && "Unclaimed typed error results"); + exn.reset(); + exn.add(flag); + } + Explosion empty; - Explosion error; - error.add(exn); emitAsyncReturn(*this, layout, funcResultType, - i->getFunction()->getLoweredFunctionType(), empty, error); + i->getFunction()->getLoweredFunctionType(), empty, exn); return; } @@ -5184,7 +5240,7 @@ void IRGenSILFunction::emitErrorResultVar(CanSILFunctionType FnTy, if (IGM.ShouldUseSwiftError) return; auto ErrorResultSlot = 
getCalleeErrorResultSlot(IGM.silConv.getSILType( - ErrorInfo, FnTy, IGM.getMaximalTypeExpansionContext())); + ErrorInfo, FnTy, IGM.getMaximalTypeExpansionContext()), false); auto Var = DbgValue->getVarInfo(); assert(Var && "error result without debug info"); auto Storage = diff --git a/test/IRGen/pack_metadata_marker_inserter.sil b/test/IRGen/pack_metadata_marker_inserter.sil index 7629101480b44..e69bb9ac2303a 100644 --- a/test/IRGen/pack_metadata_marker_inserter.sil +++ b/test/IRGen/pack_metadata_marker_inserter.sil @@ -15,14 +15,19 @@ case some(T) struct Int { var _value: Builtin.Int64 } +class AnyObject {} -protocol Error {} +sil_vtable AnyObject {} + +protocol Error : class {} + +protocol NonError {} struct S1 {} struct S2 {} struct S3 {} -struct GVT : Error { +struct GVT : NonError { } struct GV { diff --git a/test/IRGen/typed_throws.sil b/test/IRGen/typed_throws.sil new file mode 100644 index 0000000000000..90185d87b934d --- /dev/null +++ b/test/IRGen/typed_throws.sil @@ -0,0 +1,274 @@ +// RUN: %target-swift-frontend -primary-file %s -emit-irgen | %FileCheck %s --check-prefix=CHECK + +sil_stage canonical + +import Builtin +import Swift + +struct S { + var x: A + var y: A +} + +class A {} + +sil_vtable A {} + +sil @create_error : $@convention(thin) () -> @owned A + +// CHECK: define{{.*}} swiftcc void @throw_error(ptr swiftself %0, ptr noalias nocapture swifterror dereferenceable({{.*}}) %1, ptr %2) +// CHECK: [[ERR:%.*]] = call swiftcc ptr @create_error() +// CHECK: call ptr @swift_retain(ptr returned [[ERR]]) +// CHECK: store ptr inttoptr (i64 1 to ptr), ptr %1 +// CHECK: [[F1:%.*]] = getelementptr inbounds %T12typed_throws1SV, ptr %2, i32 0, i32 0 +// CHECK: store ptr [[ERR]], ptr [[F1]] +// CHECK: [[F2:%.*]] = getelementptr inbounds %T12typed_throws1SV, ptr %2, i32 0, i32 1 +// CHECK: store ptr [[ERR]], ptr [[F2]] +// CHECK: ret void +// CHECK: } + +sil @throw_error : $@convention(thin) () -> @error S { + %0 = function_ref @create_error : 
$@convention(thin) () -> @owned A + %1 = apply %0() : $@convention(thin) () -> @owned A + retain_value %1 : $A + %2 = struct $S(%1: $A, %1 : $A) + throw %2 : $S +} + +sil @doesnt_throw : $@convention(thin) () -> (@owned A, @error S) { + %0 = function_ref @create_error : $@convention(thin) () -> @owned A + %1 = apply %0() : $@convention(thin) () -> @owned A + return %1 : $A +} + +sil @try_apply_helper : $@convention(thin) (@owned AnyObject) -> (@owned AnyObject, @error S) + +// CHECK: define{{.*}} swiftcc void @try_apply(ptr %0) +// CHECK: entry: +// CHECK: %swifterror = alloca swifterror ptr +// CHECK: store ptr null, ptr %swifterror +// CHECK: %swifterror1 = alloca %T12typed_throws1SV +// CHECK: [[RES:%.*]] = call swiftcc ptr @try_apply_helper(ptr %0, ptr swiftself undef, ptr noalias nocapture swifterror dereferenceable({{.*}}) %swifterror, ptr %swifterror1) +// CHECK: [[ERRFLAG:%.*]] = load ptr, ptr %swifterror +// CHECK: [[C:%.*]] = icmp ne ptr [[ERRFLAG]], null +// CHECK: br i1 [[C]], label %[[ERR_B:.*]], label %[[SUCC_B:[0-9]+]] + +// CHECK: [[ERR_B]]: +// CHECK: %swifterror1.x = getelementptr inbounds %T12typed_throws1SV, ptr %swifterror1, i32 0, i32 0 +// CHECK: [[ERR_v1:%.*]] = load ptr, ptr %swifterror1.x +// CHECK: %swifterror1.y = getelementptr inbounds %T12typed_throws1SV, ptr %swifterror1, i32 0, i32 1 +// CHECK: [[ERR_v2:%.*]] = load ptr, ptr %swifterror1.y +// CHECK: br label %[[ERR2_B:[0-9]+]] + +// CHECK: [[SUCC_B]]: +// CHECK: [[R:%.*]] = phi ptr [ [[RES]], %entry ] +// CHECK: call void @swift_{{.*}}elease(ptr [[R]]) +// CHECK: br label %[[RET_B:[0-9]+]] + +// CHECK: [[ERR2_B]]: +// CHECK: [[E1:%.*]] = phi ptr [ [[ERR_v1]], %[[ERR_B]] ] +// CHECK: [[E2:%.*]] = phi ptr [ [[ERR_v2]], %[[ERR_B]] ] +// CHECK: store ptr null, ptr %swifterror +// CHECK: call void @swift_release(ptr [[E1]]) +// CHECK: call void @swift_release(ptr [[E2]]) +// CHECK: br label %[[RET_B]] + +// CHECK: [[RET_B]]: +// CHECK: ret void +// CHECK: } + +sil @try_apply : 
$@convention(thin) (@owned AnyObject) -> () { +entry(%0 : $AnyObject): + %1 = function_ref @try_apply_helper : $@convention(thin) (@owned AnyObject) -> (@owned AnyObject, @error S) + try_apply %1(%0) : $@convention(thin) (@owned AnyObject) -> (@owned AnyObject, @error S), normal bb1, error bb2 + +bb1(%2 : $AnyObject): + strong_release %2 : $AnyObject + br bb3 + +bb2(%3 : $S): + release_value %3 : $S + br bb3 + +bb3: + %4 = tuple () + return %4 : $() +} + +sil @try_apply_helper2 : $@convention(thin) (@owned AnyObject) -> (@owned AnyObject, @error A) + +sil @try_apply_multiple : $@convention(thin) (@owned AnyObject) -> () { +entry(%0 : $AnyObject): + %1 = function_ref @try_apply_helper : $@convention(thin) (@owned AnyObject) -> (@owned AnyObject, @error S) + try_apply %1(%0) : $@convention(thin) (@owned AnyObject) -> (@owned AnyObject, @error S), normal bb1, error bb2 + +bb1(%2 : $AnyObject): + strong_release %2 : $AnyObject + br bb3 + +bb2(%3 : $S): + release_value %3 : $S + br bb3 + +bb3: + %4 = function_ref @try_apply_helper2 : $@convention(thin) (@owned AnyObject) -> (@owned AnyObject, @error A) + retain_value %0 : $AnyObject + try_apply %4(%0) : $@convention(thin) (@owned AnyObject) -> (@owned AnyObject, @error A), normal bb4, error bb5 + +bb4(%5 : $AnyObject): + strong_release %5 : $AnyObject + br bb6 + +bb5(%6 : $A): + release_value %6 : $A + br bb6 + +bb6: + %7 = tuple () + return %7 : $() +} + +// CHECK: define{{.*}} swifttailcc void @does_throw_async(ptr swiftasync %0, ptr %1) +// CHECK: %.x = getelementptr inbounds %T12typed_throws1SV, ptr %1, i32 0, i32 0 +// CHECK: store ptr {{.*}}, ptr %.x +// CHECK: %.y = getelementptr inbounds %T12typed_throws1SV, ptr %1, i32 0, i32 1 +// CHECK: store ptr {{.*}}, ptr %.y +// CHECK: call i1 (ptr, i1, ...) 
@llvm.coro.end.async(ptr {{.*}}, i1 false, ptr @does_throw_async.0, ptr {{.*}}, ptr {{.*}}, ptr inttoptr (i64 1 to ptr)) +// CHECK: ret void + +sil @does_throw_async : $@convention(thin) @async () -> @error S { + %0 = function_ref @create_error : $@convention(thin) () -> @owned A + %1 = apply %0() : $@convention(thin) () -> @owned A + retain_value %1 : $A + %2 = struct $S(%1: $A, %1 : $A) + throw %2 : $S +} + +// CHECK: define{{.*}} swifttailcc void @does_not_throw_async(ptr swiftasync %0, ptr %1) +// CHECK: [[R:%.*]] = call swiftcc ptr @create_error() +// CHECK: call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr {{.*}}, i1 false, ptr @does_not_throw_async.0, ptr {{.*}}, ptr {{.*}}, ptr [[R]], ptr null) +// CHECK: ret void +sil @does_not_throw_async : $@convention(thin) @async () -> (@owned A, @error S) { + %0 = function_ref @create_error : $@convention(thin) () -> @owned A + %1 = apply %0() : $@convention(thin) () -> @owned A + return %1 : $A +} + + +sil @try_apply_helper_async : $@convention(thin) @async (@owned AnyObject) -> (@owned AnyObject, @error S) + +sil @try_apply_async : $@convention(thin) @async (@owned AnyObject) -> () { +entry(%0 : $AnyObject): + %1 = function_ref @try_apply_helper_async : $@convention(thin) @async (@owned AnyObject) -> (@owned AnyObject, @error S) + try_apply %1(%0) : $@convention(thin) @async (@owned AnyObject) -> (@owned AnyObject, @error S), normal bb1, error bb2 + +bb1(%2 : $AnyObject): + strong_release %2 : $AnyObject + br bb3 + +bb2(%3 : $S): + release_value %3 : $S + br bb3 + +bb3: + %4 = tuple () + return %4 : $() +} + +sil @try_apply_helper_async2 : $@convention(thin) @async (@owned AnyObject) -> (@owned AnyObject, @error A) + +sil @try_apply_multiple_async : $@convention(thin) @async (@owned AnyObject) -> () { +entry(%0 : $AnyObject): + %1 = function_ref @try_apply_helper_async : $@convention(thin) @async (@owned AnyObject) -> (@owned AnyObject, @error S) + try_apply %1(%0) : $@convention(thin) @async (@owned AnyObject) -> 
(@owned AnyObject, @error S), normal bb1, error bb2 + +bb1(%2 : $AnyObject): + strong_release %2 : $AnyObject + br bb3 + +bb2(%3 : $S): + release_value %3 : $S + br bb3 + +bb3: + %4 = function_ref @try_apply_helper_async2 : $@convention(thin) @async (@owned AnyObject) -> (@owned AnyObject, @error A) + retain_value %0 : $AnyObject + try_apply %4(%0) : $@convention(thin) @async (@owned AnyObject) -> (@owned AnyObject, @error A), normal bb4, error bb5 + +bb4(%5 : $AnyObject): + strong_release %5 : $AnyObject + br bb6 + +bb5(%6 : $A): + release_value %6 : $A + br bb6 + +bb6: + %7 = tuple () + return %7 : $() +} + +// CHECK: define{{.*}} internal swiftcc ptr @"$s16try_apply_helperTA"(ptr swiftself %0, ptr noalias nocapture swifterror dereferenceable({{.*}}) %1, ptr %2) +// CHECK: tail call swiftcc ptr @try_apply_helper(ptr {{.*}}, ptr swiftself undef, ptr noalias nocapture swifterror dereferenceable({{.*}}) %1, ptr %2) +// CHECK: ret ptr + +sil @partial_apply_test : $@convention(thin) (@owned AnyObject) -> @owned @callee_guaranteed () ->(@owned AnyObject, @error S) { +entry(%0: $AnyObject): + %f = function_ref @try_apply_helper : $@convention(thin) (@owned AnyObject) -> (@owned AnyObject, @error S) + %36 = partial_apply [callee_guaranteed] %f(%0) : $@convention(thin) (@owned AnyObject) -> (@owned AnyObject, @error S) + return %36 : $@callee_guaranteed () ->(@owned AnyObject, @error S) +} + +// CHECK: define{{.*}} internal swifttailcc void @"$s22try_apply_helper_asyncTA"(ptr swiftasync %0, ptr swiftself %1, ptr %2) +// CHECK: call { ptr, ptr, ptr } (i32, ptr, ptr, ...) 
@llvm.coro.suspend.async.sl_p0p0p0s(i32 512, ptr {{.*}}, ptr @__swift_async_resume_project_context, ptr @"$s22try_apply_helper_asyncTA.0", ptr @try_apply_helper_async, ptr {{.*}}, ptr {{.*}}, ptr %2)
+
+sil @partial_apply_test_async : $@convention(thin) (@owned AnyObject) -> @owned @callee_guaranteed @async () ->(@owned AnyObject, @error S) {
+entry(%0: $AnyObject):
+  %f = function_ref @try_apply_helper_async : $@convention(thin) @async (@owned AnyObject) -> (@owned AnyObject, @error S)
+  %36 = partial_apply [callee_guaranteed] %f(%0) : $@convention(thin) @async (@owned AnyObject) -> (@owned AnyObject, @error S)
+  return %36 : $@callee_guaranteed @async () ->(@owned AnyObject, @error S)
+}
+
+// CHECK: define{{.*}} swiftcc void @apply_closure(ptr %0, ptr %1)
+// CHECK: entry:
+// CHECK: %swifterror = alloca swifterror ptr
+// CHECK: store ptr null, ptr %swifterror
+// CHECK: %swifterror1 = alloca %T12typed_throws1SV
+// CHECK: call swiftcc ptr %0(ptr swiftself %1, ptr noalias nocapture swifterror dereferenceable({{[0-9]+}}) %swifterror, ptr %swifterror1)
+
+sil @apply_closure : $@convention(thin) (@guaranteed @callee_guaranteed () -> (@owned AnyObject, @error S)) -> () {
+entry(%0 : $@callee_guaranteed () ->(@owned AnyObject, @error S)):
+  try_apply %0() : $@callee_guaranteed () -> (@owned AnyObject, @error S), normal bb4, error bb5
+
+bb4(%5 : $AnyObject):
+  strong_release %5 : $AnyObject
+  br bb6
+
+bb5(%6 : $S):
+  release_value %6 : $S
+  br bb6
+
+bb6:
+  %t = tuple()
+  return %t : $()
+}
+
+// CHECK: define{{.*}} swifttailcc void @apply_closure_async(ptr swiftasync %0, ptr %1, ptr %2)
+// CHECK: %swifterror = alloca %T12typed_throws1SV
+// CHECK: call { ptr, ptr, ptr } (i32, ptr, ptr, ...) 
@llvm.coro.suspend.async.sl_p0p0p0s(i32 512, ptr %{{[0-9]+}}, ptr @__swift_async_resume_project_context, ptr @apply_closure_async.0, ptr %{{[0-9]+}}, ptr %{{[0-9]+}}, ptr %2, ptr %swifterror) +sil @apply_closure_async : $@convention(thin) @async (@guaranteed @callee_guaranteed @async () -> (@owned AnyObject, @error S)) -> () { +entry(%0 : $@callee_guaranteed @async () ->(@owned AnyObject, @error S)): + try_apply %0() : $@callee_guaranteed @async () -> (@owned AnyObject, @error S), normal bb4, error bb5 + +bb4(%5 : $AnyObject): + strong_release %5 : $AnyObject + br bb6 + +bb5(%6 : $S): + release_value %6 : $S + br bb6 + +bb6: + %t = tuple() + return %t : $() +}