diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index a20b1ab298f9c..f9f401dff7e7f 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -279,6 +279,9 @@ Improvements to Clang's diagnostics
 - The :doc:`ThreadSafetyAnalysis` attributes ``ACQUIRED_BEFORE(...)`` and
   ``ACQUIRED_AFTER(...)`` have been moved to the stable feature set and no
   longer require ``-Wthread-safety-beta`` to be used.
+- The :doc:`ThreadSafetyAnalysis` gains basic alias analysis of capability
+  pointers under ``-Wthread-safety-beta`` (still experimental), which reduces
+  both false positives and false negatives through more precise analysis.
 
 Improvements to Clang's time-trace
 ----------------------------------
diff --git a/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h b/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
index 6c97905a2d7f9..d20f172f446e6 100644
--- a/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
+++ b/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
@@ -35,6 +35,7 @@
 #include "llvm/ADT/PointerUnion.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/Casting.h"
+#include <functional>
 #include <sstream>
 #include <string>
 #include <utility>
@@ -386,6 +387,11 @@ class SExprBuilder {
     SelfVar->setKind(til::Variable::VK_SFun);
   }
 
+  // Create a placeholder for `this`: we don't know the VarDecl on construction yet.
+  til::LiteralPtr *createThisPlaceholder() {
+    return new (Arena) til::LiteralPtr(nullptr);
+  }
+
   // Translate a clang expression in an attribute to a til::SExpr.
   // Constructs the context from D, DeclExp, and SelfDecl.
   CapabilityExpr translateAttrExpr(const Expr *AttrExp, const NamedDecl *D,
@@ -394,8 +400,8 @@ class SExprBuilder {
 
   CapabilityExpr translateAttrExpr(const Expr *AttrExp, CallingContext *Ctx);
 
-  // Translate a variable reference.
-  til::LiteralPtr *createVariable(const VarDecl *VD);
+  // Translate a VarDecl to its canonical TIL expression.
+  til::SExpr *translateVariable(const VarDecl *VD, CallingContext *Ctx);
 
   // Translate a clang statement or expression to a TIL expression.
   // Also performs substitution of variables; Ctx provides the context.
@@ -412,6 +418,10 @@ class SExprBuilder {
   const til::SCFG *getCFG() const { return Scfg; }
   til::SCFG *getCFG() { return Scfg; }
 
+  void setLookupLocalVarExpr(std::function<const Expr *(const NamedDecl *)> F) {
+    LookupLocalVarExpr = std::move(F);
+  }
+
 private:
   // We implement the CFGVisitor API
   friend class CFGWalker;
@@ -445,6 +455,7 @@ class SExprBuilder {
                                          const AbstractConditionalOperator *C,
                                          CallingContext *Ctx);
   til::SExpr *translateDeclStmt(const DeclStmt *S, CallingContext *Ctx);
+  til::SExpr *translateStmtExpr(const StmtExpr *SE, CallingContext *Ctx);
 
   // Map from statements in the clang CFG to SExprs in the til::SCFG.
   using StatementMap = llvm::DenseMap<const Stmt *, til::SExpr *>;
@@ -531,6 +542,11 @@ class SExprBuilder {
   std::vector<til::Phi *> IncompleteArgs;
   til::BasicBlock *CurrentBB = nullptr;
   BlockInfo *CurrentBlockInfo = nullptr;
+
+  // Recursion guard.
+  llvm::DenseSet<const VarDecl *> VarsBeingTranslated;
+  // Context-dependent lookup of currently valid definitions of local variables.
+  std::function<const Expr *(const NamedDecl *)> LookupLocalVarExpr;
 };
 
 #ifndef NDEBUG
diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp
index 131170df9976e..cee98d58a6112 100644
--- a/clang/lib/Analysis/ThreadSafety.cpp
+++ b/clang/lib/Analysis/ThreadSafety.cpp
@@ -39,6 +39,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/ImmutableMap.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Support/Allocator.h"
@@ -537,6 +538,13 @@ class LocalVariableMap {
 protected:
   friend class VarMapBuilder;
 
+  // Resolve any definition ID down to its non-reference base ID.
+  unsigned getCanonicalDefinitionID(unsigned ID) {
+    while (ID > 0 && VarDefinitions[ID].isReference())
+      ID = VarDefinitions[ID].Ref;
+    return ID;
+  }
+
   // Get the current context index
   unsigned getContextIndex() { return SavedContexts.size()-1; }
 
@@ -621,6 +629,7 @@ class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
 
   void VisitDeclStmt(const DeclStmt *S);
   void VisitBinaryOperator(const BinaryOperator *BO);
+  void VisitCallExpr(const CallExpr *CE);
 };
 
 } // namespace
@@ -666,6 +675,56 @@ void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
   }
 }
 
+// Invalidates local variable definitions if a variable escapes.
+void VarMapBuilder::VisitCallExpr(const CallExpr *CE) {
+  const FunctionDecl *FD = CE->getDirectCallee();
+  if (!FD)
+    return;
+
+  // Heuristic for likely-benign functions that pass by mutable reference. This
+  // is needed to avoid a slew of false positives due to mutable reference
+  // passing where the captured reference is usually passed on by value.
+  if (const IdentifierInfo *II = FD->getIdentifier()) {
+    // Any kind of std::bind-like function.
+    if (II->isStr("bind") || II->isStr("bind_front"))
+      return;
+  }
+
+  // Invalidate local variable definitions that are passed by non-const
+  // reference or non-const pointer.
+  for (unsigned Idx = 0; Idx < CE->getNumArgs(); ++Idx) {
+    if (Idx >= FD->getNumParams())
+      break;
+
+    const Expr *Arg = CE->getArg(Idx)->IgnoreParenImpCasts();
+    const ParmVarDecl *PVD = FD->getParamDecl(Idx);
+    QualType ParamType = PVD->getType();
+
+    // Potential reassignment if passed by non-const reference / pointer.
+    const ValueDecl *VDec = nullptr;
+    if (ParamType->isReferenceType() &&
+        !ParamType->getPointeeType().isConstQualified()) {
+      if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg))
+        VDec = DRE->getDecl();
+    } else if (ParamType->isPointerType() &&
+               !ParamType->getPointeeType().isConstQualified()) {
+      Arg = Arg->IgnoreParenCasts();
+      if (const auto *UO = dyn_cast<UnaryOperator>(Arg)) {
+        if (UO->getOpcode() == UO_AddrOf) {
+          const Expr *SubE = UO->getSubExpr()->IgnoreParenCasts();
+          if (const auto *DRE = dyn_cast<DeclRefExpr>(SubE))
+            VDec = DRE->getDecl();
+        }
+      }
+    }
+
+    if (VDec && Ctx.lookup(VDec)) {
+      Ctx = VMap->clearDefinition(VDec, Ctx);
+      VMap->saveContext(CE, Ctx);
+    }
+  }
+}
+
 // Computes the intersection of two contexts.  The intersection is the
 // set of variables which have the same definition in both contexts;
 // variables with different definitions are discarded.
@@ -674,11 +733,16 @@ LocalVariableMap::intersectContexts(Context C1, Context C2) {
   Context Result = C1;
   for (const auto &P : C1) {
     const NamedDecl *Dec = P.first;
-    const unsigned *i2 = C2.lookup(Dec);
-    if (!i2)             // variable doesn't exist on second path
+    const unsigned *I2 = C2.lookup(Dec);
+    if (!I2) {
+      // The variable doesn't exist on the second path.
      Result = removeDefinition(Dec, Result);
-    else if (*i2 != P.second)  // variable exists, but has different definition
+    } else if (getCanonicalDefinitionID(P.second) !=
+               getCanonicalDefinitionID(*I2)) {
+      // If the canonical definitions mismatch, the underlying definitions
+      // are different; invalidate.
       Result = clearDefinition(Dec, Result);
+    }
   }
   return Result;
 }
@@ -698,13 +762,22 @@ LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
 // createReferenceContext.
 void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
   for (const auto &P : C1) {
-    unsigned i1 = P.second;
-    VarDefinition *VDef = &VarDefinitions[i1];
+    const unsigned I1 = P.second;
+    VarDefinition *VDef = &VarDefinitions[I1];
     assert(VDef->isReference());
 
-    const unsigned *i2 = C2.lookup(P.first);
-    if (!i2 || (*i2 != i1))
-      VDef->Ref = 0;    // Mark this variable as undefined
+    const unsigned *I2 = C2.lookup(P.first);
+    if (!I2) {
+      // The variable does not exist at the end of the loop; invalidate.
+      VDef->Ref = 0;
+      continue;
+    }
+
+    // Compare the canonical IDs. This correctly handles chains of references
+    // and determines if the variable is truly loop-invariant.
+    if (getCanonicalDefinitionID(VDef->Ref) != getCanonicalDefinitionID(*I2)) {
+      VDef->Ref = 0; // Mark this variable as undefined
+    }
   }
 }
@@ -1196,11 +1269,10 @@ class ThreadSafetyAnalyzer {
 
   void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
                           const Expr *Exp, AccessKind AK, Expr *MutexExp,
-                          ProtectedOperationKind POK, til::LiteralPtr *Self,
+                          ProtectedOperationKind POK, til::SExpr *Self,
                           SourceLocation Loc);
   void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
-                       Expr *MutexExp, til::LiteralPtr *Self,
-                       SourceLocation Loc);
+                       Expr *MutexExp, til::SExpr *Self, SourceLocation Loc);
 
   void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                    ProtectedOperationKind POK);
@@ -1596,6 +1668,16 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
   const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
   const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
 
+  // Temporarily set the lookup context for SExprBuilder.
+  SxBuilder.setLookupLocalVarExpr([&](const NamedDecl *D) -> const Expr * {
+    if (!Handler.issueBetaWarnings())
+      return nullptr;
+    auto Ctx = LVarCtx;
+    return LocalVarMap.lookupExpr(D, Ctx);
+  });
+  auto Cleanup = llvm::make_scope_exit(
+      [this] { SxBuilder.setLookupLocalVarExpr(nullptr); });
+
   const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
   if (!Exp)
     return;
@@ -1652,7 +1734,7 @@ class BuildLockset : public ConstStmtVisitor<BuildLockset> {
   }
 
   void handleCall(const Expr *Exp, const NamedDecl *D,
-                  til::LiteralPtr *Self = nullptr,
+                  til::SExpr *Self = nullptr,
                   SourceLocation Loc = SourceLocation());
   void examineArguments(const FunctionDecl *FD,
                         CallExpr::const_arg_iterator ArgBegin,
@@ -1664,7 +1746,17 @@
                const FactSet &FunctionExitFSet)
       : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
         FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
-        CtxIndex(Info.EntryIndex) {}
+        CtxIndex(Info.EntryIndex) {
+    Analyzer->SxBuilder.setLookupLocalVarExpr(
+        [this](const NamedDecl *D) -> const Expr * {
+          if (!Analyzer->Handler.issueBetaWarnings())
+            return nullptr;
+          auto Ctx = LVarCtx;
+          return Analyzer->LocalVarMap.lookupExpr(D, Ctx);
+        });
+  }
+
+  ~BuildLockset() { Analyzer->SxBuilder.setLookupLocalVarExpr(nullptr); }
 
   void VisitUnaryOperator(const UnaryOperator *UO);
   void VisitBinaryOperator(const BinaryOperator *BO);
@@ -1682,7 +1774,7 @@ class BuildLockset : public ConstStmtVisitor<BuildLockset> {
 /// of at least the passed in AccessKind.
 void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
     const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
-    Expr *MutexExp, ProtectedOperationKind POK, til::LiteralPtr *Self,
+    Expr *MutexExp, ProtectedOperationKind POK, til::SExpr *Self,
     SourceLocation Loc) {
   LockKind LK = getLockKindFromAccessKind(AK);
   CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
@@ -1741,8 +1833,7 @@ void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
 /// Warn if the LSet contains the given lock.
 void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
                                            const NamedDecl *D, const Expr *Exp,
-                                           Expr *MutexExp,
-                                           til::LiteralPtr *Self,
+                                           Expr *MutexExp, til::SExpr *Self,
                                            SourceLocation Loc) {
   CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
   if (Cp.isInvalid()) {
@@ -1910,7 +2001,7 @@ void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
 /// of an implicitly called cleanup function.
 /// \param Loc If \p Exp = nullptr, the location.
 void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
-                              til::LiteralPtr *Self, SourceLocation Loc) {
+                              til::SExpr *Self, SourceLocation Loc) {
   CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
   CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
   CapExprSet ScopedReqsAndExcludes;
@@ -1922,7 +2013,7 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
     const auto *TagT = Exp->getType()->getAs<TagType>();
     if (D->hasAttrs() && TagT && Exp->isPRValue()) {
       til::LiteralPtr *Placeholder =
-          Analyzer->SxBuilder.createVariable(nullptr);
+          Analyzer->SxBuilder.createThisPlaceholder();
       [[maybe_unused]] auto inserted =
           Analyzer->ConstructedObjects.insert({Exp, Placeholder});
       assert(inserted.second && "Are we visiting the same expression again?");
@@ -2216,6 +2307,9 @@ void BuildLockset::examineArguments(const FunctionDecl *FD,
 }
 
 void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
+  // adjust the context
+  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, Exp, LVarCtx);
+
   if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
     const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
     // ME can be null when calling a method pointer
@@ -2603,7 +2697,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
       }
       if (UnderlyingLocks.empty())
         continue;
-      CapabilityExpr Cp(SxBuilder.createVariable(Param), StringRef(),
+      CapabilityExpr Cp(SxBuilder.translateVariable(Param, nullptr),
+                        StringRef(),
                         /*Neg=*/false, /*Reentrant=*/false);
       auto *ScopedEntry = FactMan.createFact<ScopedLockableFactEntry>(
           Cp, Param->getLocation(), FactEntry::Declared,
@@ -2721,17 +2816,19 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
         if (!DD->hasAttrs())
           break;
 
-        LocksetBuilder.handleCall(nullptr, DD,
-                                  SxBuilder.createVariable(AD.getVarDecl()),
-                                  AD.getTriggerStmt()->getEndLoc());
+        LocksetBuilder.handleCall(
+            nullptr, DD,
+            SxBuilder.translateVariable(AD.getVarDecl(), nullptr),
+            AD.getTriggerStmt()->getEndLoc());
         break;
       }
 
       case CFGElement::CleanupFunction: {
         const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
-        LocksetBuilder.handleCall(/*Exp=*/nullptr, CF.getFunctionDecl(),
-                                  SxBuilder.createVariable(CF.getVarDecl()),
-                                  CF.getVarDecl()->getLocation());
+        LocksetBuilder.handleCall(
+            /*Exp=*/nullptr, CF.getFunctionDecl(),
+            SxBuilder.translateVariable(CF.getVarDecl(), nullptr),
+            CF.getVarDecl()->getLocation());
         break;
       }
diff --git a/clang/lib/Analysis/ThreadSafetyCommon.cpp b/clang/lib/Analysis/ThreadSafetyCommon.cpp
index 68c27eeb7e8e6..25ad673b58db6 100644
--- a/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -26,6 +26,7 @@
 #include "clang/Basic/LLVM.h"
 #include "clang/Basic/OperatorKinds.h"
 #include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/ScopeExit.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringRef.h"
 #include <algorithm>
@@ -239,7 +240,36 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
   return CapabilityExpr(E, AttrExp->getType(), Neg);
 }
 
-til::LiteralPtr *SExprBuilder::createVariable(const VarDecl *VD) {
+til::SExpr *SExprBuilder::translateVariable(const VarDecl *VD,
+                                            CallingContext *Ctx) {
+  assert(VD);
+
+  // General recursion guard for x = f(x). If we are already in the process of
+  // defining VD, use its pre-assignment value to break the cycle.
+  if (VarsBeingTranslated.contains(VD->getCanonicalDecl()))
+    return new (Arena) til::LiteralPtr(VD);
+  VarsBeingTranslated.insert(VD->getCanonicalDecl());
+  auto Cleanup = llvm::make_scope_exit(
+      [&] { VarsBeingTranslated.erase(VD->getCanonicalDecl()); });
+
+  QualType Ty = VD->getType();
+  if (!VD->isStaticLocal() && Ty->isPointerType()) {
+    // Substitute local variable aliases with a canonical definition.
+    if (LookupLocalVarExpr) {
+      // Attempt to resolve an alias through the more complex local variable map
+      // lookup. This will fail with complex control-flow graphs (where we
+      // revert to no alias resolution to retain stable variable names).
+      if (const Expr *E = LookupLocalVarExpr(VD)) {
+        til::SExpr *Result = translate(E, Ctx);
+        // Unsupported expressions (such as heap allocations) will be undefined;
+        // rather than failing here, we simply revert to the pointer being the
+        // canonical variable.
+        if (Result && !isa<til::Undefined>(Result))
+          return Result;
+      }
+    }
+  }
+  return new (Arena) til::LiteralPtr(VD);
 }
@@ -311,6 +341,8 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
 
   case Stmt::DeclStmtClass:
     return translateDeclStmt(cast<DeclStmt>(S), Ctx);
+  case Stmt::StmtExprClass:
+    return translateStmtExpr(cast<StmtExpr>(S), Ctx);
   default:
     break;
   }
@@ -351,6 +383,9 @@ til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
             : cast<FunctionDecl>(D)->getCanonicalDecl()->getParamDecl(I);
   }
 
+  if (const auto *VarD = dyn_cast<VarDecl>(VD))
+    return translateVariable(VarD, Ctx);
+
   // For non-local variables, treat it as a reference to a named object.
   return new (Arena) til::LiteralPtr(VD);
 }
@@ -689,6 +724,15 @@ SExprBuilder::translateDeclStmt(const DeclStmt *S, CallingContext *Ctx) {
   return nullptr;
 }
 
+til::SExpr *SExprBuilder::translateStmtExpr(const StmtExpr *SE,
+                                            CallingContext *Ctx) {
+  // The value of a statement expression is the value of the last statement,
+  // which must be an expression.
+  const CompoundStmt *CS = SE->getSubStmt();
+  return CS->body_empty() ? new (Arena) til::Undefined(SE)
+                          : translate(CS->body_back(), Ctx);
+}
+
 // If (E) is non-trivial, then add it to the current basic block, and
 // update the statement map so that S refers to E.  Returns a new variable
 // that refers to E.
diff --git a/clang/test/Sema/warn-thread-safety-analysis.c b/clang/test/Sema/warn-thread-safety-analysis.c
index b43f97af9162e..549cb1231baa6 100644
--- a/clang/test/Sema/warn-thread-safety-analysis.c
+++ b/clang/test/Sema/warn-thread-safety-analysis.c
@@ -184,9 +184,13 @@ int main(void) {
   /// Cleanup functions
   {
     struct Mutex* const __attribute__((cleanup(unlock_scope))) scope = &mu1;
-    mutex_exclusive_lock(scope); // Note that we have to lock through scope, because no alias analysis!
+    mutex_exclusive_lock(scope); // Lock through scope works.
    // Cleanup happens automatically -> no warning.
   }
+  {
+    struct Mutex* const __attribute__((unused, cleanup(unlock_scope))) scope = &mu1;
+    mutex_exclusive_lock(&mu1); // With basic alias analysis, locking through mu1 also works.
+ } foo_.a_value = 0; // expected-warning {{writing variable 'a_value' requires holding mutex 'mu_' exclusively}} *foo_.a_ptr = 1; // expected-warning {{writing the value pointed to by 'a_ptr' requires holding mutex 'bar.other_mu' exclusively}} diff --git a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp index d82e2484ace1e..ef662b78fb6f1 100644 --- a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp +++ b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp @@ -1556,9 +1556,9 @@ void main() { Child *c; Base *b = c; - b->func1(); // expected-warning {{calling function 'func1' requires holding mutex 'b->mu_' exclusively}} + b->func1(); // expected-warning {{calling function 'func1' requires holding mutex 'c->mu_' exclusively}} b->mu_.Lock(); - b->func2(); // expected-warning {{cannot call function 'func2' while mutex 'b->mu_' is held}} + b->func2(); // expected-warning {{cannot call function 'func2' while mutex 'c->mu_' is held}} b->mu_.Unlock(); c->func1(); // expected-warning {{calling function 'func1' requires holding mutex 'c->mu_' exclusively}} @@ -6875,6 +6875,34 @@ class PointerGuard { }; } // namespace Derived_Smart_Pointer +// Test for capabilities that are heap-allocated and stored in static variables. +namespace FunctionStaticVariable { +struct Data { + Mutex mu; + int x GUARDED_BY(mu); +}; + +void testStaticVariable() { +} + +void testHeapAllocation() { + static Data *d = new Data; + d->mu.Lock(); + d->x = 5; + d->mu.Unlock(); +} + +void testHeapAllocationBug() { + static auto *d = new Data; + d->x = 10; // expected-warning{{writing variable 'x' requires holding mutex 'd->mu' exclusively}} +} + +void testHeapAllocationScopedLock() { + static Mutex *mu = new Mutex; + MutexLock lock(mu); +} +} // namespace FunctionStaticVariable + namespace Reentrancy { class LOCKABLE REENTRANT_CAPABILITY ReentrantMutex { @@ -7238,3 +7266,341 @@ class TestNegativeWithReentrantMutex { }; } // namespace Reentrancy + +// Tests for tracking aliases of capabilities. 
+namespace CapabilityAliases {
+struct Foo {
+  Mutex mu;
+  int data GUARDED_BY(mu);
+};
+
+Foo *returnsFoo();
+Foo *returnsFoo(Foo *foo);
+void locksRequired(Foo *foo) EXCLUSIVE_LOCKS_REQUIRED(foo->mu);
+void escapeAlias(int a, Foo *&ptr);
+void escapeAlias(int b, Foo **ptr);
+void passByConstRef(Foo* const& ptr);
+
+void testBasicPointerAlias(Foo *f) {
+  Foo *ptr = f;
+  ptr->mu.Lock();   // lock through alias
+  f->data = 42;     // access through original
+  ptr->mu.Unlock(); // unlock through alias
+}
+
+void testBasicPointerAliasNoInit(Foo *f) {
+  Foo *ptr;
+
+  ptr = nullptr;
+  ptr = f;
+  ptr->mu.Lock();
+  f->data = 42;
+  ptr->mu.Unlock();
+  ptr = nullptr;
+}
+
+void testBasicPointerAliasLoop() {
+  for (;;) {
+    Foo *f = returnsFoo();
+    Foo *ptr = f;
+    if (!ptr)
+      break;
+    ptr->mu.Lock();
+    f->data = 42;
+    ptr->mu.Unlock();
+  }
+}
+
+void testPointerAliasNoEscape1(Foo *f) {
+  Foo *ptr = f;
+  testBasicPointerAlias(ptr); // pass alias by value
+
+  ptr->mu.Lock();
+  f->data = 42;
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasNoEscape2(Foo *f) {
+  Foo *ptr = f;
+  passByConstRef(ptr); // pass alias by const ref
+
+  ptr->mu.Lock();
+  f->data = 42;
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasNoEscape3() {
+  Foo *ptr = returnsFoo();
+  ptr->mu.Lock();
+  locksRequired(ptr);
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasEscape1(Foo *f) {
+  Foo *ptr = f;
+  escapeAlias(0, ptr);
+
+  ptr->mu.Lock();
+  f->data = 42; // expected-warning{{writing variable 'data' requires holding mutex 'f->mu' exclusively}} \
+                // expected-note{{found near match 'ptr->mu'}}
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasEscape2(Foo *f) {
+  Foo *ptr = f;
+  escapeAlias(0, &ptr);
+
+  ptr->mu.Lock();
+  f->data = 42; // expected-warning{{writing variable 'data' requires holding mutex 'f->mu' exclusively}} \
+                // expected-note{{found near match 'ptr->mu'}}
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasEscape3(Foo *f) {
+  Foo *ptr;
+
+  ptr = f;
+  escapeAlias(0, &ptr);
+
+  ptr->mu.Lock();
+  f->data = 42; // expected-warning{{writing variable 'data' requires holding mutex 'f->mu' exclusively}} \
+                // expected-note{{found near match 'ptr->mu'}}
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasEscapeAndReset(Foo *f) {
+  Foo *ptr;
+
+  ptr = f;
+  escapeAlias(0, &ptr);
+  ptr = f;
+
+  ptr->mu.Lock();
+  f->data = 42;
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasTryLock1() {
+  Foo *ptr = returnsFoo();
+  if (ptr->mu.TryLock()) {
+    locksRequired(ptr);
+    ptr->mu.Unlock();
+  }
+}
+
+void testPointerAliasTryLock2() {
+  Foo *ptr;
+  ptr = returnsFoo();
+  Foo *ptr2 = ptr;
+  if (ptr->mu.TryLock()) {
+    locksRequired(ptr);
+    ptr2->mu.Unlock();
+  }
+}
+
+// FIXME: This test demonstrates a dubious pattern that the analysis correctly
+// flags as unsafe, though the user might perceive it as a false positive. The
+// pattern combines a TryLock() failure path with a conditional reassignment of
+// the pointer being locked:
+//
+// 1. The conditional reassignment `ptr = returnsFoo(ptr);` forces `ptr` to
+//    become a phi node in the CFG at the subsequent merge point. The alias
+//    analysis correctly tracks that `ptr` could refer to one of two distinct
+//    objects.
+//
+// 2. The lock acquired on the `!TryLock()` path should be (conceptually)
+//    `phi(P1, P2)->mu`, while the lock on the successful path is on the
+//    original `P1->mu`. When the paths merge, the analysis currently discards
+//    the alias as it cannot prove a single alias on that path.
+// +// While this pattern is stylistically fragile and difficult to reason about, a +// robust solution would require a more advanced symbolic representation of +// capabilities within the analyzer. For now, we warn on such ambiguity. +void testPointerAliasTryLockDubious(int x) { + Foo *ptr = returnsFoo(); + if (!ptr->mu.TryLock()) { // expected-note{{mutex acquired here}} + if (x) + ptr = returnsFoo(ptr); // <-- this breaks the pattern + ptr->mu.Lock(); // expected-note{{mutex acquired here}} + } + ptr->data = 42; // expected-warning{{writing variable 'data' requires holding mutex 'ptr->mu' exclusively}} \ + // expected-warning{{mutex 'ptr->mu' is not held on every path through here}} \ + // expected-warning{{mutex 'returnsFoo().mu' is not held on every path through here}} + ptr->mu.Unlock(); // expected-warning{{releasing mutex 'ptr->mu' that was not held}} +} + +void testReassignment() { + Foo f1, f2; + Foo *ptr = &f1; + ptr->mu.Lock(); + f1.data = 42; + ptr->mu.Unlock(); + + ptr = &f2; + ptr->mu.Lock(); + f2.data = 42; + f1.data = 42; // expected-warning{{writing variable 'data' requires holding mutex 'f1.mu'}} \ + // expected-note{{found near match 'f2.mu'}} + ptr->mu.Unlock(); +} + +// Nested field access through pointer +struct Container { + Foo foo; +}; + +void testNestedAccess(Container *c) { + Foo *ptr = &c->foo; // pointer to nested field + ptr->mu.Lock(); + c->foo.data = 42; + ptr->mu.Unlock(); +} + +void testNestedAcquire(Container *c) EXCLUSIVE_LOCK_FUNCTION(&c->foo.mu) { + Foo *buf = &c->foo; + buf->mu.Lock(); +} + +struct ContainerOfPtr { + Foo *foo_ptr; +}; + +void testIndirectAccess(ContainerOfPtr *fc) { + Foo *ptr = fc->foo_ptr; // get pointer + ptr->mu.Lock(); + fc->foo_ptr->data = 42; // access via original + ptr->mu.Unlock(); +} + +void testControlFlowDoWhile(Foo *f, int x) { + Foo *ptr = f; + + f->mu.Lock(); + if (x) { + // complex merge + do { } while (x--); + } + ptr->data = 42; + ptr->mu.Unlock(); +} + +// FIXME: No alias tracking through complex control flow. +void testComplexControlFlow(Foo *f1, Foo *f2, bool cond) { + Foo *ptr; + if (cond) { + ptr = f1; + } else { + ptr = f2; + } + ptr->mu.Lock(); + if (cond) { + f1->data = 42; // expected-warning{{writing variable 'data' requires holding mutex 'f1->mu' exclusively}} \ + // expected-note{{found near match 'ptr->mu'}} + } else { + f2->data = 42; // expected-warning{{writing variable 'data' requires holding mutex 'f2->mu' exclusively}} \ + // expected-note{{found near match 'ptr->mu'}} + } + ptr->mu.Unlock(); +} + +void testLockFunction(Foo *f) EXCLUSIVE_LOCK_FUNCTION(&f->mu) { + Mutex *mu = &f->mu; + mu->Lock(); +} + +void testUnlockFunction(Foo *f) UNLOCK_FUNCTION(&f->mu) { + Mutex *mu = &f->mu; + mu->Unlock(); +} + +// This is an idiom to deal with "pointer to returned object has a lock held, +// but you must unlock it later" where the statement expression would be hidden +// behind a macro. +void lockWithinStatementExpr() { + Foo *f = ({ auto x = returnsFoo(); x->mu.Lock(); x; }); + f->data = 42; + f->mu.Unlock(); +} + +// Semantically UB, but let's not crash the compiler with this (should be +// handled by -Wuninitialized). 
+void testSelfInit() {
+  Mutex *mu = mu; // don't do this at home
+  mu->Lock();
+  mu->Unlock();
+}
+
+void testSelfAssign() {
+  Foo *f = returnsFoo();
+  f = f;
+  f->mu.Lock();
+  f->data = 42;
+  f->mu.Unlock();
+}
+
+void testRecursiveAssign() {
+  Foo *f = returnsFoo();
+  f = returnsFoo(f);
+  f->mu.Lock();
+  f->data = 42;
+  f->mu.Unlock();
+}
+
+void testNew(Mutex *&out, int &x) {
+  Mutex *mu = new Mutex;
+  __atomic_store_n(&out, mu, __ATOMIC_RELEASE);
+  mu->Lock();
+  x = 42; // ... perhaps guarded by mu
+  mu->Unlock();
+}
+
+void testNestedLoopInvariant(Container *c, int n) {
+  Foo *ptr = &c->foo;
+  ptr->mu.Lock();
+
+  for (int i = 0; i < n; ++i) {
+    for (int j = 0; j < n; ++j) {
+    }
+  }
+
+  c->foo.data = 42; // ok: alias still valid
+  ptr->mu.Unlock();
+}
+
+void testLoopWithBreak(Foo *f, bool cond) {
+  Foo *ptr = f;
+  ptr->mu.Lock();
+  for (int i = 0; i < 10; ++i) {
+    if (cond) {
+      break; // merge point is after the loop
+    }
+  }
+  f->data = 42; // ok
+  ptr->mu.Unlock();
+}
+
+void testLoopWithContinue(Foo *f, bool cond) {
+  Foo *ptr = f;
+  ptr->mu.Lock();
+  for (int i = 0; i < 10; ++i) {
+    if (cond) {
+      continue; // tests merge at the top of the loop.
+    }
+  }
+  f->data = 42; // ok
+  ptr->mu.Unlock();
+}
+
+void testLoopConditionalReassignment(Foo *f1, Foo *f2, bool cond) {
+  Foo *ptr = f1;
+  ptr->mu.Lock(); // expected-note{{mutex acquired here}}
+
+  for (int i = 0; i < 10; ++i) {
+    if (cond) {
+      ptr = f2; // alias is reassigned on some path inside the loop.
+    }
+  }
+  f1->data = 42;
+  ptr->mu.Unlock(); // expected-warning{{releasing mutex 'ptr->mu' that was not held}}
+} // expected-warning{{mutex 'f1->mu' is still held at the end of function}}
+} // namespace CapabilityAliases
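
---

For readers who want to try the new behavior end to end, here is a minimal standalone sketch (not part of the patch). It uses the usual annotation-macro shorthands from the ThreadSafetyAnalysis documentation; `Counter` and `escape` are hypothetical names chosen for illustration, and `escape` stands in for any callee taking a non-const reference, which the new `VarMapBuilder::VisitCallExpr` treats as a potential reassignment:

```cpp
// Compile with: clang++ -fsyntax-only -Wthread-safety -Wthread-safety-beta demo.cpp
#define CAPABILITY(x) __attribute__((capability(x)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") Mutex {
  void Lock() ACQUIRE();
  void Unlock() RELEASE();
};

struct Counter {
  Mutex mu;
  int value GUARDED_BY(mu);
};

void escape(Counter *&c); // hypothetical: may rebind the alias

void ok(Counter *c) {
  Counter *alias = c; // alias and c are tracked as the same object
  alias->mu.Lock();
  c->value++;         // accepted: alias->mu resolves to c->mu
  alias->mu.Unlock();
}

void bad(Counter *c) {
  Counter *alias = c;
  escape(alias);      // passed by non-const reference: alias tracking invalidated
  alias->mu.Lock();
  c->value++;         // warns: requires holding mutex 'c->mu' exclusively
  alias->mu.Unlock();
}
```

Without `-Wthread-safety-beta`, `ok()` also warns, because `alias->mu` and `c->mu` are then treated as distinct capabilities.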