RSan #145540
Without value support
@llvm/pr-subscribers-compiler-rt-sanitizer

Author: None (rymrg)

Changes

Initial RSan without value extension
RFC: https://discourse.llvm.org/t/rfc-robustess-sanitizer/86831/
Paper: https://doi.org/10.1145/3729277
Preprint version: https://arxiv.org/pdf/2504.15036
Proper race detection depends on #142579

Patch is 85.83 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/145540.diff

21 Files Affected:
diff --git a/compiler-rt/lib/tsan/rtl/CMakeLists.txt b/compiler-rt/lib/tsan/rtl/CMakeLists.txt
index d7d84706bfd58..eb5f4a84fa359 100644
--- a/compiler-rt/lib/tsan/rtl/CMakeLists.txt
+++ b/compiler-rt/lib/tsan/rtl/CMakeLists.txt
@@ -49,6 +49,9 @@ set(TSAN_SOURCES
tsan_symbolize.cpp
tsan_sync.cpp
tsan_vector_clock.cpp
+ rsan.cpp
+ rsan_report.cpp
+ rsan_stacktrace.cpp
)
set(TSAN_CXX_SOURCES
@@ -59,6 +62,10 @@ set(TSAN_PREINIT_SOURCES
tsan_preinit.cpp
)
+set_source_files_properties(tsan_interface_atomic.cpp PROPERTIES COMPILE_FLAGS -std=c++20)
+set_source_files_properties(tsan_mman.cpp PROPERTIES COMPILE_FLAGS -std=c++20)
+set_source_files_properties(tsan_rtl_mutex.cpp PROPERTIES COMPILE_FLAGS -std=c++20)
+
if(APPLE)
list(APPEND TSAN_SOURCES
tsan_interceptors_mac.cpp
diff --git a/compiler-rt/lib/tsan/rtl/rsan.cpp b/compiler-rt/lib/tsan/rtl/rsan.cpp
new file mode 100644
index 0000000000000..fb696eb277b98
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan.cpp
@@ -0,0 +1,8 @@
+#include "rsan_vectorclock.hpp"
+#include "rsan_robustnessmodel.hpp"
+#include "rsan_instrument.hpp"
+#include "rsan_map.hpp"
+#include "rsan_arena.hpp"
+
+namespace Robustness{
+} // namespace Robustness
diff --git a/compiler-rt/lib/tsan/rtl/rsan_action.hpp b/compiler-rt/lib/tsan/rtl/rsan_action.hpp
new file mode 100644
index 0000000000000..a066b4e6ea8fc
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_action.hpp
@@ -0,0 +1,97 @@
+#pragma once
+#include "rsan_defs.hpp"
+namespace Robustness::Action{
+ struct StoreAction{
+ ThreadId tid;
+ Address addr;
+ int size;
+ };
+ struct LoadAction{
+ ThreadId tid;
+ Address addr;
+ int size;
+ };
+ struct AtomicVerifyAction{
+ ThreadId tid;
+ Address addr;
+ morder mo;
+ int size;
+ };
+ struct AtomicVerifyStoreAction{
+ ThreadId tid;
+ Address addr;
+ morder mo;
+ int size;
+ };
+ struct AtomicLoadAction{
+ ThreadId tid;
+ Address addr;
+ morder mo;
+ int size;
+ bool rmw;
+ DebugInfo dbg;
+ };
+ struct AtomicStoreAction{
+ ThreadId tid;
+ Address addr;
+ morder mo;
+ int size;
+ uint64_t oldValue;
+ uint64_t newValue;
+ DebugInfo dbg;
+ };
+ struct AtomicRMWAction{
+ ThreadId tid;
+ Address addr;
+ morder mo;
+ int size;
+ uint64_t oldValue;
+ uint64_t newValue;
+ DebugInfo dbg;
+ };
+ struct AtomicCasAction{
+ ThreadId tid;
+ Address addr;
+ morder mo;
+ int size;
+ uint64_t oldValue;
+ uint64_t newValue;
+ bool success;
+ DebugInfo dbg;
+ };
+ struct FenceAction{
+ ThreadId tid;
+ morder mo;
+ };
+ struct TrackAction{
+ ThreadId tid;
+ Address addr;
+ uint64_t value;
+ };
+ struct WaitAction{
+ ThreadId tid;
+ Address addr;
+ uint64_t value;
+ DebugInfo dbg;
+ };
+ struct BcasAction{
+ ThreadId tid;
+ Address addr;
+ uint64_t value;
+ DebugInfo dbg;
+ };
+ struct ThreadCreate{
+ ThreadId creator, createe;
+ };
+ struct ThreadJoin{
+ ThreadId absorber, absorbee;
+ };
+ struct Free{
+ ThreadId tid;
+ Address addr;
+ uptr size;
+ DebugInfo dbg;
+ };
+} // namespace Robustness::Action
+
+
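The structs above are plain records with no behavior; presumably each instrumentation hook fills one in and hands it to the analysis. A hedged sketch of such glue code (the dispatch function and the hook itself are hypothetical; only AtomicLoadAction's field layout comes from the header above):

  // Hypothetical call site; only Action::AtomicLoadAction is from this PR.
  using namespace Robustness;

  void dispatch(const Action::AtomicLoadAction &a);  // assumed analysis entry point

  void onAtomicLoad(ThreadId tid, Address addr, __tsan::morder mo,
                    int size, bool rmw, DebugInfo dbg) {
    Action::AtomicLoadAction a{tid, addr, mo, size, rmw, dbg};
    dispatch(a);  // hand off to the robustness analysis (hypothetical)
  }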
diff --git a/compiler-rt/lib/tsan/rtl/rsan_arena.hpp b/compiler-rt/lib/tsan/rtl/rsan_arena.hpp
new file mode 100644
index 0000000000000..95fe3c5229942
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_arena.hpp
@@ -0,0 +1,45 @@
+#pragma once
+#include "rsan_vector.h"
+#include "rsan_defs.hpp"
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+namespace Robustness {
+ template< class T >
+ class Arena {
+
+ //const FACTOR = 2;
+ static const u8 BASE = 8;
+
+ u64 cv = 0; // index of the block currently being filled
+ u64 ci = 0; // next free slot within block cv
+
+ Vector<Vector<T>> vs; // blocks of geometrically increasing size
+ Arena(const Arena&) = delete;
+
+
+ public:
+ Arena() = default;
+ ~Arena() {
+ for (auto& v : vs)
+ v.clear();
+ }
+
+ T* allocate(){
+   // Open a new block when all existing ones are full; block cv holds
+   // BASE << cv elements, so capacity grows geometrically.
+   if (cv == vs.size()){
+     vs.push_back();
+     vs[cv].resize(BASE << (cv));
+     ci = 0;
+   }
+   DCHECK_GT(vs.size(), cv);
+   DCHECK_GT(vs[cv].size(), ci);
+   auto ret = &vs[cv][ci++];
+   DCHECK_GT(ret, 0);
+   // Advance to the next block once this one is exhausted.
+   if (ci >= vs[cv].size()){
+     ++cv;
+   }
+
+   // Placement-new a fresh T; blocks are never reallocated, so the
+   // returned pointer stays valid as the arena grows.
+   new (ret) T();
+   return ret;
+ }
+ };
+} // namespace Robustness
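Block i holds BASE << i elements (8, 16, 32, ...), and earlier blocks are never resized or moved, so pointers handed out previously remain stable. A small usage sketch under that reading (the Node type is illustrative, not from the PR):

  struct Node { Robustness::u64 key; Node* next; };

  Robustness::Arena<Node> arena;
  Node* a = arena.allocate();  // lives in block 0 (8 slots)
  Node* b = arena.allocate();  // later growth never moves *a or *b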
diff --git a/compiler-rt/lib/tsan/rtl/rsan_defs.hpp b/compiler-rt/lib/tsan/rtl/rsan_defs.hpp
new file mode 100644
index 0000000000000..6b11897ce9a76
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_defs.hpp
@@ -0,0 +1,96 @@
+#pragma once
+#include "tsan_defs.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+
+//class __tsan::ThreadState;
+
+namespace Robustness{
+ using __tsan::s8;
+ using __tsan::u8;
+ using __tsan::s16;
+ using __tsan::u16;
+ using __tsan::s32;
+ using __tsan::u32;
+ using __tsan::s64;
+ using __tsan::u64;
+ using __tsan::uptr;
+ using __tsan::Epoch;
+ using __tsan::EpochInc;
+ using __tsan::EpochOverflow;
+ using __tsan::kEpochZero;
+ using __tsan::kEpochOver;
+ using __tsan::kEpochLast;
+ typedef __tsan::Epoch timestamp_t;
+ typedef s64 ssize_t;
+ typedef u64 uint64_t;
+ typedef s64 int64_t;
+ typedef __PTRDIFF_TYPE__ ptrdiff_t;
+ typedef __SIZE_TYPE__ size_t;
+
+ typedef u8 uint8_t;
+
+ typedef u64 Address;
+ typedef u64 LocationId;
+
+ typedef u32 ThreadId;
+
+ using __tsan::InternalScopedString;
+
+ using __tsan::flags;
+
+ using __sanitizer::IsAligned;
+
+ using __sanitizer::LowLevelAllocator;
+ using __sanitizer::InternalAlloc;
+ using __sanitizer::InternalFree;
+ using __sanitizer::internal_memcpy;
+ using __sanitizer::internal_memmove;
+ using __sanitizer::internal_memset;
+ using __sanitizer::RoundUpTo;
+ using __sanitizer::RoundUpToPowerOfTwo;
+ using __sanitizer::GetPageSizeCached;
+ using __sanitizer::MostSignificantSetBitIndex;
+ using __sanitizer::MmapOrDie;
+ using __sanitizer::UnmapOrDie;
+ using __sanitizer::Max;
+ using __sanitizer::Swap;
+ using __sanitizer::forward;
+ using __sanitizer::move;
+
+ using __sanitizer::Printf;
+ using __sanitizer::Report;
+
+ using __sanitizer::Lock;
+ using __sanitizer::Mutex;
+
+ template <typename T1, typename T2>
+ struct Pair{
+ T1 first;
+ T2 second;
+ };
+ template <typename T1, typename T2>
+ auto pair(T1 fst, T2 snd){
+ return Pair<T1, T2>{fst, snd};
+ }
+
+ using __tsan::max;
+ using __tsan::min;
+
+ enum class ViolationType{
+ read, write,
+ };
+
+ struct DebugInfo {
+ __tsan::ThreadState* thr = nullptr;
+ uptr pc = 0xDEADBEEF;
+ };
+
+ template<class>
+ inline constexpr bool always_false_v = false;
+
+ inline bool isRobustness() {
+ return __tsan::flags()->enable_robustness;
+ }
+} // namespace Robustness
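isRobustness() reads a TSan flag, which suggests the analysis is compiled in but gated at runtime. A hedged sketch of how a runtime hook might use it (the hook name and body are assumptions; only isRobustness() comes from the header above):

  // Hypothetical guard in a TSan runtime entry point.
  void OnAtomicAccess(__tsan::ThreadState* thr, __tsan::uptr pc) {
    if (!Robustness::isRobustness())
      return;  // flag disabled: leave the normal TSan path untouched
    // ... translate the access into a Robustness::Action and process it ...
  }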
diff --git a/compiler-rt/lib/tsan/rtl/rsan_dense_map.h b/compiler-rt/lib/tsan/rtl/rsan_dense_map.h
new file mode 100644
index 0000000000000..57775613ed00b
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_dense_map.h
@@ -0,0 +1,714 @@
+//===- rsan_dense_map.h - Dense probed hash table ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a fork of the llvm/ADT/DenseMap.h class with the following changes:
+// * Use mmap to allocate.
+// * No iterators.
+// * Does not shrink.
+//
+//===----------------------------------------------------------------------===//
+
+#pragma once
+
+#include "rsan_defs.hpp"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_dense_map_info.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_type_traits.h"
+
+namespace Robustness {
+ namespace detail{
+ using __sanitizer::detail::DenseMapPair;
+ using __sanitizer::detail::combineHashValue;
+ } // namespace detail
+ using __sanitizer::DenseMapInfo;
+
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+class DenseMapBase {
+ public:
+ using size_type = unsigned;
+ using key_type = KeyT;
+ using mapped_type = ValueT;
+ using value_type = BucketT;
+
+ WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; }
+ unsigned size() const { return getNumEntries(); }
+
+ /// Grow the densemap so that it can contain at least \p NumEntries items
+ /// before resizing again.
+ void reserve(size_type NumEntries) {
+ auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
+ if (NumBuckets > getNumBuckets())
+ grow(NumBuckets);
+ }
+
+ void clear() {
+ if (getNumEntries() == 0 && getNumTombstones() == 0)
+ return;
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ if (__sanitizer::is_trivially_destructible<ValueT>::value) {
+ // Use a simpler loop when values don't need destruction.
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
+ P->getFirst() = EmptyKey;
+ } else {
+ unsigned NumEntries = getNumEntries();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
+ if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+ P->getSecond().~ValueT();
+ --NumEntries;
+ }
+ P->getFirst() = EmptyKey;
+ }
+ }
+ CHECK_EQ(NumEntries, 0);
+ }
+ setNumEntries(0);
+ setNumTombstones(0);
+ }
+
+ /// Return 1 if the specified key is in the map, 0 otherwise.
+ size_type count(const KeyT &Key) const {
+ const BucketT *TheBucket;
+ return LookupBucketFor(Key, TheBucket) ? 1 : 0;
+ }
+
+ value_type *find(const KeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+ const value_type *find(const KeyT &Key) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+ bool contains(const KeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return true;
+ return false;
+ }
+
+ /// Alternate version of find() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template <class LookupKeyT>
+ value_type *find_as(const LookupKeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+ template <class LookupKeyT>
+ const value_type *find_as(const LookupKeyT &Key) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+
+ /// lookup - Return the entry for the specified key, or a default
+ /// constructed value if no such entry exists.
+ ValueT lookup(const KeyT &Key) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket->getSecond();
+ return ValueT();
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ detail::DenseMapPair<value_type *, bool> insert(const value_type &KV) {
+ return try_emplace(KV.first, KV.second);
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ detail::DenseMapPair<value_type *, bool> insert(value_type &&KV) {
+ return try_emplace(__sanitizer::move(KV.first),
+ __sanitizer::move(KV.second));
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ detail::DenseMapPair<value_type *, bool> try_emplace(KeyT &&Key,
+ Ts &&...Args) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return {TheBucket, false}; // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key),
+ __sanitizer::forward<Ts>(Args)...);
+ return {TheBucket, true};
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ detail::DenseMapPair<value_type *, bool> try_emplace(const KeyT &Key,
+ Ts &&...Args) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return {TheBucket, false}; // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket =
+ InsertIntoBucket(TheBucket, Key, __sanitizer::forward<Ts>(Args)...);
+ return {TheBucket, true};
+ }
+
+ /// Alternate version of insert() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template <typename LookupKeyT>
+ detail::DenseMapPair<value_type *, bool> insert_as(value_type &&KV,
+ const LookupKeyT &Val) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return {TheBucket, false}; // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket =
+ InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first),
+ __sanitizer::move(KV.second), Val);
+ return {TheBucket, true};
+ }
+
+ bool erase(const KeyT &Val) {
+ BucketT *TheBucket;
+ if (!LookupBucketFor(Val, TheBucket))
+ return false; // not in map.
+
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
+ decrementNumEntries();
+ incrementNumTombstones();
+ return true;
+ }
+
+ void erase(value_type *I) {
+ CHECK_NE(I, nullptr);
+ BucketT *TheBucket = &*I;
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
+ decrementNumEntries();
+ incrementNumTombstones();
+ }
+
+ value_type &FindAndConstruct(const KeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(TheBucket, Key);
+ }
+
+ ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; }
+
+ value_type &FindAndConstruct(KeyT &&Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(TheBucket, __sanitizer::move(Key));
+ }
+
+ ValueT &operator[](KeyT &&Key) {
+ return FindAndConstruct(__sanitizer::move(Key)).second;
+ }
+
+ /// Iterate over active entries of the container.
+ ///
+ /// Function can return fast to stop the process.
+ template <class Fn>
+ void forEach(Fn fn) {
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ const KeyT K = P->getFirst();
+ if (!KeyInfoT::isEqual(K, EmptyKey) &&
+ !KeyInfoT::isEqual(K, TombstoneKey)) {
+ if (!fn(*P))
+ return;
+ }
+ }
+ }
+
+ template <class Fn>
+ void forEach(Fn fn) const {
+ const_cast<DenseMapBase *>(this)->forEach(
+ [&](const value_type &KV) { return fn(KV); });
+ }
+
+ protected:
+ DenseMapBase() = default;
+
+ void destroyAll() {
+ if (getNumBuckets() == 0) // Nothing to do.
+ return;
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
+ P->getSecond().~ValueT();
+ P->getFirst().~KeyT();
+ }
+ }
+
+ void initEmpty() {
+ setNumEntries(0);
+ setNumTombstones(0);
+
+ CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0);
+ const KeyT EmptyKey = getEmptyKey();
+ for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
+ ::new (&B->getFirst()) KeyT(EmptyKey);
+ }
+
+ /// Returns the number of buckets to allocate to ensure that the DenseMap can
+ /// accommodate \p NumEntries without need to grow().
+ unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
+ // Ensure that "NumEntries * 4 < NumBuckets * 3"
+ if (NumEntries == 0)
+ return 0;
+ // +1 is required because of the strict equality.
+ // For example if NumEntries is 48, we need to return 401.
+ return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1);
+ }
+
+ void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+ initEmpty();
+
+ // Insert all the old elements.
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
+ if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
+ // Insert the key/value into the new table.
+ BucketT *DestBucket;
+ bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
+ (void)FoundVal; // silence warning.
+ CHECK(!FoundVal);
+ DestBucket->getFirst() = __sanitizer::move(B->getFirst());
+ ::new (&DestBucket->getSecond())
+ ValueT(__sanitizer::move(B->getSecond()));
+ incrementNumEntries();
+
+ // Free the value.
+ B->getSecond().~ValueT();
+ }
+ B->getFirst().~KeyT();
+ }
+ }
+
+ template <typename OtherBaseT>
+ void copyFrom(
+ const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
+ CHECK_NE(&other, this);
+ CHECK_EQ(getNumBuckets(), other.getNumBuckets());
+
+ setNumEntries(other.getNumEntries());
+ setNumTombstones(other.getNumTombstones());
+
+ if (__sanitizer::is_trivially_copyable<KeyT>::value &&
+ __sanitizer::is_trivially_copyable<ValueT>::value)
+ internal_memcpy(reinterpret_cast<void *>(getBuckets()),
+ other.getBuckets(), getNumBuckets() * sizeof(BucketT));
+ else
+ for (uptr i = 0; i < getNumBuckets(); ++i) {
+ ::new (&getBuckets()[i].getFirst())
+ KeyT(other.getBuckets()[i].getFirst());
+ if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
+ !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
+ ::new (&getBuckets()[i].getSecond())
+ ValueT(other.getBuckets()[i].getSecond());
+ }
+ }
+
+ static unsigned getHashValue(const KeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+
+ template <typename LookupKeyT>
+ static unsigned getHashValue(const LookupKeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+
+ static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); }
+
+ static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
+
+ private:
+ unsigned getNumEntries() const {
+ return static_cast<const DerivedT *>(this)->getNumEntries();
+ }
+
+ void setNumEntries(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumEntries(Num);
+ }
+
+ void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }
+
+ void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }
+
+ unsigned getNumTombstones() const {
+ return static_cast<const DerivedT *>(this)->getNumTombstones();
+ }
+
+ void setNumTombstones(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumTombstones(Num);
+ }
+
+ void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }
+
+ void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }
+
+ const BucketT *getBuckets() const {
+ return static_cast<const DerivedT *>(this)->getBuckets();
+ }
+
+ BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }
+
+ unsigned getNumBuckets() const {
+ return static_cast<const DerivedT *>(this)->getNumBuckets();
+ }
+
+ BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }
+
+ const BucketT *getBuckets...
[truncated]
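The listing is cut off above, but the API shown (try_emplace, lookup, erase, forEach) mirrors LLVM's DenseMap. A usage sketch, assuming the truncated remainder defines a concrete Robustness::DenseMap<KeyT, ValueT> the way sanitizer_dense_map.h does:

  using namespace Robustness;

  DenseMap<u64, u32> m;      // assumed concrete class from the truncated part
  m.try_emplace(42, 1);      // insert only if the key is absent
  u32 v = m.lookup(42);      // 1 here; a default-constructed u32 if missing
  m.forEach([](auto &kv) {   // visit live entries; return false to stop early
    return kv.second != 0;
  });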
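Finally, a minimal sketch of how the sanitizer might be exercised end to end. The enable_robustness flag name comes from rsan_defs.hpp above; treating it as a TSAN_OPTIONS runtime flag, and the claim that this particular program is flagged, are assumptions for illustration:

  // sb.cpp: the classic store-buffering idiom, a standard target for
  // robustness analyses when the accesses are relaxed.
  #include <atomic>
  #include <thread>

  std::atomic<int> x{0}, y{0};
  int r0, r1;

  int main() {
    std::thread t0([] { x.store(1, std::memory_order_relaxed);
                        r0 = y.load(std::memory_order_relaxed); });
    std::thread t1([] { y.store(1, std::memory_order_relaxed);
                        r1 = x.load(std::memory_order_relaxed); });
    t0.join(); t1.join();
  }

  // Hypothetical invocation (RSan builds into the TSan runtime):
  //   clang++ -fsanitize=thread sb.cpp -o sb
  //   TSAN_OPTIONS=enable_robustness=1 ./sb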