diff --git a/featureflags/libmalloc.plist b/featureflags/libmalloc.plist
index 4efb827..ce41bc1 100644
--- a/featureflags/libmalloc.plist
+++ b/featureflags/libmalloc.plist
@@ -7,12 +7,12 @@
Enabled
- ProbGuard
+ PGuardViaLaunchd
Enabled
-
+
- ProbGuardAllProcesses
+ PGuardAllProcesses
Enabled
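
Aside (illustrative, not part of the patch): the renamed plist keys are consumed through the simple feature-flag API, as the pguard_enabled() hunk later in this diff shows. A minimal usage sketch; CONFIG_FEATUREFLAGS_SIMPLE and the header providing os_feature_enabled_simple() are assumptions about the internal build environment, not taken from this diff.

```c
#include <stdbool.h>

#if CONFIG_FEATUREFLAGS_SIMPLE
#include <os/feature_private.h>	/* assumed location of os_feature_enabled_simple() */
#endif

/* Roughly how the feature-flag keys above are consulted at runtime. */
static bool
pguard_feature_enabled(void)
{
#if CONFIG_FEATUREFLAGS_SIMPLE
	return os_feature_enabled_simple(libmalloc, PGuardAllProcesses, false) ||
			os_feature_enabled_simple(libmalloc, PGuardViaLaunchd, false);
#else
	return false;
#endif
}
```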
diff --git a/libmalloc.xcodeproj/project.pbxproj b/libmalloc.xcodeproj/project.pbxproj
index 7af1be5..31df77d 100644
--- a/libmalloc.xcodeproj/project.pbxproj
+++ b/libmalloc.xcodeproj/project.pbxproj
@@ -329,7 +329,6 @@
4DD5962D23E22A3D00D573D2 /* pguard_internals.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = pguard_internals.c; sourceTree = ""; tabWidth = 2; };
4DF318FF23D796550064A673 /* pguard_malloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pguard_malloc.c; sourceTree = ""; tabWidth = 2; };
4DF3190023D796550064A673 /* pguard_malloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pguard_malloc.h; sourceTree = ""; };
- 4DFE482725BA55C5004A50E8 /* malloc_zone_unregister_test.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = malloc_zone_unregister_test.c; sourceTree = ""; };
875E02E32125B62300A7FE8A /* aligned_alloc_test.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = aligned_alloc_test.c; sourceTree = ""; };
875E02E42125C1D100A7FE8A /* posix_memalign_test.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = posix_memalign_test.c; sourceTree = ""; };
8CB962B01F7E9F610046942E /* asan.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = asan.c; sourceTree = ""; };
@@ -337,7 +336,6 @@
925383D01BD03B4A00F745DB /* Makefile */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.make; path = Makefile; sourceTree = ""; };
925383D11BD03B4A00F745DB /* stress_test.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = stress_test.c; sourceTree = ""; };
925383D31BD03B8F00F745DB /* manpages.lst */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = manpages.lst; sourceTree = ""; };
- 96E1C8ED2653373700B23906 /* magazine_medium_test.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = magazine_medium_test.c; sourceTree = ""; };
B61341DD20114B070038D163 /* ktrace.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = ktrace.framework; path = Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.Internal.sdk/System/Library/PrivateFrameworks/ktrace.framework; sourceTree = DEVELOPER_DIR; };
B629CF29202BA3C2007719B9 /* libmalloc_resolver.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = libmalloc_resolver.xcconfig; sourceTree = ""; };
B629CF42202BB337007719B9 /* libmalloc_alt.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libmalloc_alt.a; sourceTree = BUILT_PRODUCTS_DIR; };
@@ -640,7 +638,6 @@
875E02E32125B62300A7FE8A /* aligned_alloc_test.c */,
875E02E42125C1D100A7FE8A /* posix_memalign_test.c */,
B6A414EA1FBDF01C0038DC53 /* malloc_claimed_address_tests.c */,
- 4DFE482725BA55C5004A50E8 /* malloc_zone_unregister_test.c */,
B6726EC92092473D00E8AF5A /* malloc_heap_check_test.c */,
C9F8C2681D70B521008C4044 /* magazine_small_test.c */,
B64E100A205311DC004C4BA6 /* malloc_size_test.c */,
@@ -660,7 +657,6 @@
B6D59B0E225EA90E009E5896 /* reallocarray.c */,
B68C985C2180BEB5003DAF36 /* region_cookie_test.c */,
925383D11BD03B4A00F745DB /* stress_test.c */,
- 96E1C8ED2653373700B23906 /* magazine_medium_test.c */,
);
path = tests;
sourceTree = "";
diff --git a/private/malloc_implementation.h b/private/malloc_implementation.h
index c116909..278f094 100644
--- a/private/malloc_implementation.h
+++ b/private/malloc_implementation.h
@@ -31,16 +31,15 @@
/********* Libsystem initializers ************/
-struct _malloc_late_init {
+struct _malloc_functions {
unsigned long version;
/* The following functions are included in version 1 of this structure */
void * (*dlopen) (const char *path, int mode);
void * (*dlsym) (void *handle, const char *symbol);
- bool internal_diagnostics; /* os_variant_has_internal_diagnostics() */
};
void __malloc_init(const char *apple[]);
-void __malloc_late_init(const struct _malloc_late_init *);
+void __stack_logging_early_finished(const struct _malloc_functions *);
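
Aside (caller-side sketch, not part of the patch): the _malloc_functions struct is how a libSystem-style initializer hands dlopen/dlsym down to libmalloc once dyld is available. Only the struct layout and the __stack_logging_early_finished() entry point come from the header above; the hand_off_to_malloc() wrapper is hypothetical.

```c
#include <dlfcn.h>

/* Declarations mirrored from private/malloc_implementation.h (post-patch). */
struct _malloc_functions {
	unsigned long version;
	void * (*dlopen)(const char *path, int mode);
	void * (*dlsym)(void *handle, const char *symbol);
};
extern void __stack_logging_early_finished(const struct _malloc_functions *);

/* Hypothetical caller: populate the struct on the stack and pass it down;
 * the callee must copy anything it wants to keep. */
static void
hand_off_to_malloc(void)
{
	const struct _malloc_functions funcs = {
		.version = 1,
		.dlopen = dlopen,
		.dlsym = dlsym,
	};
	__stack_logging_early_finished(&funcs);
}
```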
diff --git a/src/internal.h b/src/internal.h
index 28eb2ef..605621b 100644
--- a/src/internal.h
+++ b/src/internal.h
@@ -67,7 +67,6 @@
#endif
#include
#include
-#include // _pthread_threadid_self_np_direct()
#include // _pthread_threadid_self_np_direct()
#include // TSD keys
#include
@@ -146,18 +145,6 @@ malloc_traced(void)
return malloc_tracing_enabled;
}
-static inline uint32_t
-_malloc_cpu_number(void)
-{
-#if TARGET_OS_SIMULATOR
- size_t n;
- pthread_cpu_number_np(&n);
- return (uint32_t)n;
-#else
- return _os_cpu_number();
-#endif
-}
-
/*
* Copies the malloc library's _malloc_msl_lite_hooks_t structure to a given
* location. Size is passed to allow the structure to grow. Since this is
diff --git a/src/magazine_medium.c b/src/magazine_medium.c
index d0f122e..0e1ea40 100644
--- a/src/magazine_medium.c
+++ b/src/magazine_medium.c
@@ -124,13 +124,13 @@ medium_mag_get_thread_index(void)
{
#if CONFIG_MEDIUM_USES_HYPER_SHIFT
if (os_likely(_os_cpu_number_override == -1)) {
- return _malloc_cpu_number() >> hyper_shift;
+ return _os_cpu_number() >> hyper_shift;
} else {
return _os_cpu_number_override >> hyper_shift;
}
#else // CONFIG_MEDIUM_USES_HYPER_SHIFT
if (os_likely(_os_cpu_number_override == -1)) {
- return _malloc_cpu_number();
+ return _os_cpu_number();
} else {
return _os_cpu_number_override;
}
@@ -866,7 +866,6 @@ medium_free_scan_madvise_free(rack_t *rack, magazine_t *depot_ptr, region_t r)
medium_advisory_t mat = (medium_advisory_t)pgLo;
mat->next = advisories;
mat->size = pgHi - pgLo;
- advisories = mat;
}
break;
}
@@ -894,7 +893,6 @@ medium_free_scan_madvise_free(rack_t *rack, magazine_t *depot_ptr, region_t r)
medium_advisory_t mat = (medium_advisory_t)pgLo;
mat->next = advisories;
mat->size = pgHi - pgLo;
- advisories = mat;
}
memset(&madv_headers[index], 0, sizeof(uint16_t) * alloc_msize);
@@ -1072,31 +1070,19 @@ medium_madvise_pressure_relief(rack_t *rack)
for (mag_index = 0; mag_index < rack->num_magazines; mag_index++) {
size_t index;
for (index = 0; index < rack->region_generation->num_regions_allocated; ++index) {
- rack_region_lock(rack);
+ SZONE_LOCK(MEDIUM_SZONE_FROM_RACK(rack));
region_t medium = rack->region_generation->hashed_regions[index];
if (!medium || medium == HASHRING_REGION_DEALLOCATED) {
- rack_region_unlock(rack);
+ SZONE_UNLOCK(MEDIUM_SZONE_FROM_RACK(rack));
continue;
}
- region_trailer_t *trailer =
- REGION_TRAILER_FOR_MEDIUM_REGION(medium);
- // Make sure that the owning magazine doesn't try and take this out
- // from under our feet.
- trailer->dispose_flags |= RACK_DISPOSE_DELAY;
- rack_region_unlock(rack);
-
magazine_t *mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines,
- trailer, MAGAZINE_INDEX_FOR_MEDIUM_REGION(medium));
+ REGION_TRAILER_FOR_MEDIUM_REGION(medium),
+ MAGAZINE_INDEX_FOR_MEDIUM_REGION(medium));
- // If acquiring the region lock was enough to prevent the owning
- // magazine from deallocating the region, free it now so we don't
- // do wasted work.
- if (rack_region_maybe_dispose(rack, medium, MEDIUM_REGION_SIZE, trailer)) {
- SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr);
- continue;
- }
+ SZONE_UNLOCK(MEDIUM_SZONE_FROM_RACK(rack));
/* Ordering is important here, the magazine of a region may potentially change
* during mag_lock_zine_for_region_trailer, so src_mag_index must be taken
@@ -1170,6 +1156,7 @@ medium_madvise_free_range_conditional_no_lock(rack_t *rack, magazine_t *mag_ptr,
{
region_trailer_t *node = REGION_TRAILER_FOR_MEDIUM_REGION(region);
msize_t *madvh = MEDIUM_MADVISE_HEADER_FOR_PTR(ptr);
+
msize_t trigger_msize = trigger_level >> SHIFT_MEDIUM_QUANTUM;
size_t free_header_size = sizeof(medium_inplace_free_entry_s) + sizeof(msize_t);
@@ -1218,7 +1205,7 @@ medium_madvise_free_range_conditional_no_lock(rack_t *rack, magazine_t *mag_ptr,
}
msize_t right_dirty_msz = 0;
- if (right_end_idx > src_end_idx) {
+ if (right_end_idx < src_end_idx) {
// Same as above, if we had trailing data coalesced with this entry
// and that was not madvised, consider it, too.
right_dirty_msz = medium_madvise_header_dirty_len(madvh, right_start_idx);
@@ -1231,7 +1218,7 @@ medium_madvise_free_range_conditional_no_lock(rack_t *rack, magazine_t *mag_ptr,
medium_madvise_header_mark_middle(madvh, right_end_idx);
}
- // We absolutely can't madvise lower than the free-list entry pointer plus
+ // We absolutely can't madvise lower than the free-list entry pointer plus
// the header size. When the entry is OOB, there's no header or footer to
// store in memory.
uintptr_t safe_start_ptr = round_page_kernel(rangep + free_header_size);
@@ -1248,16 +1235,14 @@ medium_madvise_free_range_conditional_no_lock(rack_t *rack, magazine_t *mag_ptr,
MEDIUM_BYTES_FOR_MSIZE(range_msz), safe_end_ptr);
// The page that contains the freelist entry needs to be marked as not
- // having been madvised. Note that the quantum is larger than the kernel page size
- // so if safe_start_ptr and rangep are on different pages, we just mark
- // the whole block as clean.
+ // having been madvised.
if (range_idx < MEDIUM_META_INDEX_FOR_PTR(safe_start_ptr)) {
medium_madvise_header_mark_dirty(madvh, range_idx,
MEDIUM_META_INDEX_FOR_PTR(safe_start_ptr) - range_idx);
}
if (range_idx + range_msz > MEDIUM_META_INDEX_FOR_PTR(safe_end_ptr)) {
medium_madvise_header_mark_dirty(madvh,
- MEDIUM_META_INDEX_FOR_PTR(safe_end_ptr), range_idx +
+ MEDIUM_META_INDEX_FOR_PTR(safe_end_ptr) + 1, range_idx +
range_msz - MEDIUM_META_INDEX_FOR_PTR(safe_end_ptr));
}
@@ -1282,12 +1267,10 @@ medium_madvise_free_range_conditional_no_lock(rack_t *rack, magazine_t *mag_ptr,
// We chose not to madvise, we need to re-mark the region as dirty
// for when we come back to it later.
if (left_dirty_msz < left_msz) {
- /* The preceding block was clean. */
medium_madvise_header_mark_clean(madvh, range_idx,
left_msz - left_dirty_msz);
}
if (right_dirty_msz < right_msz) {
- /* The trailing block was clean. */
medium_madvise_header_mark_clean(madvh, right_start_idx +
right_dirty_msz, right_msz - right_dirty_msz);
}
@@ -1374,10 +1357,24 @@ medium_free_try_depot_unmap_no_lock(rack_t *rack, magazine_t *depot_ptr, region_
int objects_in_use = medium_free_detach_region(rack, depot_ptr, sparse_region);
if (0 == objects_in_use) {
- if (!rack_region_remove(rack, sparse_region, node)) {
+ // Invalidate the hash table entry for this region with HASHRING_REGION_DEALLOCATED.
+ // Using HASHRING_REGION_DEALLOCATED preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not.
+ rgnhdl_t pSlot = hash_lookup_region_no_lock(rack->region_generation->hashed_regions,
+ rack->region_generation->num_regions_allocated,
+ rack->region_generation->num_regions_allocated_shift,
+ sparse_region);
+ if (NULL == pSlot) {
+ malloc_zone_error(rack->debug_flags, true, "medium_free_try_depot_unmap_no_lock hash lookup failed: %p\n", sparse_region);
return NULL;
}
+ *pSlot = HASHRING_REGION_DEALLOCATED;
depot_ptr->num_bytes_in_magazine -= MEDIUM_REGION_PAYLOAD_BYTES;
+ // Atomically increment num_regions_dealloc
+#ifdef __LP64__
+ OSAtomicIncrement64((int64_t *)&rack->num_regions_dealloc);
+#else
+ OSAtomicIncrement32((int32_t *)&rack->num_regions_dealloc);
+#endif
// Caller will transfer ownership of the region back to the OS with no locks held
MAGMALLOC_DEALLOCREGION(MEDIUM_SZONE_FROM_RACK(rack), (void *)sparse_region, (int)MEDIUM_REGION_SIZE); // DTrace USDT Probe
@@ -1937,12 +1934,6 @@ medium_try_realloc_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new
/* there's some left, so put the remainder back */
leftover = (unsigned char *)ptr + MEDIUM_BYTES_FOR_MSIZE(new_msize);
medium_free_list_add_ptr(rack, medium_mag_ptr, leftover, leftover_msize);
- msize_t leftover_index = MEDIUM_META_INDEX_FOR_PTR(leftover);
- if (madv_headers[leftover_index] & MEDIUM_IS_ADVISED) {
- medium_madvise_header_mark_clean(madv_headers, leftover_index, leftover_msize);
- } else {
- medium_madvise_header_mark_dirty(madv_headers, leftover_index, leftover_msize);
- }
}
medium_meta_header_set_in_use(meta_headers, index, new_msize);
medium_madvise_header_mark_dirty(madv_headers, index, new_msize);
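
Aside (illustrative, not part of the patch): the safe_start_ptr/safe_end_ptr logic in medium_madvise_free_range_conditional_no_lock() rounds the madvisable range inward so the page holding the inline free-list header stays resident. A self-contained sketch of that arithmetic, with a stand-in 16 KiB kernel page size and made-up addresses and header sizes:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel-page rounding macros used in the hunks above. */
#define KPAGE_SIZE 16384ULL
#define round_page_kernel(x) (((x) + KPAGE_SIZE - 1) & ~(KPAGE_SIZE - 1))
#define trunc_page_kernel(x) ((x) & ~(KPAGE_SIZE - 1))

int
main(void)
{
	/* Illustrative free block; the header size roughly mirrors
	 * sizeof(medium_inplace_free_entry_s) + sizeof(msize_t). */
	uintptr_t block_start = 0x100004000, block_end = 0x10000c000;
	size_t free_header_size = 16 + 2;

	/* Never madvise below the free-list header or past the block end. */
	uintptr_t safe_start = round_page_kernel(block_start + free_header_size);
	uintptr_t safe_end = trunc_page_kernel(block_end);
	if (safe_start < safe_end) {
		printf("madvise range: [0x%lx, 0x%lx)\n",
				(unsigned long)safe_start, (unsigned long)safe_end);
	}
	return 0;
}
```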
diff --git a/src/magazine_rack.c b/src/magazine_rack.c
index 9217ce1..3197724 100644
--- a/src/magazine_rack.c
+++ b/src/magazine_rack.c
@@ -159,66 +159,3 @@ rack_region_insert(rack_t *rack, region_t region)
rack->num_regions++;
_malloc_lock_unlock(&rack->region_lock);
}
-
-bool
-rack_region_remove(rack_t *rack, region_t region, region_trailer_t *trailer)
-{
- bool rv = true;
-
- rack_region_lock(rack);
- rgnhdl_t pSlot = hash_lookup_region_no_lock(
- rack->region_generation->hashed_regions,
- rack->region_generation->num_regions_allocated,
- rack->region_generation->num_regions_allocated_shift,
- region);
-
- if ((trailer->dispose_flags & RACK_DISPOSE_DELAY) != 0) {
- // Still remove this region from the hash table but don't allow the
- // current caller to deallocate the region until the pressure thread is
- // done with it.
- trailer->dispose_flags |= RACK_DISPOSE_NEEDED;
- rv = false;
- }
-
- if (NULL == pSlot) {
- malloc_zone_error(rack->debug_flags, true,
- "tiny_free_try_depot_unmap_no_lock hash lookup failed: %p\n",
- region);
- rv = false;
- } else {
- // Invalidate the hash table entry for this region with
- // HASHRING_REGION_DEALLOCATED. Using HASHRING_REGION_DEALLOCATED
- // preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not.
- *pSlot = HASHRING_REGION_DEALLOCATED;
-
- // Atomically increment num_regions_dealloc
-#ifdef __LP64__
- OSAtomicIncrement64((int64_t *)&rack->num_regions_dealloc);
-#else
- OSAtomicIncrement32((int32_t *)&rack->num_regions_dealloc);
-#endif
- }
-
- rack_region_unlock(rack);
- return rv;
-}
-
-bool
-rack_region_maybe_dispose(rack_t *rack, region_t region, size_t region_size,
- region_trailer_t *trailer)
-{
- bool rv = false;
- rack_region_lock(rack);
-
- if ((trailer->dispose_flags & RACK_DISPOSE_NEEDED) != 0) {
- // We tried to dispose of this region while the pressure thread was
- // using it, so now that it's finished we can deallocate it now.
- mvm_deallocate_pages((void *)region, region_size,
- MALLOC_FIX_GUARD_PAGE_FLAGS(rack->debug_flags));
- rv = true;
- } else {
- trailer->dispose_flags &= ~RACK_DISPOSE_DELAY;
- }
- rack_region_unlock(rack);
- return rv;
-}
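
Aside (condensed sketch, not part of the patch): the rack_region_remove()/rack_region_maybe_dispose() pair removed above implements a handshake so the pressure-relief thread and a deallocating magazine never unmap a region out from under each other. The flag transitions, reduced to their essentials; the real code runs these under rack->region_lock and also maintains the region hash table:

```c
#include <stdbool.h>
#include <stdint.h>

enum {
	DISPOSE_DELAY  = 0x1,	/* pressure thread is working on the region */
	DISPOSE_NEEDED = 0x2,	/* owner wanted to unmap while DELAY was set */
};

struct trailer { uint32_t dispose_flags; };

/* Pressure thread, region lock held: pin the region before touching it. */
static void
region_pin(struct trailer *t)
{
	t->dispose_flags |= DISPOSE_DELAY;
}

/* Owning magazine, region lock held: returns false if the unmap must be
 * deferred because the pressure thread still has the region pinned. */
static bool
region_try_remove(struct trailer *t)
{
	if (t->dispose_flags & DISPOSE_DELAY) {
		t->dispose_flags |= DISPOSE_NEEDED;
		return false;
	}
	return true;
}

/* Pressure thread, when finished: returns true if it must perform the
 * deferred unmap on the owner's behalf. */
static bool
region_unpin(struct trailer *t)
{
	if (t->dispose_flags & DISPOSE_NEEDED) {
		return true;
	}
	t->dispose_flags &= ~DISPOSE_DELAY;
	return false;
}
```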
diff --git a/src/magazine_rack.h b/src/magazine_rack.h
index d77fc6d..d918dd0 100644
--- a/src/magazine_rack.h
+++ b/src/magazine_rack.h
@@ -30,7 +30,6 @@
typedef void *region_t;
typedef region_t *rgnhdl_t; /* A pointer into hashed_regions array. */
-typedef struct region_trailer region_trailer_t;
#define INITIAL_NUM_REGIONS_SHIFT 6 // log2(INITIAL_NUM_REGIONS)
#define INITIAL_NUM_REGIONS (1 << INITIAL_NUM_REGIONS_SHIFT) // Must be a power of 2!
@@ -96,27 +95,4 @@ MALLOC_NOEXPORT
void
rack_region_insert(rack_t *rack, region_t region);
-MALLOC_NOEXPORT
-bool
-rack_region_remove(rack_t *rack, region_t region, region_trailer_t *trailer);
-
-MALLOC_NOEXPORT
-bool
-rack_region_maybe_dispose(rack_t *rack, region_t region, size_t region_size,
- region_trailer_t *trailer);
-
-MALLOC_NOEXPORT MALLOC_ALWAYS_INLINE
-static void
-rack_region_lock(rack_t *rack)
-{
- _malloc_lock_lock(&rack->region_lock);
-}
-
-MALLOC_NOEXPORT MALLOC_ALWAYS_INLINE
-static void
-rack_region_unlock(rack_t *rack)
-{
- _malloc_lock_unlock(&rack->region_lock);
-}
-
#endif // __MAGAZINE_RACK_H
diff --git a/src/magazine_small.c b/src/magazine_small.c
index afebb86..ff1eca0 100644
--- a/src/magazine_small.c
+++ b/src/magazine_small.c
@@ -72,13 +72,13 @@ small_mag_get_thread_index(void)
{
#if CONFIG_SMALL_USES_HYPER_SHIFT
if (os_likely(_os_cpu_number_override == -1)) {
- return _malloc_cpu_number() >> hyper_shift;
+ return _os_cpu_number() >> hyper_shift;
} else {
return _os_cpu_number_override >> hyper_shift;
}
#else // CONFIG_SMALL_USES_HYPER_SHIFT
if (os_likely(_os_cpu_number_override == -1)) {
- return _malloc_cpu_number();
+ return _os_cpu_number();
} else {
return _os_cpu_number_override;
}
@@ -994,30 +994,18 @@ small_madvise_pressure_relief(rack_t *rack)
for (mag_index = 0; mag_index < rack->num_magazines; mag_index++) {
size_t index;
for (index = 0; index < rack->region_generation->num_regions_allocated; ++index) {
- rack_region_lock(rack);
+ SZONE_LOCK(SMALL_SZONE_FROM_RACK(rack));
region_t small = rack->region_generation->hashed_regions[index];
if (!small || small == HASHRING_REGION_DEALLOCATED) {
- rack_region_unlock(rack);
+ SZONE_UNLOCK(SMALL_SZONE_FROM_RACK(rack));
continue;
}
- region_trailer_t *trailer = REGION_TRAILER_FOR_SMALL_REGION(small);
- // Make sure that the owning magazine doesn't try and take this out
- // from under our feet.
- trailer->dispose_flags |= RACK_DISPOSE_DELAY;
- rack_region_unlock(rack);
-
magazine_t *mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines,
- trailer, MAGAZINE_INDEX_FOR_SMALL_REGION(small));
-
- // If acquiring the region lock was enough to prevent the owning
- // magazine from deallocating the region, free it now so we don't
- // do wasted work.
- if (rack_region_maybe_dispose(rack, small, SMALL_REGION_SIZE, trailer)) {
- SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr);
- continue;
- }
+ REGION_TRAILER_FOR_SMALL_REGION(small),
+ MAGAZINE_INDEX_FOR_SMALL_REGION(small));
+ SZONE_UNLOCK(SMALL_SZONE_FROM_RACK(rack));
/* Ordering is important here, the magazine of a region may potentially change
* during mag_lock_zine_for_region_trailer, so src_mag_index must be taken
@@ -1155,18 +1143,31 @@ small_free_try_depot_unmap_no_lock(rack_t *rack, magazine_t *depot_ptr, region_t
int objects_in_use = small_free_detach_region(rack, depot_ptr, sparse_region);
if (0 == objects_in_use) {
- if (!rack_region_remove(rack, sparse_region, node)) {
+ // Invalidate the hash table entry for this region with HASHRING_REGION_DEALLOCATED.
+ // Using HASHRING_REGION_DEALLOCATED preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not.
+ rgnhdl_t pSlot = hash_lookup_region_no_lock(rack->region_generation->hashed_regions,
+ rack->region_generation->num_regions_allocated,
+ rack->region_generation->num_regions_allocated_shift,
+ sparse_region);
+ if (NULL == pSlot) {
+ malloc_zone_error(rack->debug_flags, true, "small_free_try_depot_unmap_no_lock hash lookup failed: %p\n", sparse_region);
return NULL;
}
+ *pSlot = HASHRING_REGION_DEALLOCATED;
depot_ptr->num_bytes_in_magazine -= SMALL_HEAP_SIZE;
+ // Atomically increment num_regions_dealloc
+#ifdef __LP64__
+ OSAtomicIncrement64((int64_t *)&rack->num_regions_dealloc);
+#else
+ OSAtomicIncrement32((int32_t *)&rack->num_regions_dealloc);
+#endif
// Caller will transfer ownership of the region back to the OS with no locks held
MAGMALLOC_DEALLOCREGION(SMALL_SZONE_FROM_RACK(rack), (void *)sparse_region, (int)SMALL_REGION_SIZE); // DTrace USDT Probe
return sparse_region;
} else {
- malloc_zone_error(rack->debug_flags, true,
- "small_free_try_depot_unmap_no_lock objects_in_use not zero: %d\n", objects_in_use);
+ malloc_zone_error(rack->debug_flags, true, "small_free_try_depot_unmap_no_lock objects_in_use not zero: %d\n", objects_in_use);
return NULL;
}
}
diff --git a/src/magazine_tiny.c b/src/magazine_tiny.c
index 4b8116a..ce3f31f 100644
--- a/src/magazine_tiny.c
+++ b/src/magazine_tiny.c
@@ -44,13 +44,13 @@ tiny_mag_get_thread_index(void)
{
#if CONFIG_TINY_USES_HYPER_SHIFT
if (os_likely(_os_cpu_number_override == -1)) {
- return _malloc_cpu_number() >> hyper_shift;
+ return _os_cpu_number() >> hyper_shift;
} else {
return _os_cpu_number_override >> hyper_shift;
}
#else // CONFIG_SMALL_USES_HYPER_SHIFT
if (os_likely(_os_cpu_number_override == -1)) {
- return _malloc_cpu_number();
+ return _os_cpu_number();
} else {
return _os_cpu_number_override;
}
@@ -836,30 +836,18 @@ tiny_madvise_pressure_relief(rack_t *rack)
for (mag_index = 0; mag_index < rack->num_magazines; mag_index++) {
size_t index;
for (index = 0; index < rack->region_generation->num_regions_allocated; ++index) {
- rack_region_lock(rack);
+ SZONE_LOCK(TINY_SZONE_FROM_RACK(rack));
region_t tiny = rack->region_generation->hashed_regions[index];
if (!tiny || tiny == HASHRING_REGION_DEALLOCATED) {
- rack_region_unlock(rack);
+ SZONE_UNLOCK(TINY_SZONE_FROM_RACK(rack));
continue;
}
- region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(tiny);
- // Make sure that the owning magazine doesn't try and take this out
- // from under our feet.
- trailer->dispose_flags |= RACK_DISPOSE_DELAY;
- rack_region_unlock(rack);
-
magazine_t *mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines,
- trailer, MAGAZINE_INDEX_FOR_TINY_REGION(tiny));
-
- // If acquiring the region lock was enough to prevent the owning
- // magazine from deallocating the region, free it now so we don't
- // do wasted work.
- if (rack_region_maybe_dispose(rack, tiny, TINY_REGION_SIZE, trailer)) {
- SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr);
- continue;
- }
+ REGION_TRAILER_FOR_TINY_REGION(tiny),
+ MAGAZINE_INDEX_FOR_TINY_REGION(tiny));
+ SZONE_UNLOCK(TINY_SZONE_FROM_RACK(rack));
/* Ordering is important here, the magazine of a region may potentially change
* during mag_lock_zine_for_region_trailer, so src_mag_index must be taken
@@ -1061,19 +1049,32 @@ tiny_free_try_depot_unmap_no_lock(rack_t *rack, magazine_t *depot_ptr, region_tr
int objects_in_use = tiny_free_detach_region(rack, depot_ptr, sparse_region);
if (0 == objects_in_use) {
- if (!rack_region_remove(rack, sparse_region, node)) {
+ // Invalidate the hash table entry for this region with HASHRING_REGION_DEALLOCATED.
+ // Using HASHRING_REGION_DEALLOCATED preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not.
+ rgnhdl_t pSlot = hash_lookup_region_no_lock(rack->region_generation->hashed_regions,
+ rack->region_generation->num_regions_allocated,
+ rack->region_generation->num_regions_allocated_shift,
+ sparse_region);
+
+ if (NULL == pSlot) {
+ malloc_zone_error(rack->debug_flags, true, "tiny_free_try_depot_unmap_no_lock hash lookup failed: %p\n", sparse_region);
return NULL;
}
+ *pSlot = HASHRING_REGION_DEALLOCATED;
depot_ptr->num_bytes_in_magazine -= TINY_HEAP_SIZE;
+ // Atomically increment num_regions_dealloc
+#ifdef __LP64__
+ OSAtomicIncrement64((int64_t *)&rack->num_regions_dealloc);
+#else
+ OSAtomicIncrement32((int32_t *)&rack->num_regions_dealloc);
+#endif
+
// Caller will transfer ownership of the region back to the OS with no locks held
- MAGMALLOC_DEALLOCREGION(TINY_SZONE_FROM_RACK(rack),
- (void *)sparse_region, TINY_REGION_SIZE); // DTrace USDT Probe
+ MAGMALLOC_DEALLOCREGION(TINY_SZONE_FROM_RACK(rack), (void *)sparse_region, TINY_REGION_SIZE); // DTrace USDT Probe
return sparse_region;
} else {
- malloc_zone_error(rack->debug_flags, true,
- "tiny_free_try_depot_unmap_no_lock objects_in_use not zero: %d\n",
- objects_in_use);
+ malloc_zone_error(rack->debug_flags, true, "tiny_free_try_depot_unmap_no_lock objects_in_use not zero: %d\n", objects_in_use);
return NULL;
}
}
diff --git a/src/magazine_zone.h b/src/magazine_zone.h
index 51c167a..33c633e 100644
--- a/src/magazine_zone.h
+++ b/src/magazine_zone.h
@@ -226,11 +226,6 @@ typedef struct {
typedef uint32_t region_cookie_t;
-OS_ENUM(rack_dispose_flags, uint32_t,
- RACK_DISPOSE_DELAY = 0x1,
- RACK_DISPOSE_NEEDED = 0x2,
-);
-
typedef struct region_trailer {
struct region_trailer *prev;
struct region_trailer *next;
@@ -239,8 +234,6 @@ typedef struct region_trailer {
mag_index_t mag_index;
volatile int32_t pinned_to_depot;
bool recirc_suitable;
- // Locking: dispose_flags must be locked under the rack's region lock
- rack_dispose_flags_t dispose_flags;
} region_trailer_t;
typedef struct tiny_region {
diff --git a/src/malloc.c b/src/malloc.c
index c6c2a5a..16a27d0 100644
--- a/src/malloc.c
+++ b/src/malloc.c
@@ -50,7 +50,6 @@ int32_t malloc_num_zones_allocated = 0;
malloc_zone_t **malloc_zones = (malloc_zone_t **)0xdeaddeaddeaddead;
malloc_logger_t *malloc_logger = NULL;
-static uint32_t initial_num_zones;
static malloc_zone_t *initial_scalable_zone;
static malloc_zone_t *initial_nano_zone;
static malloc_zone_t *initial_default_zone = NULL;
@@ -122,7 +121,7 @@ static inline malloc_zone_t *inline_malloc_default_zone(void) __attribute__((alw
#define DEFAULT_MALLOC_ZONE_STRING "DefaultMallocZone"
#define DEFAULT_PUREGEABLE_ZONE_STRING "DefaultPurgeableMallocZone"
#define MALLOC_HELPER_ZONE_STRING "MallocHelperZone"
-#define MALLOC_PGUARD_ZONE_STRING "ProbGuardMallocZone"
+#define MALLOC_PGUARD_ZONE_STRING "PGuardMallocZone"
MALLOC_NOEXPORT
unsigned int phys_ncpus;
@@ -202,24 +201,6 @@ __is_translated(void)
}
#endif /* TARGET_OS_OSX */
-
-#define LIBMALLOC_EXPERIMENT_FACTORS_KEY "MallocExperiment="
-#define LIBMALLOC_EXPERIMENT_DISABLE_MEDIUM (1ULL)
-static void
-__malloc_init_experiments(const char *str)
-{
- uint64_t experiment_factors = 0;
- str = strchr(str, '=');
- if (str) {
- experiment_factors = strtoull_l(str + 1, NULL, 16, NULL);
- }
- switch (experiment_factors) {
- case LIBMALLOC_EXPERIMENT_DISABLE_MEDIUM:
- magazine_medium_enabled = false;
- break;
- }
-}
-
static void
__malloc_init_from_bootargs(const char *bootargs)
{
@@ -341,7 +322,6 @@ __malloc_init(const char *apple[])
}
const char **p;
- const char *malloc_experiments = NULL;
for (p = apple; p && *p; p++) {
if (strstr(*p, "malloc_entropy") == *p) {
int count = __entropy_from_kernel(*p);
@@ -350,9 +330,7 @@ __malloc_init(const char *apple[])
if (sizeof(malloc_entropy) / sizeof(malloc_entropy[0]) == count) {
_malloc_entropy_initialized = true;
}
- }
- if (strstr(*p, LIBMALLOC_EXPERIMENT_FACTORS_KEY) == *p) {
- malloc_experiments = *p;
+ break;
}
}
if (!_malloc_entropy_initialized) {
@@ -360,9 +338,6 @@ __malloc_init(const char *apple[])
_malloc_entropy_initialized = true;
}
- if (malloc_experiments) {
- __malloc_init_experiments(malloc_experiments);
- }
__malloc_init_from_bootargs(bootargs);
mvm_aslr_init();
@@ -379,19 +354,6 @@ __malloc_init(const char *apple[])
_malloc_initialize(apple, bootargs);
}
-static void register_pgm_zone(bool internal_diagnostics);
-static void stack_logging_early_finished(const struct _malloc_late_init *funcs);
-
-// WARNING: The passed _malloc_late_init is a stack variable in
-// libSystem_initializer(). We must not hold on to it.
-void
-__malloc_late_init(const struct _malloc_late_init *mli)
-{
- register_pgm_zone(mli->internal_diagnostics);
- stack_logging_early_finished(mli);
- initial_num_zones = malloc_num_zones;
-}
-
MALLOC_NOEXPORT malloc_zone_t* lite_zone = NULL;
MALLOC_ALWAYS_INLINE
@@ -672,37 +634,32 @@ find_registered_zone(const void *ptr, size_t *returned_size)
return default_zone;
}
}
+
+ // The default zone is registered in malloc_zones[0]. There's no danger that it will ever be unregistered.
+ // So don't advance the FRZ counter yet.
+ malloc_zone_t *zone = malloc_zones[0];
+ size_t size = zone->size(zone, ptr);
+ if (size) { // Claimed by this zone?
+ if (returned_size) {
+ *returned_size = size;
+ }
- malloc_zone_t *zone;
- size_t size;
-
- // We assume that the initial zones will never be unregistered concurrently while this code is running so we can have
- // a fast path without locking. Callers who really do unregister these (to install their own default zone) need to
- // ensure they establish their zone setup during initialization and before entering a multi-threaded environment.
- for (uint32_t i = 0; i < initial_num_zones; i++) {
- zone = malloc_zones[i];
- size = zone->size(zone, ptr);
-
- if (size) { // Claimed by this zone?
- if (returned_size) {
- *returned_size = size;
- }
-
- // Asan and others replace the zone at position 0 with their own zone.
- // In that case just return that zone as they need this information.
- // Otherwise return the virtual default zone, not the actual zone in position 0.
- if (i == 0 && has_default_zone0()) {
- return default_zone;
- }
-
+ // Asan and others replace the zone at position 0 with their own zone.
+ // In that case just return that zone as they need this information.
+ // Otherwise return the virtual default zone, not the actual zone in position 0.
+ if (!has_default_zone0()) {
return zone;
+ } else {
+ return default_zone;
}
}
int32_t volatile *pFRZCounter = pFRZCounterLive; // Capture pointer to the counter of the moment
OSAtomicIncrement32Barrier(pFRZCounter); // Advance this counter -- our thread is in FRZ
+ unsigned index;
int32_t limit = *(int32_t volatile *)&malloc_num_zones;
+ malloc_zone_t **zones = &malloc_zones[1];
// From this point on, FRZ is accessing the malloc_zones[] array without locking
// in order to avoid contention on common operations (such as non-default-zone free()).
@@ -719,8 +676,8 @@ find_registered_zone(const void *ptr, size_t *returned_size)
// are still valid). It also ensures that all the pointers in the zones array are
// valid until it returns, so that a stale value in limit is not dangerous.
- for (uint32_t i = initial_num_zones; i < limit; i++) {
- zone = malloc_zones[i];
+ for (index = 1; index < limit; ++index, ++zones) {
+ zone = *zones;
size = zone->size(zone, ptr);
if (size) { // Claimed by this zone?
goto out;
@@ -907,7 +864,7 @@ _malloc_initialize(const char *apple[], const char *bootargs)
nano_common_init(envp, apple, bootargs);
#endif
- const uint32_t k_max_zones = 2;
+ const uint32_t k_max_zones = 3;
malloc_zone_t *zone_stack[k_max_zones];
const char *name_stack[k_max_zones];
uint32_t num_zones = 0;
@@ -938,6 +895,14 @@ _malloc_initialize(const char *apple[], const char *bootargs)
}
#endif
+ if (pguard_enabled()) {
+ malloc_zone_t *wrapped_zone = zone_stack[num_zones - 1];
+ zone_stack[num_zones] = pguard_create_zone(wrapped_zone, malloc_debug_flags);
+ name_stack[num_zones] = MALLOC_PGUARD_ZONE_STRING;
+ // TODO(yln): what is the external contract for zone names?
+ num_zones++;
+ }
+
MALLOC_ASSERT(num_zones <= k_max_zones);
initial_default_zone = zone_stack[num_zones - 1];
@@ -945,27 +910,11 @@ _malloc_initialize(const char *apple[], const char *bootargs)
for (int i = num_zones - 1; i >= 0; i--) malloc_zone_register_while_locked(zone_stack[i]);
for (int i = num_zones - 1; i >= 0; i--) malloc_set_zone_name(zone_stack[i], name_stack[i]);
- initial_num_zones = malloc_num_zones;
-
// malloc_report(ASL_LEVEL_INFO, "%d registered zones\n", malloc_num_zones);
// malloc_report(ASL_LEVEL_INFO, "malloc_zones is at %p; malloc_num_zones is at %p\n", (unsigned)&malloc_zones,
// (unsigned)&malloc_num_zones);
}
-static void make_last_zone_default_zone(void);
-static void
-register_pgm_zone(bool internal_diagnostics)
-{
- if (pguard_enabled(internal_diagnostics)) {
- malloc_zone_t *wrapped_zone = malloc_zones[0];
- malloc_zone_t *pgm_zone = pguard_create_zone(wrapped_zone);
- malloc_zone_register_while_locked(pgm_zone);
- make_last_zone_default_zone();
- initial_default_zone = pgm_zone;
- malloc_set_zone_name(pgm_zone, MALLOC_PGUARD_ZONE_STRING);
- }
-}
-
static inline malloc_zone_t *
inline_malloc_default_zone(void)
{
@@ -1395,23 +1344,6 @@ malloc_create_zone(vm_size_t start_size, unsigned flags)
return zone;
}
-static void
-make_last_zone_default_zone(void)
-{
- unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
- mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
-
- malloc_zone_t *last_zone = malloc_zones[malloc_num_zones - 1];
-
- // assert(zone == malloc_zones[malloc_num_zones - 1];
- for (int i = malloc_num_zones - 1; i > 0; --i) {
- malloc_zones[i] = malloc_zones[i - 1];
- }
- malloc_zones[0] = last_zone;
-
- mprotect(malloc_zones, protect_size, PROT_READ);
-}
-
/*
* For use by CheckFix: establish a new default zone whose behavior is, apart from
* the use of death-row and per-CPU magazines, that of Leopard.
@@ -1420,6 +1352,7 @@ void
malloc_create_legacy_default_zone(void)
{
malloc_zone_t *zone;
+ int i;
zone = create_legacy_scalable_zone(0, malloc_debug_flags);
@@ -1435,7 +1368,16 @@ malloc_create_legacy_default_zone(void)
}
malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING);
- make_last_zone_default_zone();
+ unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
+ mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
+
+ // assert(zone == malloc_zones[malloc_num_zones - 1];
+ for (i = malloc_num_zones - 1; i > 0; --i) {
+ malloc_zones[i] = malloc_zones[i - 1];
+ }
+ malloc_zones[0] = zone;
+
+ mprotect(malloc_zones, protect_size, PROT_READ);
MALLOC_UNLOCK();
}
@@ -1831,10 +1773,6 @@ malloc_zone_unregister(malloc_zone_t *z)
mprotect(malloc_zones, protect_size, PROT_READ);
- // MAX(num_zones, 1) enables the fast path in find_registered_zone() for zone 0 even
- // if it is a custom zone, e.g., ASan and user zones.
- initial_num_zones = MIN(MAX(malloc_num_zones, 1), initial_num_zones);
-
// Exchange the roles of the FRZ counters. The counter that has captured the number of threads presently
// executing *inside* find_registered_zone is swapped with the counter drained to zero last time through.
// The former is then allowed to drain to zero while this thread yields.
@@ -2055,11 +1993,9 @@ malloc_claimed_address(void *ptr)
return true;
}
- // Next, try the initial zones.
- for (uint32_t i = 0; i < initial_num_zones; i++) {
- if (malloc_zone_claimed_address(malloc_zones[i], ptr)) {
- return true;
- }
+ // Next, try the default zone, which is always present.
+ if (malloc_zone_claimed_address(malloc_zones[0], ptr)) {
+ return true;
}
// Try all the other zones. Increment the FRZ barrier so that we can
@@ -2069,9 +2005,10 @@ malloc_claimed_address(void *ptr)
OSAtomicIncrement32Barrier(pFRZCounter);
int32_t limit = *(int32_t volatile *)&malloc_num_zones;
+ malloc_zone_t **zones = &malloc_zones[1];
boolean_t result = false;
- for (uint32_t i = initial_num_zones; i < limit; i++) {
- malloc_zone_t *zone = malloc_zones[i];
+ for (unsigned index = 1; index < limit; ++index, ++zones) {
+ malloc_zone_t *zone = *zones;
if (malloc_zone_claimed_address(zone, ptr)) {
result = true;
break;
@@ -2660,8 +2597,8 @@ malloc_debug(int level)
/* this is called from libsystem during initialization. */
-static void
-stack_logging_early_finished(const struct _malloc_late_init *funcs)
+void
+__stack_logging_early_finished(const struct _malloc_functions *funcs)
{
#if !TARGET_OS_DRIVERKIT
_dlopen = funcs->dlopen;
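
Aside (simplified sketch, not part of the patch): the pFRZCounter dance that both versions of find_registered_zone() rely on lets readers walk malloc_zones[] without a lock while malloc_zone_unregister() waits for in-flight readers to drain. Roughly, assuming two counters whose roles are swapped under the malloc zone lock:

```c
#include <libkern/OSAtomic.h>
#include <sched.h>

static volatile int32_t frz_counters[2];
static int32_t volatile *pFRZCounterLive  = &frz_counters[0];
static int32_t volatile *pFRZCounterDrain = &frz_counters[1];

/* Reader side: announce entry, scan the zone array lock-free, announce exit. */
static void
frz_scan(void (*scan_zones)(void))
{
	int32_t volatile *counter = pFRZCounterLive;	/* capture counter of the moment */
	OSAtomicIncrement32Barrier(counter);
	scan_zones();
	OSAtomicDecrement32Barrier(counter);
}

/* Unregister side (assumed to run under the zone lock): swap the counters'
 * roles, then wait for readers that captured the retired one to drain. */
static void
frz_quiesce(void)
{
	int32_t volatile *retired = pFRZCounterLive;
	pFRZCounterLive = pFRZCounterDrain;
	pFRZCounterDrain = retired;
	while (*retired != 0) {
		sched_yield();
	}
}
```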
diff --git a/src/nano_malloc.c b/src/nano_malloc.c
index bb191d4..475b957 100644
--- a/src/nano_malloc.c
+++ b/src/nano_malloc.c
@@ -37,7 +37,7 @@ static MALLOC_ALWAYS_INLINE unsigned int
nano_mag_index(const nanozone_t *nanozone)
{
if (os_likely(_os_cpu_number_override == -1)) {
- return (_malloc_cpu_number() >> hyper_shift) % nano_common_max_magazines;
+ return (_os_cpu_number() >> hyper_shift) % nano_common_max_magazines;
}
return (_os_cpu_number_override >> hyper_shift) % nano_common_max_magazines;
}
diff --git a/src/nanov2_malloc.c b/src/nanov2_malloc.c
index 4c03a56..7245da7 100644
--- a/src/nanov2_malloc.c
+++ b/src/nanov2_malloc.c
@@ -596,15 +596,15 @@ nanov2_get_allocation_block_index(void)
#if CONFIG_NANO_USES_HYPER_SHIFT
if (os_likely(nano_common_max_magazines_is_ncpu)) {
// Default case is max magazines == physical number of CPUs, which
- // must be > _malloc_cpu_number() >> hyper_shift, so the modulo
+ // must be > _os_cpu_number() >> hyper_shift, so the modulo
// operation is not required.
- return _malloc_cpu_number() >> hyper_shift;
+ return _os_cpu_number() >> hyper_shift;
}
#else // CONFIG_NANO_USES_HYPER_SHIFT
if (os_likely(nano_common_max_magazines_is_ncpu)) {
// Default case is max magazines == logical number of CPUs, which
- // must be > _malloc_cpu_number() so the modulo operation is not required.
- return _malloc_cpu_number();
+ // must be > _os_cpu_number() so the modulo operation is not required.
+ return _os_cpu_number();
}
#endif // CONFIG_NANO_USES_HYPER_SHIFT
@@ -614,7 +614,7 @@ nanov2_get_allocation_block_index(void)
#endif // CONFIG_NANO_USES_HYPER_SHIFT
if (os_likely(_os_cpu_number_override == -1)) {
- return (_malloc_cpu_number() >> shift) % nano_common_max_magazines;
+ return (_os_cpu_number() >> shift) % nano_common_max_magazines;
}
return (_os_cpu_number_override >> shift) % nano_common_max_magazines;
}
@@ -1334,7 +1334,7 @@ nanov2_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask,
if (kr) {
return kr;
}
- boolean_t self_zone = (task == mach_task_self() && (nanozonev2_t *)zone_address == nanozone);
+ boolean_t self_zone = (nanozonev2_t *)zone_address == nanozone;
memcpy(&zone_copy, nanozone, sizeof(zone_copy));
nanozone = &zone_copy;
nanov2_meta_index_t metablock_meta_index = nanov2_metablock_meta_index(nanozone);
diff --git a/src/pguard_malloc.c b/src/pguard_malloc.c
index cfd80ff..f8692a0 100644
--- a/src/pguard_malloc.c
+++ b/src/pguard_malloc.c
@@ -28,7 +28,6 @@
# include // dladdr()
#endif
#include // mach_absolute_time()
-#include // csops()
#include "internal.h"
@@ -306,20 +305,14 @@ choose_available_slot(pguard_zone_t *zone)
return slot;
}
+// Choose a random metadata index.
static uint32_t
choose_metadata(pguard_zone_t *zone)
{
if (zone->num_metadata < zone->max_metadata) {
return zone->num_metadata++;
}
-
- while (true) {
- uint32_t index = rand_uniform(zone->max_metadata);
- uint32_t s = zone->metadata[index].slot;
- if (zone->slots[s].state == ss_freed) {
- return index;
- }
- }
+ return rand_uniform(zone->max_metadata);
}
static boolean_t
@@ -914,7 +907,7 @@ static const malloc_zone_t malloc_zone_template = {
#pragma mark -
-#pragma mark Configuration Options
+#pragma mark Zone Configuration
static const char *
env_var(const char *name)
@@ -937,72 +930,18 @@ env_bool(const char *name) {
return value[0] == '1';
}
+boolean_t
+pguard_enabled(void)
+{
+ if (env_var("MallocPGuard")) {
+ return env_bool("MallocPGuard");
+ }
#if CONFIG_FEATUREFLAGS_SIMPLE
-# define FEATURE_FLAG(feature, default) os_feature_enabled_simple(libmalloc, feature, default)
+ return os_feature_enabled_simple(libmalloc, PGuardAllProcesses, FALSE) ||
+ (os_feature_enabled_simple(libmalloc, PGuardViaLaunchd, FALSE) && env_bool("MallocPGuardViaLaunchd"));
#else
-# define FEATURE_FLAG(feature, default) (default)
+ return FALSE;
#endif
-
-
-#pragma mark -
-#pragma mark Zone Configuration
-
-static bool
-is_platform_binary(void)
-{
- uint32_t flags = 0;
- int err = csops(getpid(), CS_OPS_STATUS, &flags, sizeof(flags));
- if (err) {
- return false;
- }
- return (flags & CS_PLATFORM_BINARY);
-}
-
-static bool
-should_activate(bool internal_build)
-{
- uint32_t activation_rate = (internal_build ? 250 : 1000);
- return rand_uniform(activation_rate) == 0;
-}
-
-bool
-pguard_enabled(bool internal_build)
-{
- if (env_var("MallocProbGuard")) {
- return env_bool("MallocProbGuard");
- }
-#if TARGET_OS_OSX || TARGET_OS_IOS
- if (FEATURE_FLAG(ProbGuard, true) && (internal_build || is_platform_binary())) {
- bool activate = TARGET_OS_OSX ?
- should_activate(internal_build) :
- env_bool("MallocProbGuardViaLaunchd");
- if (activate) {
- return true;
- }
- }
-#endif // macOS || iOS
- if (FEATURE_FLAG(ProbGuardAllProcesses, false)) {
- return true;
- }
- return false;
-}
-
-static uint32_t
-choose_memory_budget_in_kb(void)
-{
- return (TARGET_OS_OSX ? 8 : 2) * 1024;
-}
-
-// TODO(yln): uniform sampling is likely not optimal here, since we will tend to
-// sample around the average of our range, which is probably more frequent than
-// what we want. We probably want the average to be less frequent, but still be
-// able to reach the "very frequent" end of our range occassionally. Consider
-// using a geometric (or other weighted distribution) here.
-static uint32_t
-choose_sample_rate(void)
-{
- uint32_t min = 500, max = 10000;
- return rand_uniform(max - min) + min;
}
static const double k_slot_multiplier = 10.0;
@@ -1027,9 +966,22 @@ compute_max_allocations(size_t memory_budget_in_kb)
return max_allocations;
}
+static uint32_t
+choose_sample_rate(void)
+{
+#if CONFIG_FEATUREFLAGS_SIMPLE
+ if (os_feature_enabled_simple(libmalloc, PGuardAllProcesses, FALSE)) {
+ return 1000;
+ }
+#endif
+ uint32_t rates[] = {10, 50, 100, 500, 1000, 5000};
+ const uint32_t count = (sizeof(rates) / sizeof(rates[0]));
+ return rates[rand_uniform(count)];
+}
+
static void
configure_zone(pguard_zone_t *zone) {
- uint32_t memory_budget_in_kb = env_uint("MallocPGuardMemoryBudgetInKB", choose_memory_budget_in_kb());
+ uint32_t memory_budget_in_kb = env_uint("MallocPGuardMemoryBudgetInKB", 2 * 1024); // 2MB
zone->max_allocations = env_uint("MallocPGuardAllocations", compute_max_allocations(memory_budget_in_kb));
zone->num_slots = env_uint("MallocPGuardSlots", k_slot_multiplier * zone->max_allocations);
zone->max_metadata = env_uint("MallocPGuardMetadata", k_metadata_multiplier * zone->max_allocations);
@@ -1085,8 +1037,9 @@ setup_zone(pguard_zone_t *zone, malloc_zone_t *wrapped_zone) {
static void install_signal_handler(void *unused);
malloc_zone_t *
-pguard_create_zone(malloc_zone_t *wrapped_zone)
+pguard_create_zone(malloc_zone_t *wrapped_zone, unsigned debug_flags)
{
+ // TODO(yln): debug_flags unused
pguard_zone_t *zone = (pguard_zone_t *)my_vm_map(sizeof(pguard_zone_t), VM_PROT_READ_WRITE, VM_MEMORY_MALLOC);
setup_zone(zone, wrapped_zone);
my_vm_protect((vm_address_t)zone, PAGE_MAX_SIZE, VM_PROT_READ);
@@ -1403,6 +1356,7 @@ mark_read_write(vm_address_t page)
#pragma mark -
#pragma mark Mach VM Helpers
+// TODO(yln): try to replace these helpers with functions from vm.c
static vm_address_t
my_vm_map(size_t size, vm_prot_t protection, int tag)
{
diff --git a/src/pguard_malloc.h b/src/pguard_malloc.h
index 4008d82..100d1a2 100644
--- a/src/pguard_malloc.h
+++ b/src/pguard_malloc.h
@@ -26,14 +26,13 @@
#include "base.h"
#include "malloc/malloc.h"
-#include
MALLOC_NOEXPORT
-bool
-pguard_enabled(bool internal_build);
+boolean_t
+pguard_enabled(void);
MALLOC_NOEXPORT
malloc_zone_t *
-pguard_create_zone(malloc_zone_t *wrapped_zone);
+pguard_create_zone(malloc_zone_t *wrapped_zone, unsigned debug_flags);
#endif // _PGUARD_MALLOC_H_
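
Aside (hedged sketch, not part of the patch): the sample-rate plumbing above feeds the zone's countdown sampler. The behaviour pinned down by the should_sample_counter() tests in tests/pguard_internals.c further down can be reproduced with a counter reseeded from rand_uniform(); this mirrors the test expectations rather than the exact pguard_malloc.c code:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t sample_counter;

/* Uniform value in [0, upper_bound); arc4random_uniform() stands in for the
 * allocator's internal rand_uniform(). */
static uint32_t
rand_uniform(uint32_t upper_bound)
{
	return arc4random_uniform(upper_bound);
}

/* Sample roughly one in `range` calls: reseed the countdown with a value in
 * [1, range] and report a sample when it hits zero. With range == 1 every
 * call samples; with rand_uniform() pinned to 1 the pattern is skip, sample,
 * skip, sample, matching the test expectations below. */
static bool
should_sample_counter(uint32_t range)
{
	if (sample_counter == 0) {
		sample_counter = rand_uniform(range) + 1;
	}
	return (--sample_counter == 0);
}
```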
diff --git a/tests/Makefile b/tests/Makefile
index d8f1a4f..5a29c67 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -71,7 +71,7 @@ ifeq ($(Embedded),NO)
tsan: CFLAGS := $(filter-out $(ARCH_FLAGS),$(CFLAGS)) -arch x86_64 -fsanitize=thread
tsan: OTHER_LDFLAGS += -Wl,-rpath -Wl,$(SANITIZER_DYLIB_PATH)
else
-EXCLUDED_SOURCES += tsan.c magazine_medium_test.c
+EXCLUDED_SOURCES += tsan.c
endif
madvise: OTHER_CFLAGS += -I../src
diff --git a/tests/MallocBenchTest/MallocBench.plist b/tests/MallocBenchTest/MallocBench.plist
index d1b0d99..c4bbcc5 100644
--- a/tests/MallocBenchTest/MallocBench.plist
+++ b/tests/MallocBenchTest/MallocBench.plist
@@ -33,9 +33,6 @@
perf
-
- Disabled
-
diff --git a/tests/basic_malloc_free_perf.c b/tests/basic_malloc_free_perf.c
index a4ac321..256651d 100644
--- a/tests/basic_malloc_free_perf.c
+++ b/tests/basic_malloc_free_perf.c
@@ -542,7 +542,7 @@ T_DECL(basic_perf_parallel_by_size_class_offset,
"Malloc/Free by size class with offset parallel",
T_META_TAG_PERF, T_META_ALL_VALID_ARCHS(NO),
T_META_LTEPHASE(LTE_POSTINIT),
- T_META_ENVVAR("MallocNanoZone=0"))
+ T_META_ENVVAR("MallocNanoZone"))
{
basic_perf_malloc_free_by_size_class_offset(false);
}
diff --git a/tests/magazine_medium_test.c b/tests/magazine_medium_test.c
deleted file mode 100644
index 46836c6..0000000
--- a/tests/magazine_medium_test.c
+++ /dev/null
@@ -1,152 +0,0 @@
-//
-// magazine_medium_test.c
-// libmalloc_test
-//
-// Created by Jason Teplitz on 5/17/21.
-//
-
-#include
-
-#include "../src/magazine_medium.c"
-#include "magazine_testing.h"
-
-bool aggressive_madvise_enabled = false;
-T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));
-
-static inline void
-medium_test_rack_setup(rack_t *rack)
-{
- test_rack_setup(rack, RACK_TYPE_MEDIUM);
-}
-
-static void
-assert_block_madvise_headers(void *ptr, msize_t msize, bool dirty, bool intrusive_free_list)
-{
- msize_t *madv_headers = MEDIUM_MADVISE_HEADER_FOR_PTR(ptr);
- msize_t index = MEDIUM_META_INDEX_FOR_PTR(ptr);
- msize_t orig_msize = msize;
- uintptr_t safe_start_ptr = (uintptr_t) ptr;
- uintptr_t end_ptr = (uintptr_t)ptr + (msize << SHIFT_MEDIUM_QUANTUM);
- uintptr_t safe_end_ptr = end_ptr;
- if (intrusive_free_list && !dirty) {
- safe_start_ptr = round_page_kernel((uintptr_t)ptr + sizeof(medium_inplace_free_entry_s) + sizeof(msize_t));
- safe_end_ptr = trunc_page_kernel((uintptr_t) ptr + (msize << SHIFT_MEDIUM_QUANTUM) - sizeof(msize_t));
- }
- index = MEDIUM_META_INDEX_FOR_PTR(safe_start_ptr);
- msize = MEDIUM_META_INDEX_FOR_PTR(safe_end_ptr) - index;
- msize_t end_index = MEDIUM_META_INDEX_FOR_PTR(safe_end_ptr);
- msize_t expected = msize | (dirty ? 0 : MEDIUM_IS_ADVISED);
- T_ASSERT_EQ(madv_headers[index], expected, "Start of block is marked correctly");
- if (msize > 1) {
- T_ASSERT_EQ(madv_headers[end_index - 1], expected, "End of block is marked correctly");
- }
- for (msize_t i = 1; i < msize - 1; i++) {
- T_QUIET; T_ASSERT_EQ(madv_headers[index + i], (msize_t) 0, "Middle of block is marked correctly");
- }
- if (intrusive_free_list) {
- // Make sure that the first and last pages are marked dirty
- index = MEDIUM_META_INDEX_FOR_PTR(ptr);
- if (MEDIUM_META_INDEX_FOR_PTR(safe_start_ptr) > MEDIUM_META_INDEX_FOR_PTR(ptr)) {
- msize_t first_page_header = madv_headers[index];
- T_ASSERT_NE(first_page_header, (msize_t)0, "free list is not marked as middle");
- T_ASSERT_EQ(first_page_header & MEDIUM_IS_ADVISED, 0, "free list is marked as dirty");
- }
- if (MEDIUM_META_INDEX_FOR_PTR(safe_end_ptr) < MEDIUM_META_INDEX_FOR_PTR(end_ptr)) {
- msize_t last_page_header = madv_headers[MEDIUM_META_INDEX_FOR_PTR(safe_end_ptr)];
- T_ASSERT_NE(last_page_header, (msize_t)0, "free list is not marked as middle");
- T_ASSERT_EQ(last_page_header & MEDIUM_IS_ADVISED, 0, "free list is marked as dirty");
- }
- }
-}
-
-static inline magazine_t *
-get_magazine(struct rack_s *rack, void *ptr)
-{
- mag_index_t mag_index = MAGAZINE_INDEX_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr));
- return &(rack->magazines[mag_index]);
-}
-
-
-T_DECL(medium_realloc_madvise_headers, "medium realloc in place maintains madvise headers",
- T_META_ENABLED(CONFIG_MEDIUM_ALLOCATOR))
-{
- struct rack_s rack;
- medium_test_rack_setup(&rack);
-
- // Allocate two blocks and free the second, then try to realloc() the first.
- // This should extend in-place
-
- void *ptr = medium_malloc_should_clear(&rack, 1, false);
- T_ASSERT_NOTNULL(ptr, "allocation");
- void *ptr2 = medium_malloc_should_clear(&rack, 4, false);
- T_ASSERT_NOTNULL(ptr2, "allocation 2");
- T_ASSERT_EQ_PTR(ptr2, (void *)((uintptr_t)ptr + MEDIUM_BYTES_FOR_MSIZE(1)), "sequential allocations");
- // Allocate an extra block and free it last so we don't hit in medium's last free cache
- void *extra_ptr = medium_malloc_should_clear(&rack, 1, false);
-
- free_medium(&rack, ptr2, MEDIUM_REGION_FOR_PTR(ptr2), 0);
- free_medium(&rack, extra_ptr, MEDIUM_REGION_FOR_PTR(extra_ptr), 0);
-
- boolean_t realloced = medium_try_realloc_in_place(&rack, ptr, MEDIUM_BYTES_FOR_MSIZE(1), MEDIUM_BYTES_FOR_MSIZE(2));
- T_ASSERT_TRUE(realloced, "realloced");
-
- // Make sure the madvise headers are correct for both the realloc'd block and the new smaller block after it.
- assert_block_madvise_headers(ptr, 2, true, false);
- void *next_block = (unsigned char *)ptr + MEDIUM_BYTES_FOR_MSIZE(2);
- assert_block_madvise_headers(next_block, 3, true, false);
-}
-
-T_DECL(free_end_of_region, "End of region's footer is marked dirty",
- T_META_ENABLED(CONFIG_MEDIUM_ALLOCATOR))
-{
- // Check that the headers for the last block in a region are correct
- // when the block has been coalesced and is using an intrusive free list.
- struct rack_s rack;
- medium_test_rack_setup(&rack);
-
- // Use up all of the OOB entries so we force an intrusive free list
- void *oob_ptrs[MEDIUM_OOB_COUNT];
- for (size_t i = 0; i < MEDIUM_OOB_COUNT * 2; i++) {
- void *ptr = medium_malloc_should_clear(&rack, 1, false);
- T_QUIET; T_ASSERT_NOTNULL(ptr, "oob allocation");
- if (i % 2 == 0){
- oob_ptrs[i / 2] = ptr;
- }
- }
- for (size_t i = 0; i < MEDIUM_OOB_COUNT; i++){
- void *ptr = oob_ptrs[i];
- free_medium(&rack, ptr, MEDIUM_REGION_FOR_PTR(ptr), 0);
- }
-
- // Allocate the rest of the region in allocations just below the madvise window
- size_t num_allocated = MEDIUM_OOB_COUNT * 2;
- magazine_t *mag = get_magazine(&rack, oob_ptrs[0]);
- void *ptr = NULL, *last_ptr = NULL;
- size_t block_size = 0, final_block_size = 0;
- while (num_allocated < NUM_MEDIUM_BLOCKS) {
- size_t curr_block_size = ((medium_sliding_madvise_granularity(mag)) >> SHIFT_MEDIUM_QUANTUM) - 1;
- if (curr_block_size + num_allocated >= NUM_MEDIUM_BLOCKS) {
- // Last block, just allocate whatever remains
- curr_block_size = NUM_MEDIUM_BLOCKS - num_allocated;
- final_block_size = curr_block_size;
- } else {
- block_size = curr_block_size;
- }
- last_ptr = ptr;
- ptr = medium_malloc_should_clear(&rack, curr_block_size, false);
- T_QUIET; T_ASSERT_NOTNULL(ptr, "allocation under madvise window");
- num_allocated += curr_block_size;
- }
-
- // Now free the final two blocks so they coalesced together and madvised
- free_medium(&rack, last_ptr, MEDIUM_REGION_FOR_PTR(last_ptr), 0);
-
- free_medium(&rack, ptr, MEDIUM_REGION_FOR_PTR(ptr), 0);
-
- // The magazine caches the most recently freed pointer
- // so free one more to trigger madvise of last_ptr
- void *before_trailing_2 = (void *) ((uintptr_t)oob_ptrs[0] + (1UL << SHIFT_MEDIUM_QUANTUM));
- free_medium(&rack, before_trailing_2, MEDIUM_REGION_FOR_PTR(before_trailing_2), 0);
-
- assert_block_madvise_headers(last_ptr, block_size + final_block_size, false, true);
-}
diff --git a/tests/magazine_rack.c b/tests/magazine_rack.c
index f0fa97c..c075fc6 100644
--- a/tests/magazine_rack.c
+++ b/tests/magazine_rack.c
@@ -33,90 +33,3 @@ T_DECL(basic_magazine_deinit, "allocate deallocate magazines")
rack_destroy(&rack);
T_ASSERT_NULL(rack.magazines, "magazine deinit");
}
-
-void *
-pressure_thread(void *arg)
-{
- T_LOG("pressure thread started\n");
- while (1) {
- malloc_zone_pressure_relief(0, 0);
- }
-}
-
-void *
-thread(void *arg)
-{
- uintptr_t sz = (uintptr_t)arg;
- T_LOG("thread started (allocation size: %lu bytes)\n", sz);
- void *temp = malloc(sz);
-
- uint64_t c = 100;
- while (c-- > 0) {
- uint32_t num = arc4random_uniform(100000);
- void **allocs = malloc(sizeof(void *) * num);
-
- for (int i=0; imagazines, "magazine initialisation");
}
T_DECL(basic_small_alloc, "small rack init and alloc")
{
struct rack_s rack;
- small_test_rack_setup(&rack);
+ test_rack_setup(&rack);
void *ptr = small_malloc_should_clear(&rack, SMALL_MSIZE_FOR_BYTES(512), false);
T_ASSERT_NOTNULL(ptr, "allocation");
@@ -40,7 +42,7 @@ T_DECL(basic_small_alloc, "small rack init and alloc")
T_DECL(basic_small_teardown, "small rack init, alloc, teardown")
{
struct rack_s rack;
- small_test_rack_setup(&rack);
+ test_rack_setup(&rack);
void *ptr = small_malloc_should_clear(&rack, TINY_MSIZE_FOR_BYTES(512), false);
T_ASSERT_NOTNULL(ptr, "allocation");
@@ -60,7 +62,7 @@ T_DECL(basic_small_teardown, "small rack init, alloc, teardown")
T_DECL(basic_small_free, "small free")
{
struct rack_s rack;
- small_test_rack_setup(&rack);
+ test_rack_setup(&rack);
void *ptr = small_malloc_should_clear(&rack, SMALL_MSIZE_FOR_BYTES(512), false);
T_ASSERT_NOTNULL(ptr, "allocation");
@@ -75,7 +77,7 @@ T_DECL(basic_small_free, "small free")
T_DECL(basic_small_shrink, "small rack shrink")
{
struct rack_s rack;
- small_test_rack_setup(&rack);
+ test_rack_setup(&rack);
void *ptr = small_malloc_should_clear(&rack, SMALL_MSIZE_FOR_BYTES(1024), false);
T_ASSERT_NOTNULL(ptr, "allocation");
@@ -92,7 +94,7 @@ T_DECL(basic_small_shrink, "small rack shrink")
T_DECL(basic_small_realloc_in_place, "small rack realloc in place")
{
struct rack_s rack;
- small_test_rack_setup(&rack);
+ test_rack_setup(&rack);
// Allocate two blocks and free the second, then try to realloc() the first.
// This should extend in-place using the one-level death row cache that's
diff --git a/tests/magazine_testing.h b/tests/magazine_testing.h
index 14089a0..67a39bb 100644
--- a/tests/magazine_testing.h
+++ b/tests/magazine_testing.h
@@ -64,12 +64,4 @@ szone_malloc(szone_t *szone, size_t size)
__builtin_trap();
}
-void
-test_rack_setup(rack_t *rack, rack_type_t rack_type)
-{
- memset(rack, 'a', sizeof(rack));
- rack_init(rack, rack_type, 1, 0);
- T_QUIET; T_ASSERT_NOTNULL(rack->magazines, "magazine initialisation");
-}
-
#endif // __MAGAZINE_TESTING
diff --git a/tests/magazine_tiny_test.c b/tests/magazine_tiny_test.c
index d2f7353..7558d8a 100644
--- a/tests/magazine_tiny_test.c
+++ b/tests/magazine_tiny_test.c
@@ -17,15 +17,17 @@ bool aggressive_madvise_enabled = DEFAULT_AGGRESSIVE_MADVISE_ENABLED;
T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));
static inline void
-tiny_test_rack_setup(rack_t *rack)
+test_rack_setup(rack_t *rack)
{
- test_rack_setup(rack, RACK_TYPE_TINY);
+ memset(rack, 'a', sizeof(rack));
+ rack_init(rack, RACK_TYPE_TINY, 1, 0);
+ T_QUIET; T_ASSERT_NOTNULL(rack->magazines, "magazine initialisation");
}
T_DECL(basic_tiny_alloc, "tiny rack init and alloc")
{
struct rack_s rack;
- tiny_test_rack_setup(&rack);
+ test_rack_setup(&rack);
void *ptr = tiny_malloc_should_clear(&rack, TINY_MSIZE_FOR_BYTES(32), false);
T_ASSERT_NOTNULL(ptr, "allocation");
@@ -40,7 +42,7 @@ T_DECL(basic_tiny_alloc, "tiny rack init and alloc")
T_DECL(basic_tiny_teardown, "tiny rack init, alloc, teardown")
{
struct rack_s rack;
- tiny_test_rack_setup(&rack);
+ test_rack_setup(&rack);
void *ptr = tiny_malloc_should_clear(&rack, TINY_MSIZE_FOR_BYTES(32), false);
T_ASSERT_NOTNULL(ptr, "allocation");
@@ -60,7 +62,7 @@ T_DECL(basic_tiny_teardown, "tiny rack init, alloc, teardown")
T_DECL(basic_tiny_free, "tiny free")
{
struct rack_s rack;
- tiny_test_rack_setup(&rack);
+ test_rack_setup(&rack);
void *ptr = tiny_malloc_should_clear(&rack, TINY_MSIZE_FOR_BYTES(32), false);
T_ASSERT_NOTNULL(ptr, "allocation");
@@ -75,7 +77,7 @@ T_DECL(basic_tiny_free, "tiny free")
T_DECL(basic_tiny_shrink, "tiny rack shrink")
{
struct rack_s rack;
- tiny_test_rack_setup(&rack);
+ test_rack_setup(&rack);
void *ptr = tiny_malloc_should_clear(&rack, TINY_MSIZE_FOR_BYTES(64), false);
T_ASSERT_NOTNULL(ptr, "allocation");
@@ -92,7 +94,7 @@ T_DECL(basic_tiny_shrink, "tiny rack shrink")
T_DECL(basic_tiny_realloc_in_place, "tiny rack realloc in place")
{
struct rack_s rack;
- tiny_test_rack_setup(&rack);
+ test_rack_setup(&rack);
// Allocate two blocks and free the second, then try to realloc() the first.
// This should extend in-place using the one-level death row cache that's
diff --git a/tests/malloc_zone_unregister_test.c b/tests/malloc_zone_unregister_test.c
deleted file mode 100644
index b2d4827..0000000
--- a/tests/malloc_zone_unregister_test.c
+++ /dev/null
@@ -1,66 +0,0 @@
-//
-// malloc_zone_unregister_test.c
-// libmalloc
-//
-// Tests for malloc_zone_unregister().
-//
-
-#include
-
-#include
-#include
-#include
-
-T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));
-
-extern int32_t malloc_num_zones;
-extern malloc_zone_t **malloc_zones;
-
-T_DECL(malloc_zone_unregister_establish_custom_default_zone,
- "Unregister all initial zones and register a custom default zone",
- T_META_ENVVAR("MallocNanoZone=1"))
-{
- void *ptr = malloc(7);
- T_EXPECT_NOTNULL(malloc_zone_from_ptr(ptr), "can find zone for allocation");
- T_EXPECT_TRUE(malloc_claimed_address(ptr), "ptr is claimed");
-
- T_ASSERT_LE(malloc_num_zones, 10, "at most 10 initial zones");
- malloc_zone_t *initial_zones[10];
- uint32_t initial_zone_count = malloc_num_zones;
-
- // Unregister initial zones
- for (uint32_t i = 0; i < initial_zone_count; i++) {
- initial_zones[i] = malloc_zones[0];
- malloc_zone_unregister(malloc_zones[0]);
- }
- T_EXPECT_EQ(malloc_num_zones, 0, "unregistered initial zones");
-
- // No zones, no results, no crash
- T_EXPECT_NULL(malloc_zone_from_ptr(ptr), "cannot find zone");
- T_EXPECT_FALSE(malloc_claimed_address(ptr), "ptr not claimed");
-
- // Create and register custom default zone
- malloc_zone_t *custom_zone = malloc_create_zone(0, 0);
-
- // Custom default zone only, no results, no crash
- T_EXPECT_NULL(malloc_zone_from_ptr(ptr), "cannot find zone");
- T_EXPECT_FALSE(malloc_claimed_address(ptr), "ptr not claimed");
-
- // Re-register initial zones
- for (uint32_t i = 0; i < initial_zone_count; i++) {
- malloc_zone_register(initial_zones[i]);
- }
- T_EXPECT_EQ(malloc_num_zones, initial_zone_count + 1, "re-registered initial zones");
-
- // Custom default zone plus initial zones
- T_EXPECT_NOTNULL(malloc_zone_from_ptr(ptr), "can find zone for allocation");
- T_EXPECT_TRUE(malloc_claimed_address(ptr), "ptr is claimed");
-
- // Check that the custom zone is the default zone
- void *ptr2 = malloc(7);
- T_EXPECT_EQ(malloc_zone_from_ptr(ptr2), custom_zone, "can find custom zone for allocation");
- T_EXPECT_TRUE(malloc_claimed_address(ptr2), "ptr from custom zone is claimed");
-
- free(ptr2);
- free(ptr);
-}
diff --git a/tests/pguard_allocator.c b/tests/pguard_allocator.c
index 1b30d5f..eb38cc4 100644
--- a/tests/pguard_allocator.c
+++ b/tests/pguard_allocator.c
@@ -49,7 +49,7 @@ T_DECL(allocate, "allocate")
zone.max_allocations = 2; // is_full
zone.num_slots = 2; zone.rr_slot_index = 1; // choose_available_slot
zone.max_metadata = 4; zone.num_metadata = 2; // choose_metadata
- expected_upper_bound = 2; rand_ret_value = false; // choose_offset_on_page
+ expected_upper_bound = 2; rand_value = FALSE; //choose_offset_on_page
expected_traces[0] = &metadata[2].alloc_trace; // capture_trace
zone.begin = 640000; // page_addr
expected_read_write_page = 643072; // mark_read_write
@@ -105,7 +105,7 @@ T_DECL(reallocate_guarded_to_sampled, "reallocate: guarded -> sampled")
slots[0] = (slot_t){ .state = ss_allocated, .metadata = 1, .size = 5 }; // lookup_size
zone.max_allocations = 2; // is_full
zone.num_slots = 2; // allocate
- expected_upper_bound = 2; rand_ret_value = true; // allocate -> choose_offset_on_page
+ expected_upper_bound = 2; rand_value = TRUE; // allocate -> choose_available_slot
zone.max_metadata = 1; // allocate -> choose_metadata
expected_traces[0] = &metadata[0].alloc_trace; // allocate -> capture_trace
expected_read_write_page = 643072; // allocate -> mark_read_write
@@ -122,7 +122,7 @@ T_DECL(reallocate_unguarded_to_sampled, "reallocate: unguarded -> sampled")
expected_size_ptr = 1337; size_ret_value = 5; // wrapped_size
zone.max_allocations = 2; // is_full
zone.num_slots = 2; // allocate
- expected_upper_bound = 2; rand_ret_value = true; // allocate -> choose_offset_on_page
+ expected_upper_bound = 2; rand_value = TRUE; // allocate -> choose_available_slot
zone.max_metadata = 1; // allocate -> choose_metadata
expected_traces[0] = &metadata[0].alloc_trace; // allocate -> capture_trace
zone.begin = 640000; // allocate -> page_addr
diff --git a/tests/pguard_internals.c b/tests/pguard_internals.c
index 9698367..3fafb1a 100644
--- a/tests/pguard_internals.c
+++ b/tests/pguard_internals.c
@@ -38,17 +38,17 @@ T_DECL(is_full, "is_full")
T_DECL(should_sample_counter, "should_sample_counter")
{
expected_upper_bound = 7;
- rand_ret_value = 0;
+ rand_value = 0;
T_EXPECT_TRUE(should_sample_counter(7), "1/1 -> sample");
T_EXPECT_TRUE(should_sample_counter(7), "1/1 -> sample");
- rand_ret_value = 1;
+ rand_value = 1;
T_EXPECT_FALSE(should_sample_counter(7), "1/2 -> skip");
T_EXPECT_TRUE (should_sample_counter(7), "2/2 -> sample");
T_EXPECT_FALSE(should_sample_counter(7), "1/2 -> skip");
T_EXPECT_TRUE (should_sample_counter(7), "2/2 -> sample");
- rand_ret_value = 2;
+ rand_value = 2;
T_EXPECT_FALSE(should_sample_counter(7), "1/3 -> skip");
T_EXPECT_FALSE(should_sample_counter(7), "2/3 -> skip");
T_EXPECT_TRUE (should_sample_counter(7), "3/3 -> sample");
@@ -62,22 +62,20 @@ T_DECL(should_sample, "should_sample")
T_EXPECT_TRUE (should_sample(&zone, 5), "normal size");
T_EXPECT_TRUE (should_sample(&zone, PAGE_SIZE), "page size");
T_EXPECT_FALSE(should_sample(&zone, PAGE_SIZE + 1), "size > page size");
- T_EXPECT_EQ(rand_call_count, 3, NULL);
zone.num_allocations = 1;
T_EXPECT_FALSE(should_sample(&zone, 5), "zone full");
zone.max_allocations = 2;
- rand_ret_value = 1;
+ rand_value = 1;
T_EXPECT_FALSE(should_sample(&zone, 5), "1/2 -> skip");
T_EXPECT_TRUE (should_sample(&zone, 5), "2/2 -> sample");
// Ensure rand_uniform() is only called when needed.
- T_EXPECT_EQ(rand_call_count, 4, NULL);
+ expected_upper_bound = ~zone.sample_counter_range;
T_EXPECT_FALSE(should_sample(&zone, PAGE_SIZE + 1), "bad size");
zone.num_allocations = 2;
T_EXPECT_FALSE(should_sample(&zone, 5), "zone full");
- T_EXPECT_EQ(rand_call_count, 4, NULL);
}
T_DECL(is_guarded, "is_guarded")
@@ -203,16 +201,11 @@ T_DECL(choose_available_slot, "choose_available_slot")
T_DECL(choose_metadata, "choose_metadata")
{
- zone.max_metadata = 2;
+ expected_upper_bound = zone.max_metadata = 2;
+ rand_value = 7;
T_EXPECT_EQ(choose_metadata(&zone), 0, "0/2 -> 0");
T_EXPECT_EQ(choose_metadata(&zone), 1, "1/2 -> 1");
- T_EXPECT_EQ(rand_call_count, 0, NULL);
-
- expected_upper_bound = 2; rand_use_ret_values = true;
- slots[0].state = ss_allocated; metadata[0].slot = 0; rand_ret_values[0] = 0;
- slots[1].state = ss_freed; metadata[1].slot = 1; rand_ret_values[1] = 1;
- T_EXPECT_EQ(choose_metadata(&zone), 1, "full -> random metadata (for freed slot)");
- T_EXPECT_EQ(rand_call_count, 2, "try random index until we find metadata for a freed slot");
+ T_EXPECT_EQ(choose_metadata(&zone), 7, "full -> random");
}
T_DECL(is_power_of_2, "is_power_of_2")
@@ -233,10 +226,10 @@ T_DECL(choose_offset_on_page, "choose_offset_on_page")
uint16_t page_size = 32;
expected_upper_bound = 2;
- rand_ret_value = 1;
+ rand_value = 1;
T_EXPECT_EQ(choose_offset_on_page(5, 16, page_size), (uint16_t)0, "left-aligned");
- rand_ret_value = 0;
+ rand_value = 0;
T_EXPECT_EQ(choose_offset_on_page( 0, 1, page_size), (uint16_t)32, "size 0, perfectly right-aligned");
T_EXPECT_EQ(choose_offset_on_page( 1, 1, page_size), (uint16_t)31, "size 1, perfectly right-aligned");
T_EXPECT_EQ(choose_offset_on_page( 5, 1, page_size), (uint16_t)27, "perfectly right-aligned");
diff --git a/tests/pguard_testing.h b/tests/pguard_testing.h
index 91a0dc3..b1361cf 100644
--- a/tests/pguard_testing.h
+++ b/tests/pguard_testing.h
@@ -12,33 +12,25 @@
#pragma mark Mocks
#define PGUARD_MOCK_RANDOM
+static uint32_t rand_value;
static uint32_t expected_upper_bound;
-static uint32_t rand_ret_value;
-static uint32_t rand_ret_values[10];
-static uint32_t rand_call_count;
-static bool rand_use_ret_values;
static uint32_t
rand_uniform(uint32_t upper_bound)
{
T_QUIET; T_EXPECT_EQ(upper_bound, expected_upper_bound, "rand_uniform(upper_bound)");
- if (rand_use_ret_values) {
- T_QUIET; T_ASSERT_LT(rand_call_count, 10, NULL);
- rand_ret_value = rand_ret_values[rand_call_count];
- }
- rand_call_count++;
- return rand_ret_value;
+ return rand_value;
}
#define PGUARD_MOCK_CAPTURE_TRACE
static stack_trace_t *expected_traces[10];
-static uint32_t capture_trace_call_count;
+static uint32_t expected_trace_index;
MALLOC_ALWAYS_INLINE
static inline void
capture_trace(stack_trace_t *trace)
{
- T_QUIET; T_ASSERT_LT(capture_trace_call_count, 10, NULL);
- T_QUIET; T_EXPECT_EQ(trace, expected_traces[capture_trace_call_count], "capture_trace(trace)");
- capture_trace_call_count++;
+ assert(expected_trace_index < 10);
+ T_QUIET; T_EXPECT_EQ(trace, expected_traces[expected_trace_index], "capture_trace(trace)");
+ expected_trace_index++;
}
#define PGUARD_MOCK_PAGE_ACCESS
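
Aside (assumption-laden sketch, not part of the patch): pguard_testing.h relies on compile-time mocking; the test header defines PGUARD_MOCK_RANDOM and friends together with its own rand_uniform() before the implementation is pulled into the test, so the production definition drops out. The production-side guard sketched here is an assumption about how pguard_malloc.c consumes these flags, not a copy of it:

```c
#include <stdint.h>
#include <stdlib.h>

/* Assumed shape in pguard_malloc.c: only provide the real helper when the
 * test harness has not already substituted a mock. */
#ifndef PGUARD_MOCK_RANDOM
static uint32_t
rand_uniform(uint32_t upper_bound)
{
	return arc4random_uniform(upper_bound);
}
#endif
```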