12 changes: 6 additions & 6 deletions include/mempool_heap.h
@@ -35,18 +35,18 @@ struct k_mem_pool {
  * that k_heap does not. We make space for the number of maximum
  * objects defined, and include extra so there's enough metadata space
  * available for the maximum number of minimum-sized objects to be
- * stored: 8 bytes for each desired chunk header, and a 24 word block
- * to reserve room for a "typical" set of bucket list heads (this size
- * was picked more to conform with existing test expectations than any
- * rigorous theory -- we have tests that rely on being able to
- * allocate the blocks promised and ones that make assumptions about
+ * stored: 8 bytes for each desired chunk header, and a 15 word block
+ * to reserve room for a "typical" set of bucket list heads and the heap
+ * footer (this size was picked more to conform with existing test
+ * expectations than any rigorous theory -- we have tests that rely on being
+ * able to allocate the blocks promised and ones that make assumptions about
  * when memory will run out).
  */
 #define Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
 	K_HEAP_DEFINE(poolheap_##name, \
 		((maxsz) * (nmax)) \
 		+ 8 * ((maxsz) * (nmax) / (minsz)) \
-		+ 24 * sizeof(void *)); \
+		+ 15 * sizeof(void *)); \
 	struct k_mem_pool name = { \
 		.heap = &poolheap_##name \
 	}
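
To make the sizing arithmetic concrete, a worked example under stated assumptions: the pool name and parameter values below are hypothetical, and sizeof(void *) is taken as 4 (a 32-bit target).

/* Z_MEM_POOL_DEFINE(my_pool, 8, 64, 4, 4) backs the pool with:
 *
 *   (64 * 4)            = 256 bytes of block payload
 * + 8 * ((64 * 4) / 8)  = 256 bytes of chunk-header metadata
 * + 15 * sizeof(void *) =  60 bytes for bucket heads and footer
 *                         ---
 *                         572 bytes passed to K_HEAP_DEFINE()
 */
Z_MEM_POOL_DEFINE(my_pool, 8, 64, 4, 4);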
63 changes: 24 additions & 39 deletions lib/os/heap-validate.c
@@ -19,24 +19,25 @@

 static size_t max_chunkid(struct z_heap *h)
 {
-	return h->len - bytes_to_chunksz(h, 1);
+	return h->len - min_chunk_size(h);
 }
 
 static bool in_bounds(struct z_heap *h, chunkid_t c)
 {
-	return (c >= h->chunk0)
+	return (c >= right_chunk(h, 0))
 	       && (c <= max_chunkid(h))
-	       && (size(h, c) < h->len);
+	       && (chunk_size(h, c) < h->len);
 }
 
 static bool valid_chunk(struct z_heap *h, chunkid_t c)
 {
-	return (size(h, c) > 0
-		&& (c + size(h, c) <= h->len)
+	return (chunk_size(h, c) > 0
+		&& (c + chunk_size(h, c) <= h->len)
 		&& in_bounds(h, c)
-		&& ((c == h->chunk0) || in_bounds(h, c - left_size(h, c)))
-		&& (used(h, c) || in_bounds(h, free_prev(h, c)))
-		&& (used(h, c) || in_bounds(h, free_next(h, c))));
+		&& (right_chunk(h, left_chunk(h, c)) == c)
+		&& (left_chunk(h, right_chunk(h, c)) == c)
+		&& (chunk_used(h, c) || in_bounds(h, prev_free_chunk(h, c)))
+		&& (chunk_used(h, c) || in_bounds(h, next_free_chunk(h, c))));
 }
 
 /* Validate multiple state dimensions for the bucket "next" pointer
@@ -49,23 +50,14 @@ static inline void check_nexts(struct z_heap *h, int bidx)

 	bool emptybit = (h->avail_buckets & (1 << bidx)) == 0;
 	bool emptylist = b->next == 0;
-	bool emptycount = b->list_size == 0;
-	bool empties_match = emptybit == emptylist && emptybit == emptycount;
+	bool empties_match = emptybit == emptylist;
 
 	(void)empties_match;
 	CHECK(empties_match);
 
 	if (b->next != 0) {
 		CHECK(valid_chunk(h, b->next));
 	}
-
-	if (b->list_size == 2) {
-		CHECK(free_next(h, b->next) == free_prev(h, b->next));
-		CHECK(free_next(h, b->next) != b->next);
-	} else if (b->list_size == 1) {
-		CHECK(free_next(h, b->next) == free_prev(h, b->next));
-		CHECK(free_next(h, b->next) == b->next);
-	}
 }
 
 bool sys_heap_validate(struct sys_heap *heap)
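
The invariant check_nexts() now enforces is simpler: a bucket's bit in avail_buckets must agree with whether its free list is empty, with no separate list_size counter to cross-check. A minimal standalone sketch of that invariant, assuming the field names visible in this diff (the helper name is hypothetical, not part of the PR):

/* Sketch: bucket b's availability bit and its list head must agree:
 * either both say "empty" (bit clear, next == 0) or both say
 * "non-empty". Any mismatch means the heap metadata is corrupt.
 */
static bool bucket_consistent(struct z_heap *h, int b)
{
	bool bit_clear = (h->avail_buckets & (1 << b)) == 0;
	bool list_empty = (h->buckets[b].next == 0);

	return bit_clear == list_empty;
}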
@@ -84,11 +76,11 @@ bool sys_heap_validate(struct sys_heap *heap)
 		check_nexts(h, b);
 
 		for (c = c0; c != 0 && (n == 0 || c != c0);
-		     n++, c = free_next(h, c)) {
+		     n++, c = next_free_chunk(h, c)) {
 			if (!valid_chunk(h, c)) {
 				return false;
 			}
-			chunk_set_used(h, c, true);
+			set_chunk_used(h, c, true);
 		}
 
 		bool empty = (h->avail_buckets & (1 << b)) == 0;
@@ -101,35 +93,28 @@ bool sys_heap_validate(struct sys_heap *heap)
 		if (empty && h->buckets[b].next != 0) {
 			return false;
 		}
-
-		if (n != h->buckets[b].list_size) {
-			return false;
-		}
 	}
 
 	/* Walk through the chunks linearly, verifying sizes and end
 	 * pointer and that all chunks are now USED (i.e. all free
 	 * blocks were found during enumeration). Mark all blocks
 	 * UNUSED
 	 */
-	size_t prev_size = 0;
-	chunkid_t prev_chunk = 0;
-
-	for (c = h->chunk0; c <= max_chunkid(h); c = right_chunk(h, c)) {
+	for (c = right_chunk(h, 0); c <= max_chunkid(h); c = right_chunk(h, c)) {
 		if (!valid_chunk(h, c)) {
 			return false;
 		}
-		if (!used(h, c)) {
+		if (!chunk_used(h, c)) {
 			return false;
 		}
-
-		if (c != h->chunk0) {
-			if (left_size(h, c) != prev_size) {
-				return false;
-			}
-			if (left_chunk(h, c) != prev_chunk) {
-				return false;
-			}
-		}
-		prev_size = size(h, c);
-		prev_chunk = c;
-
-		chunk_set_used(h, c, false);
+		set_chunk_used(h, c, false);
 	}
 	if (c != h->len) {
 		return false; /* Should have exactly consumed the buffer */
@@ -147,19 +132,19 @@ bool sys_heap_validate(struct sys_heap *heap)
 			continue;
 		}
 
-		for (c = c0; n == 0 || c != c0; n++, c = free_next(h, c)) {
-			if (used(h, c)) {
+		for (c = c0; n == 0 || c != c0; n++, c = next_free_chunk(h, c)) {
+			if (chunk_used(h, c)) {
 				return false;
 			}
-			chunk_set_used(h, c, true);
+			set_chunk_used(h, c, true);
 		}
 	}
 
 	/* Now we are valid, but have managed to invert all the in-use
 	 * fields. One more linear pass to fix them up
 	 */
-	for (c = h->chunk0; c <= max_chunkid(h); c = right_chunk(h, c)) {
-		chunk_set_used(h, c, !used(h, c));
+	for (c = right_chunk(h, 0); c <= max_chunkid(h); c = right_chunk(h, c)) {
+		set_chunk_used(h, c, !chunk_used(h, c));
 	}
 	return true;
 }
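
For context on how this validator is driven, a hedged usage sketch: the buffer size and allocation pattern below are arbitrary, and only sys_heap_init(), sys_heap_alloc(), sys_heap_free(), and sys_heap_validate() are assumed from the sys_heap API.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/sys_heap.h>

static uint8_t heap_mem[2048];
static struct sys_heap heap;

void heap_smoke_test(void)
{
	/* Validate after every operation so metadata corruption is
	 * caught at the point it is introduced.
	 */
	sys_heap_init(&heap, heap_mem, sizeof(heap_mem));
	assert(sys_heap_validate(&heap));

	void *p = sys_heap_alloc(&heap, 100);
	assert(p != NULL && sys_heap_validate(&heap));

	sys_heap_free(&heap, p);
	assert(sys_heap_validate(&heap));
}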