
Commit e582bc1

Andy Ross authored and andrewboie committed
tests/kernel/mem_pool: Adjust tests to work with k_heap backend
The original k_mem_pool tests were a mix of code that tests routine allocator behavior, code that tests the synchronization layer above it, and a significant amount of code that made low-level assumptions about the specific memory layout of the original allocator. The new k_heap backend does not run out of memory in exactly the same way, so the expectations are adjusted as needed for that backend.

A few test cases that were too specific to the old allocator are skipped. Most have been generalized (for example, iteratively allocating until memory is exhausted instead of assuming the pool will be empty after exactly N allocations).

Signed-off-by: Andy Ross <[email protected]>
1 parent 8f0959c commit e582bc1
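The generalized allocation pattern mentioned in the commit message looks roughly like the sketch below. It is only illustrative: it reuses the kmpool pool and the BLK_SIZE_MIN/BLK_NUM_MIN constants already defined by the test suite, and condenses the logic that appears in the hunks further down.

	/* Allocate until the pool is exhausted instead of assuming it
	 * holds exactly BLK_NUM_MIN blocks.
	 */
	struct k_mem_block block[2 * BLK_NUM_MIN];
	int nb;

	for (nb = 0; nb < ARRAY_SIZE(block); nb++) {
		if (k_mem_pool_alloc(&kmpool, &block[nb], BLK_SIZE_MIN,
				     K_NO_WAIT) != 0) {
			break;	/* pool exhausted */
		}
	}

	/* The k_heap backend may fit more than the nominal block count,
	 * so only a lower bound is asserted.
	 */
	zassert_true(nb >= BLK_NUM_MIN, NULL);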

6 files changed: 125 additions, 11 deletions

tests/kernel/mem_pool/mem_pool/src/main.c
49 additions & 0 deletions

@@ -27,6 +27,21 @@
 
 #define NUM_BLOCKS 64
 
+/* This test is written to a set of known allocation patterns and
+ * their results, making free assumptions about the fragmentation
+ * behavior of the original mem_pool implementation. The newer k_heap
+ * backend is more flexible, and also has allocation behavior that
+ * depends on words size. So we keep separate tables for different
+ * configs.
+ */
+#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
+# ifdef CONFIG_64BIT
+#  define HEAP64
+# else
+#  define HEAP32
+# endif
+#endif
+
 /* size of stack area used by each thread */
 #define STACKSIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
 

@@ -63,24 +78,44 @@ static struct TEST_CASE get_set[] = {
 	{ &block_list[5], &POOL_ID, 256, 0, 0 },
 	{ &block_list[6], &POOL_ID, 512, 0, 0 },
 	{ &block_list[7], &POOL_ID, 1024, 0, 0 },
+#if defined(HEAP32) || defined(HEAP64)
+	{ &block_list[8], &POOL_ID, 2048, 0, 0 },
+#else
 	{ &block_list[8], &POOL_ID, 2048, 0, -ENOMEM },
+#endif
 	{ &block_list[9], &POOL_ID, 4096, 0, -ENOMEM }
 };
 
 static struct TEST_CASE get_set2[] = {
 	{ &block_list[0], &POOL_ID, 4096, 0, 0 },
 	{ &block_list[1], &POOL_ID, 2048, 0, -ENOMEM },
 	{ &block_list[2], &POOL_ID, 1024, 0, -ENOMEM },
+#if defined(HEAP32)
+	{ &block_list[3], &POOL_ID, 512, 0, -ENOMEM },
+	{ &block_list[4], &POOL_ID, 256, 0, 0 }
+#elif defined(HEAP64)
+	{ &block_list[3], &POOL_ID, 512, 0, 0 },
+	{ &block_list[4], &POOL_ID, 256, 0, -ENOMEM }
+#else
 	{ &block_list[3], &POOL_ID, 512, 0, -ENOMEM },
 	{ &block_list[4], &POOL_ID, 256, 0, -ENOMEM }
+#endif
 };
 
 static struct TEST_CASE getwt_set[] = {
 	{ &block_list[0], &POOL_ID, 4096, TENTH_SECOND, 0 },
 	{ &block_list[1], &POOL_ID, 2048, TENTH_SECOND, -EAGAIN },
 	{ &block_list[2], &POOL_ID, 1024, TENTH_SECOND, -EAGAIN },
+#if defined(HEAP32)
 	{ &block_list[3], &POOL_ID, 512, TENTH_SECOND, -EAGAIN },
+	{ &block_list[4], &POOL_ID, 256, TENTH_SECOND, 0 }
+#elif defined(HEAP64)
+	{ &block_list[3], &POOL_ID, 512, TENTH_SECOND, 0 },
 	{ &block_list[4], &POOL_ID, 256, TENTH_SECOND, -EAGAIN }
+#else
+	{ &block_list[3], &POOL_ID, 512, TENTH_SECOND, -EAGAIN },
+	{ &block_list[4], &POOL_ID, 256, TENTH_SECOND, -EAGAIN }
+#endif
 };
 
 

@@ -282,6 +317,11 @@ void alternate_task(void)
  * amount of usable space, due to the hidden block descriptor info the
  * kernel adds at the start of any block allocated from this memory pool.)
  *
+ * NOTE: when CONFIG_MEM_POOL_HEAP_BACKEND is in use, the splitting
+ * algorithm being exercised by this test is not used. In fact the
+ * k_heap backend is significantly more fragmentation resistant, so
+ * calls expected to fail here actually succeed. These are disabled
+ * here.
 *
 * @see k_malloc(), k_free()
 */

@@ -294,9 +334,11 @@ static void test_pool_malloc(void)
 	block[0] = k_malloc(150);
 	zassert_not_null(block[0], "150 byte allocation failed");
 
+#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
 	/* ensure a small block can no longer be allocated */
 	block[1] = k_malloc(16);
 	zassert_is_null(block[1], "16 byte allocation did not fail");
+#endif
 
 	/* return the large block */
 	k_free(block[0]);

@@ -305,18 +347,22 @@
 	block[0] = k_malloc(16);
 	zassert_not_null(block[0], "16 byte allocation 0 failed");
 
+#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
 	/* ensure a large block can no longer be allocated */
 	block[1] = k_malloc(80);
 	zassert_is_null(block[1], "80 byte allocation did not fail");
+#endif
 
 	/* ensure all remaining small blocks can be allocated */
 	for (j = 1; j < 4; j++) {
 		block[j] = k_malloc(16);
 		zassert_not_null(block[j], "16 byte allocation %d failed\n", j);
 	}
 
+#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
 	/* ensure a small block can no longer be allocated */
 	zassert_is_null(k_malloc(8), "8 byte allocation did not fail");
+#endif
 
 	/* return the small blocks to pool in a "random" order */
 	k_free(block[2]);

@@ -328,8 +374,11 @@ static void test_pool_malloc(void)
 	block[0] = k_malloc(100);
 	zassert_not_null(block[0], "100 byte allocation failed");
 
+
+#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
 	/* ensure a small block can no longer be allocated */
 	zassert_is_null(k_malloc(32), "32 byte allocation did not fail");
+#endif
 
 	/* ensure overflow detection is working */
 	zassert_is_null(k_malloc(0xffffffff), "overflow check failed");

tests/kernel/mem_pool/mem_pool_api/src/test_mpool_api.c
29 additions & 6 deletions

@@ -102,6 +102,14 @@ void test_mpool_alloc_size(void)
 	size_t size = BLK_SIZE_MAX;
 	int i = 0;
 
+	/* The sys_heap backend doesn't use the specific block
+	 * breaking algorithm tested here. This is a test of the
+	 * legacy sys_mem_pool allocator only.
+	 */
+	if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
+		ztest_test_skip();
+	}
+
 	/**TESTPOINT: The memory pool allows blocks to be repeatedly partitioned
 	 * into quarters, down to blocks of @a min_size bytes long.
 	 */

@@ -144,14 +152,29 @@
  */
 void test_mpool_alloc_timeout(void)
 {
-	static struct k_mem_block block[BLK_NUM_MIN], fblock;
+	static struct k_mem_block block[2 * BLK_NUM_MIN], fblock;
 	s64_t tms;
-
-	for (int i = 0; i < BLK_NUM_MIN; i++) {
-		zassert_equal(k_mem_pool_alloc(&kmpool, &block[i], BLK_SIZE_MIN,
-			      K_NO_WAIT), 0, NULL);
+	int nb;
+
+	/* allocate all blocks */
+	for (nb = 0; nb < ARRAY_SIZE(block); nb++) {
+		if (k_mem_pool_alloc(&kmpool, &block[nb], BLK_SIZE_MIN,
+				     K_NO_WAIT) != 0) {
+			break;
+		}
 	}
 
+	/* The original mem_pool would always be able to allocate
+	 * exactly "min blocks" before running out of space, the
+	 * heuristics used to size the sys_heap backend are more
+	 * flexible.
+	 */
+#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
+	zassert_true(nb >= BLK_NUM_MIN, NULL);
+#else
+	zassert_true(nb == BLK_NUM_MIN, NULL);
+#endif
+
 	/** TESTPOINT: Use K_NO_WAIT to return without waiting*/
 	/** TESTPOINT: @retval -ENOMEM Returned without waiting*/
 	zassert_equal(k_mem_pool_alloc(&kmpool, &fblock, BLK_SIZE_MIN,

@@ -166,7 +189,7 @@ void test_mpool_alloc_timeout(void)
 	 */
 	zassert_true(k_uptime_delta(&tms) >= TIMEOUT_MS, NULL);
 
-	for (int i = 0; i < BLK_NUM_MIN; i++) {
+	for (int i = 0; i < nb; i++) {
 		k_mem_pool_free(&block[i]);
 		block[i].data = NULL;
 	}

tests/kernel/mem_pool/mem_pool_concept/src/test_mpool_alloc_size.c
8 additions & 0 deletions

@@ -25,6 +25,14 @@ void test_mpool_alloc_size_roundup(void)
 {
 	struct k_mem_block block[BLK_NUM_MAX], block_fail;
 
+	/* This test is written to assume specific heap layout, in
+	 * fact the sys_heap backend can routinely see more than "min
+	 * blocks" allocated.
+	 */
+	if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
+		ztest_test_skip();
+	}
+
 	/**
 	 * TESTPOINT: When an application issues a request for a memory block,
 	 * the memory pool first determines the size of the smallest block that

tests/kernel/mem_pool/mem_pool_concept/src/test_mpool_alloc_wait.c
21 additions & 5 deletions

@@ -49,16 +49,32 @@ void tmpool_alloc_wait_ok(void *p1, void *p2, void *p3)
  */
 void test_mpool_alloc_wait_prio(void)
 {
-	struct k_mem_block block[BLK_NUM_MIN];
+	struct k_mem_block block[2 * BLK_NUM_MIN];
 	k_tid_t tid[THREAD_NUM];
+	int nb;
 
 	k_sem_init(&sync_sema, 0, THREAD_NUM);
+
 	/*allocated up all blocks*/
-	for (int i = 0; i < BLK_NUM_MIN; i++) {
-		zassert_true(k_mem_pool_alloc(&mpool1, &block[i], BLK_SIZE_MIN,
-			     K_NO_WAIT) == 0, NULL);
+	for (nb = 0; nb < ARRAY_SIZE(block); nb++) {
+		if (k_mem_pool_alloc(&mpool1, &block[nb], BLK_SIZE_MIN,
+				     K_NO_WAIT) != 0) {
+			break;
+		}
 	}
 
+	/* The original mem_pool would always be able to allocate
+	 * exactly "min blocks" before running out of space, the
+	 * heuristics used to size the sys_heap backend are more
+	 * flexible.
+	 */
+#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
+	zassert_true(nb >= BLK_NUM_MIN, "nb %d want %d", nb, BLK_NUM_MIN);
+#else
+	zassert_true(nb == BLK_NUM_MIN, NULL);
+#endif
+
+
 	/**
 	 * TESTPOINT: when a suitable memory block becomes available, it is
 	 * given to the highest-priority thread that has waited the longest

@@ -93,7 +109,7 @@
 		k_thread_abort(tid[i]);
 	}
 	k_mem_pool_free(&block_ok);
-	for (int i = 1; i < BLK_NUM_MIN; i++) {
+	for (int i = 1; i < nb; i++) {
 		k_mem_pool_free(&block[i]);
 	}
 }

tests/kernel/mem_pool/mem_pool_concept/src/test_mpool_merge_fail_diff_parent.c
9 additions & 0 deletions

@@ -25,6 +25,15 @@ extern struct k_mem_pool mpool1;
  */
 void test_mpool_alloc_merge_failed_diff_parent(void)
 {
+	/* The heap backend doesn't use the splitting mechanism tested
+	 * here, and in fact is significantly more fragmentation
+	 * resistant and succeeds at the "failed" allocation desired
+	 * below.
+	 */
+	if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
+		ztest_test_skip();
+	}
+
 	struct k_mem_block block[BLK_NUM_MIN], block_fail;
 
 	for (int i = 0; i < BLK_NUM_MIN; i++) {

tests/kernel/mem_pool/mem_pool_concept/src/test_mpool_merge_fail_diff_size.c
9 additions & 0 deletions

@@ -30,6 +30,15 @@ K_MEM_POOL_DEFINE(mpool3, BLK_SIZE_MIN, BLK_SIZE_MAX, BLK_NUM_MAX, BLK_ALIGN);
  */
 void test_mpool_alloc_merge_failed_diff_size(void)
 {
+	/* The heap backend doesn't use the splitting mechanism tested
+	 * here, and in fact is significantly more fragmentation
+	 * resistant and succeeds at the "failed" allocation desired
+	 * below.
+	 */
+	if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
+		ztest_test_skip();
+	}
+
 	struct k_mem_block block[BLK_NUM_MIN], block_fail;
 	size_t block_size[] = {
 		BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN,
