Commit cd7e30e

8271242: Add Arena regression tests
Reviewed-by: mseledtsov, coleenp
1 parent 5b3c418 commit cd7e30e

4 files changed, +465 -0 lines changed

test/hotspot/gtest/memory/test_arena.cpp

Lines changed: 298 additions & 0 deletions
@@ -25,9 +25,307 @@

#include "precompiled.hpp"
#include "memory/arena.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "unittest.hpp"
#include "testutils.hpp"

#define ASSERT_CONTAINS(ar, p) ASSERT_TRUE(ar.contains(p))

// Note:
// - Amalloc returns a 64-bit aligned pointer (also on 32-bit platforms)
// - AmallocWords returns a word-aligned pointer
#define ASSERT_ALIGN_AMALLOC(p)      ASSERT_ALIGN(p, ARENA_AMALLOC_ALIGNMENT)
#define ASSERT_ALIGN_AMALLOCWORDS(p) ASSERT_ALIGN(p, BytesPerWords)

// Do a couple of checks on the return of a successful Amalloc
#define ASSERT_AMALLOC(ar, p) \
  ASSERT_NOT_NULL(p); \
  ASSERT_CONTAINS(ar, p); \
  ASSERT_ALIGN_AMALLOC(p);

// #define LOG(s) tty->print_cr s;
#define LOG(s)

// Test behavior for Amalloc(0).
// Amalloc just ignores Amalloc(0) and returns the current hwm without increasing it.
// Therefore, the returned pointer should be not null and aligned, but not (!) contained
// in the arena, since the hwm points beyond the arena.
TEST_VM(Arena, alloc_size_0) {
  Arena ar(mtTest);
  void* p = ar.Amalloc(0);
  ASSERT_NOT_NULL(p);
  ASSERT_ALIGN_AMALLOC(p);
  if (!UseMallocOnly) {
    // contains() works differently in malloc-only mode (and there it's broken anyway)
    ASSERT_FALSE(ar.contains(p));
  }
  // Allocate again. The new allocation should have the same position as the 0-sized
  // first one.
  if (!UseMallocOnly) {
    void* p2 = ar.Amalloc(1);
    ASSERT_AMALLOC(ar, p2);
    ASSERT_EQ(p2, p);
  }
}

// Test behavior for Arealloc(p, 0)
TEST_VM(Arena, realloc_size_0) {
  // Arealloc(p, 0) behaves like Afree(p). It should release the memory
  // and, if the block is in top position, roll back the hwm.
  Arena ar(mtTest);
  void* p1 = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p1);
  void* p2 = ar.Arealloc(p1, 0x10, 0);
  ASSERT_NULL(p2);

  // A subsequent allocation should get the same pointer
  if (!UseMallocOnly) {
    void* p3 = ar.Amalloc(0x20);
    ASSERT_EQ(p3, p1);
  }
}

// Realloc with equal sizes is a noop
TEST_VM(Arena, realloc_same_size) {
  Arena ar(mtTest);
  void* p1 = ar.Amalloc(0x200);
  ASSERT_AMALLOC(ar, p1);
  GtestUtils::mark_range(p1, 0x200);

  void* p2 = ar.Arealloc(p1, 0x200, 0x200);

  if (!UseMallocOnly) {
    ASSERT_EQ(p2, p1);
  }
  ASSERT_RANGE_IS_MARKED(p2, 0x200);
}

// Test behavior for Afree(NULL) and Arealloc(NULL, x)
TEST_VM(Arena, free_null) {
  Arena ar(mtTest);
  ar.Afree(NULL, 10); // should just be ignored
}

TEST_VM(Arena, realloc_null) {
  Arena ar(mtTest);
  void* p = ar.Arealloc(NULL, 0, 20); // equivalent to Amalloc(20)
  ASSERT_AMALLOC(ar, p);
}

// Check Arena.Afree in a non-top position.
// The freed allocation should be zapped (debug only);
// surrounding blocks should be unaffected.
TEST_VM(Arena, free_nontop) {
  Arena ar(mtTest);

  void* p_before = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p_before);
  GtestUtils::mark_range(p_before, 0x10);

  void* p = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p);
  GtestUtils::mark_range_with(p, 0x10, 'Z');

  void* p_after = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p_after);
  GtestUtils::mark_range(p_after, 0x10);

  ASSERT_RANGE_IS_MARKED(p_before, 0x10);
  ASSERT_RANGE_IS_MARKED_WITH(p, 0x10, 'Z');
  ASSERT_RANGE_IS_MARKED(p_after, 0x10);

  ar.Afree(p, 0x10);

  ASSERT_RANGE_IS_MARKED(p_before, 0x10);
  DEBUG_ONLY(ASSERT_RANGE_IS_MARKED_WITH(p, 0x10, badResourceValue);)
  ASSERT_RANGE_IS_MARKED(p_after, 0x10);
}

// Check Arena.Afree in a top position.
// The freed allocation should be zapped (debug only),
// and the hwm should have been rolled back.
TEST_VM(Arena, free_top) {
  Arena ar(mtTest);

  void* p = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p);
  GtestUtils::mark_range_with(p, 0x10, 'Z');

  ar.Afree(p, 0x10);
  DEBUG_ONLY(ASSERT_RANGE_IS_MARKED_WITH(p, 0x10, badResourceValue);)

  // A subsequent allocation should get the same pointer
  if (!UseMallocOnly) {
    void* p2 = ar.Amalloc(0x20);
    ASSERT_EQ(p2, p);
  }
}

// In-place shrinking.
TEST_VM(Arena, realloc_top_shrink) {
  if (!UseMallocOnly) {
    Arena ar(mtTest);

    void* p1 = ar.Amalloc(0x200);
    ASSERT_AMALLOC(ar, p1);
    GtestUtils::mark_range(p1, 0x200);

    void* p2 = ar.Arealloc(p1, 0x200, 0x100);
    ASSERT_EQ(p1, p2);
    ASSERT_RANGE_IS_MARKED(p2, 0x100); // realloc should preserve old content

    // A subsequent allocation should be placed right after the end of the first, shrunk, allocation
    void* p3 = ar.Amalloc(1);
    ASSERT_EQ(p3, ((char*)p1) + 0x100);
  }
}

// Not-in-place shrinking.
TEST_VM(Arena, realloc_nontop_shrink) {
  Arena ar(mtTest);

  void* p1 = ar.Amalloc(200);
  ASSERT_AMALLOC(ar, p1);
  GtestUtils::mark_range(p1, 200);

  void* p_other = ar.Amalloc(20); // new top, p1 not top anymore

  void* p2 = ar.Arealloc(p1, 200, 100);
  if (!UseMallocOnly) {
    ASSERT_EQ(p1, p2); // should still shrink in place
  }
  ASSERT_RANGE_IS_MARKED(p2, 100); // realloc should preserve old content
}

// In-place growing.
TEST_VM(Arena, realloc_top_grow) {
  Arena ar(mtTest); // initial chunk size is large enough to ensure the allocation below grows in place.

  void* p1 = ar.Amalloc(0x10);
  ASSERT_AMALLOC(ar, p1);
  GtestUtils::mark_range(p1, 0x10);

  void* p2 = ar.Arealloc(p1, 0x10, 0x20);
  if (!UseMallocOnly) {
    ASSERT_EQ(p1, p2);
  }
  ASSERT_RANGE_IS_MARKED(p2, 0x10); // realloc should preserve old content
}

// Not-in-place growing.
TEST_VM(Arena, realloc_nontop_grow) {
  Arena ar(mtTest);

  void* p1 = ar.Amalloc(10);
  ASSERT_AMALLOC(ar, p1);
  GtestUtils::mark_range(p1, 10);

  void* p_other = ar.Amalloc(20); // new top, p1 not top anymore

  void* p2 = ar.Arealloc(p1, 10, 20);
  ASSERT_AMALLOC(ar, p2);
  ASSERT_RANGE_IS_MARKED(p2, 10); // realloc should preserve old content
}

// -------- random alloc test -------------

static uint8_t canary(int i) {
  return (uint8_t)('A' + i % 26);
}

// Randomly allocate and reallocate with random sizes and differing alignments;
// check alignment; check for overwriters.
// We do this a number of times, to give chunk pool handling a good workout too.
TEST_VM(Arena, random_allocs) {

  const int num_allocs = 250 * 1000;
  const int avg_alloc_size = 64;

  void** ptrs = NEW_C_HEAP_ARRAY(void*, num_allocs, mtTest);
  size_t* sizes = NEW_C_HEAP_ARRAY(size_t, num_allocs, mtTest);
  size_t* alignments = NEW_C_HEAP_ARRAY(size_t, num_allocs, mtTest);

  Arena ar(mtTest);

  // Allocate
  for (int i = 0; i < num_allocs; i ++) {
    size_t size = os::random() % (avg_alloc_size * 2); // Note: size == 0 is okay; we want to test that too
    size_t alignment = 0;
    void* p = NULL;
    if (os::random() % 2) { // randomly switch between Amalloc and AmallocWords
      p = ar.Amalloc(size);
      alignment = BytesPerLong;
    } else {
      // Inconsistency: AmallocWords wants its input size word aligned, whereas Amalloc takes
      // care of alignment itself. We may want to clean this up, but for now just go with it.
      size = align_up(size, BytesPerWord);
      p = ar.AmallocWords(size);
      alignment = BytesPerWord;
    }
    LOG(("[%d]: " PTR_FORMAT ", size " SIZE_FORMAT ", aligned " SIZE_FORMAT,
         i, p2i(p), size, alignment));
    ASSERT_NOT_NULL(p);
    ASSERT_ALIGN(p, alignment);
    if (size > 0) {
      ASSERT_CONTAINS(ar, p);
    }
    GtestUtils::mark_range_with(p, size, canary(i));
    ptrs[i] = p; sizes[i] = size; alignments[i] = alignment;
  }

  // Check the pattern in all allocations for overwriters.
  for (int i = 0; i < num_allocs; i ++) {
    ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], canary(i));
  }

  // Realloc all of them
  for (int i = 0; i < num_allocs; i ++) {
    size_t new_size = os::random() % (avg_alloc_size * 2); // Note: 0 is possible and should work
    void* p2 = ar.Arealloc(ptrs[i], sizes[i], new_size);
    if (new_size > 0) {
      ASSERT_NOT_NULL(p2);
      ASSERT_CONTAINS(ar, p2);
      ASSERT_ALIGN(p2, alignments[i]); // Realloc guarantees at least the original alignment
      ASSERT_RANGE_IS_MARKED_WITH(p2, MIN2(sizes[i], new_size), canary(i)); // old content should have been preserved

      GtestUtils::mark_range_with(p2, new_size, canary(i)); // mark the new range with the canary
    } else {
      ASSERT_NULL(p2);
    }
    ptrs[i] = p2; sizes[i] = new_size;
    LOG(("[%d]: realloc " PTR_FORMAT ", size " SIZE_FORMAT ", aligned " SIZE_FORMAT,
         i, p2i(p2), new_size, alignments[i]));
  }

  // Check the test pattern again.
  // Note that we no longer check the gap pattern, since if allocations have been shrunk in place
  // this now gets difficult.
  for (int i = 0; i < num_allocs; i ++) {
    ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], canary(i));
  }

  // Randomly free a bunch of allocations.
  for (int i = 0; i < num_allocs; i ++) {
    if (os::random() % 10 == 0) {
      ar.Afree(ptrs[i], sizes[i]);
      // In debug builds the freed space should be filled with badResourceValue
      DEBUG_ONLY(ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], badResourceValue));
      ptrs[i] = NULL;
    }
  }

  // Check the test pattern again
  for (int i = 0; i < num_allocs; i ++) {
    ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], canary(i));
  }

  // Free temp data
  FREE_C_HEAP_ARRAY(char*, ptrs);
  FREE_C_HEAP_ARRAY(size_t, sizes);
  FREE_C_HEAP_ARRAY(size_t, alignments);
}

#ifndef LP64
// These tests below are about alignment issues when mixing Amalloc and AmallocWords.
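
Note (editorial sketch, not part of the commit): several of the tests above revolve around the arena's "hwm" (high-water mark). Amalloc(0) returns the current hwm without bumping it, and Afree/Arealloc roll the hwm back only when the freed block sits at the top. A minimal bump-pointer model, purely illustrative and much simpler than HotSpot's real chunk-based Arena, captures those semantics:

// Illustrative toy model only, not HotSpot code. ToyArena and its members are made up
// for this sketch; the real Arena grows by chunks and zaps freed memory in debug builds.
#include <cstddef>

struct ToyArena {
  char   _buf[4096];
  size_t _hwm = 0;                        // offset of the next free byte ("high-water mark")

  void* alloc(size_t s) {                 // cf. Amalloc: s == 0 returns the current hwm unchanged
    void* p = _buf + _hwm;
    _hwm += s;
    return p;
  }
  void free_block(void* p, size_t s) {    // cf. Afree: only the topmost block rolls the hwm back
    if ((char*)p + s == _buf + _hwm) {
      _hwm -= s;
    }
  }
  bool contains(const void* p) const {    // the hwm itself points just past the used range,
    const char* q = (const char*)p;       // which is why Amalloc(0)'s result is not "contained"
    return q >= _buf && q < _buf + _hwm;
  }
};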

test/hotspot/gtest/testutils.cpp

Lines changed: 68 additions & 0 deletions
@@ -0,0 +1,68 @@
/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/ostream.hpp"

#include "testutils.hpp"
#include "unittest.hpp"

#include <string.h>

// Note: these could be made more suitable for covering large ranges (e.g. just mark one byte per page).

void GtestUtils::mark_range_with(void* p, size_t s, uint8_t mark) {
  if (p != NULL && s > 0) {
    ::memset(p, mark, s);
  }
}

bool GtestUtils::check_range(const void* p, size_t s, uint8_t expected) {
  if (p == NULL || s == 0) {
    return true;
  }

  const char* first_wrong = NULL;
  char* p2 = (char*)p;
  const char* const end = p2 + s;
  while (p2 < end) {
    if (*p2 != (char)expected) {
      first_wrong = p2;
      break;
    }
    p2 ++;
  }

  if (first_wrong != NULL) {
    tty->print_cr("wrong pattern around " PTR_FORMAT, p2i(first_wrong));
    // Note: we deliberately print the surroundings too, without a bounds check. That might be
    // interesting, and os::print_hex_dump uses SafeFetch, so this is fine without bounds checks.
    os::print_hex_dump(tty, (address)(align_down(p2, 0x10) - 0x10),
                            (address)(align_up(end, 0x10) + 0x10), 1);
  }

  return first_wrong == NULL;
}
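
Note (editorial sketch, not part of the commit): the ASSERT_RANGE_IS_MARKED* macros and the GtestUtils::mark_range() shorthand used by test_arena.cpp are presumably declared in the new testutils.hpp, which is among the four changed files but not shown in this excerpt. Under that assumption, they would plausibly be thin wrappers around the helpers above, roughly:

// Hypothetical sketch of testutils.hpp (file not shown here). The macro name follows the
// call sites in test_arena.cpp; everything else about the header is an assumption.
#define ASSERT_RANGE_IS_MARKED_WITH(p, size, mark) \
        ASSERT_TRUE(GtestUtils::check_range(p, size, mark))
// ASSERT_RANGE_IS_MARKED(p, size) and GtestUtils::mark_range(p, size) would do the same
// with whatever default byte pattern the header defines.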
