/*
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021 SAP SE. All rights reserved.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "memory/arena.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "unittest.hpp"
#include "testutils.hpp"
#define ASSERT_CONTAINS(ar, p) ASSERT_TRUE(ar.contains(p))
// Note:
// - Amalloc returns a 64-bit aligned pointer (also on 32-bit platforms)
// - AmallocWords returns word-aligned pointer
#define ASSERT_ALIGN_AMALLOC(p) ASSERT_ALIGN(p, ARENA_AMALLOC_ALIGNMENT)
#define ASSERT_ALIGN_AMALLOCWORDS(p) ASSERT_ALIGN(p, BytesPerWord)
// Do a couple of checks on the return of a successful Amalloc
#define ASSERT_AMALLOC(ar, p) \
ASSERT_NOT_NULL(p); \
ASSERT_CONTAINS(ar, p); \
ASSERT_ALIGN_AMALLOC(p);
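// Note: ASSERT_AMALLOC is a multi-statement macro without a do-while(0) wrapper;
// use it only in statement position.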
// #define LOG(s) tty->print_cr s;
#define LOG(s)
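// Logging is off by default; to trace allocations, swap in the commented-out definition above.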
// Test behavior for Amalloc(0).
// Amalloc(0) is ignored: it returns the current hwm without increasing it.
// Therefore, the returned pointer should be non-null and aligned, but not (!) contained
// in the arena, since hwm points beyond the arena.
TEST_VM(Arena, alloc_size_0) {
Arena ar(mtTest);
void* p = ar.Amalloc(0);
ASSERT_NOT_NULL(p);
ASSERT_ALIGN_AMALLOC(p);
ASSERT_FALSE(ar.contains(p));
// Allocate again. The new allocation should land at the same position as the
// zero-sized first one.
void* p2 = ar.Amalloc(1);
ASSERT_AMALLOC(ar, p2);
ASSERT_EQ(p2, p);
}
// Test behavior for Arealloc(p, 0)
TEST_VM(Arena, realloc_size_0) {
// Arealloc(p, 0) behaves like Afree(p): it should release the memory and,
// if the block was in top position, roll back the hwm.
Arena ar(mtTest);
void* p1 = ar.Amalloc(0x10);
ASSERT_AMALLOC(ar, p1);
void* p2 = ar.Arealloc(p1, 0x10, 0);
ASSERT_NULL(p2);
// a subsequent allocation should get the same pointer
void* p3 = ar.Amalloc(0x20);
ASSERT_EQ(p3, p1);
}
// Arealloc with an unchanged size is a no-op.
TEST_VM(Arena, realloc_same_size) {
Arena ar(mtTest);
void* p1 = ar.Amalloc(0x200);
ASSERT_AMALLOC(ar, p1);
GtestUtils::mark_range(p1, 0x200);
void* p2 = ar.Arealloc(p1, 0x200, 0x200);
ASSERT_EQ(p2, p1);
ASSERT_RANGE_IS_MARKED(p2, 0x200);
}
// Test behavior for Afree(nullptr) and Arealloc(nullptr, x)
TEST_VM(Arena, free_null) {
Arena ar(mtTest);
ar.Afree(nullptr, 10); // should just be ignored
}
TEST_VM(Arena, realloc_null) {
Arena ar(mtTest);
void* p = ar.Arealloc(nullptr, 0, 20); // equivalent to Amalloc(20)
ASSERT_AMALLOC(ar, p);
}
// Check Arena.Afree in a non-top position.
// The free'd allocation should be zapped (debug only);
// surrounding blocks should be unaffected.
TEST_VM(Arena, free_nontop) {
Arena ar(mtTest);
void* p_before = ar.Amalloc(0x10);
ASSERT_AMALLOC(ar, p_before);
GtestUtils::mark_range(p_before, 0x10);
void* p = ar.Amalloc(0x10);
ASSERT_AMALLOC(ar, p);
GtestUtils::mark_range_with(p, 0x10, 'Z');
void* p_after = ar.Amalloc(0x10);
ASSERT_AMALLOC(ar, p_after);
GtestUtils::mark_range(p_after, 0x10);
ASSERT_RANGE_IS_MARKED(p_before, 0x10);
ASSERT_RANGE_IS_MARKED_WITH(p, 0x10, 'Z');
ASSERT_RANGE_IS_MARKED(p_after, 0x10);
ar.Afree(p, 0x10);
ASSERT_RANGE_IS_MARKED(p_before, 0x10);
DEBUG_ONLY(ASSERT_RANGE_IS_MARKED_WITH(p, 0x10, badResourceValue);)
ASSERT_RANGE_IS_MARKED(p_after, 0x10);
}
// Check Arena.Afree in a top position.
// The free'd allocation should be zapped (debug only),
// and the hwm should have been rolled back.
TEST_VM(Arena, free_top) {
Arena ar(mtTest);
void* p = ar.Amalloc(0x10);
ASSERT_AMALLOC(ar, p);
GtestUtils::mark_range_with(p, 0x10, 'Z');
ar.Afree(p, 0x10);
DEBUG_ONLY(ASSERT_RANGE_IS_MARKED_WITH(p, 0x10, badResourceValue);)
// a subsequent allocation should get the same pointer
void* p2 = ar.Amalloc(0x20);
ASSERT_EQ(p2, p);
}
// In-place shrinking (top position).
TEST_VM(Arena, realloc_top_shrink) {
Arena ar(mtTest);
void* p1 = ar.Amalloc(0x200);
ASSERT_AMALLOC(ar, p1);
GtestUtils::mark_range(p1, 0x200);
void* p2 = ar.Arealloc(p1, 0x200, 0x100);
ASSERT_EQ(p1, p2);
ASSERT_RANGE_IS_MARKED(p2, 0x100); // realloc should preserve old content
// A subsequent allocation should be placed right after the end of the first, shrunk, allocation
void* p3 = ar.Amalloc(1);
ASSERT_EQ(p3, ((char*)p1) + 0x100);
}
// Shrinking in a non-top position (still happens in place).
TEST_VM(Arena, realloc_nontop_shrink) {
Arena ar(mtTest);
void* p1 = ar.Amalloc(200);
ASSERT_AMALLOC(ar, p1);
GtestUtils::mark_range(p1, 200);
void* p_other = ar.Amalloc(20); // new top, p1 not top anymore
void* p2 = ar.Arealloc(p1, 200, 100);
ASSERT_EQ(p1, p2); // should still shrink in place
ASSERT_RANGE_IS_MARKED(p2, 100); // realloc should preserve old content
}
// In-place growing (top position).
TEST_VM(Arena, realloc_top_grow) {
Arena ar(mtTest); // the default initial chunk size is large enough to ensure the allocation below grows in place.
void* p1 = ar.Amalloc(0x10);
ASSERT_AMALLOC(ar, p1);
GtestUtils::mark_range(p1, 0x10);
void* p2 = ar.Arealloc(p1, 0x10, 0x20);
ASSERT_EQ(p1, p2);
ASSERT_RANGE_IS_MARKED(p2, 0x10); // realloc should preserve old content
}
// Growing in a non-top position (the block has to be moved).
TEST_VM(Arena, realloc_nontop_grow) {
Arena ar(mtTest);
void* p1 = ar.Amalloc(10);
ASSERT_AMALLOC(ar, p1);
GtestUtils::mark_range(p1, 10);
void* p_other = ar.Amalloc(20); // new top, p1 not top anymore
void* p2 = ar.Arealloc(p1, 10, 20);
ASSERT_AMALLOC(ar, p2);
ASSERT_RANGE_IS_MARKED(p2, 10); // realloc should preserve old content
}
// -------- random alloc test -------------
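// Deterministic fill byte ('A'..'Z') derived from the allocation index; ranges are
// marked with it and re-checked later to detect overwrites.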
static uint8_t canary(int i) {
return (uint8_t)('A' + i % 26);
}
// Randomly allocate and reallocate with random sizes and differing alignments;
// check alignment; check for overwriters.
// We do this a number of times, to give chunk pool handling a good workout too.
TEST_VM(Arena, random_allocs) {
const int num_allocs = 250 * 1000;
const int avg_alloc_size = 64;
void** ptrs = NEW_C_HEAP_ARRAY(void*, num_allocs, mtTest);
size_t* sizes = NEW_C_HEAP_ARRAY(size_t, num_allocs, mtTest);
size_t* alignments = NEW_C_HEAP_ARRAY(size_t, num_allocs, mtTest);
Arena ar(mtTest);
// Allocate
for (int i = 0; i < num_allocs; i ++) {
size_t size = os::random() % (avg_alloc_size * 2); // Note: size==0 is okay; we want to test that too
size_t alignment = 0;
void* p = nullptr;
if (os::random() % 2) { // randomly switch between Amalloc and AmallocWords
p = ar.Amalloc(size);
alignment = BytesPerLong;
} else {
// Inconsistency: AmallocWords wants its input size word aligned, whereas Amalloc takes
// care of alignment itself. We may want to clean this up, but for now just go with it.
size = align_up(size, BytesPerWord);
p = ar.AmallocWords(size);
alignment = BytesPerWord;
}
LOG(("[%d]: " PTR_FORMAT ", size " SIZE_FORMAT ", aligned " SIZE_FORMAT,
i, p2i(p), size, alignment));
ASSERT_NOT_NULL(p);
ASSERT_ALIGN(p, alignment);
if (size > 0) {
ASSERT_CONTAINS(ar, p);
}
GtestUtils::mark_range_with(p, size, canary(i));
ptrs[i] = p; sizes[i] = size; alignments[i] = alignment;
}
// Check pattern in allocations for overwriters.
for (int i = 0; i < num_allocs; i ++) {
ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], canary(i));
}
// realloc all of them
for (int i = 0; i < num_allocs; i ++) {
size_t new_size = os::random() % (avg_alloc_size * 2); // Note: 0 is possible and should work
void* p2 = ar.Arealloc(ptrs[i], sizes[i], new_size);
if (new_size > 0) {
ASSERT_NOT_NULL(p2);
ASSERT_CONTAINS(ar, p2);
ASSERT_ALIGN(p2, alignments[i]); // Realloc guarantees at least the original alignment
ASSERT_RANGE_IS_MARKED_WITH(p2, MIN2(sizes[i], new_size), canary(i)); // old content should have been preserved
GtestUtils::mark_range_with(p2, new_size, canary(i)); // mark new range with canary
} else {
ASSERT_NULL(p2);
}
ptrs[i] = p2; sizes[i] = new_size;
LOG(("[%d]: realloc " PTR_FORMAT ", size " SIZE_FORMAT ", aligned " SIZE_FORMAT,
i, p2i(p2), new_size, alignments[i]));
}
// Check test pattern again
// Note that we no longer check the gap pattern, since allocations may have been
// shrunk in place, which makes that check difficult now.
for (int i = 0; i < num_allocs; i ++) {
ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], canary(i));
}
// Randomly free a bunch of allocations.
for (int i = 0; i < num_allocs; i ++) {
if (os::random() % 10 == 0) {
ar.Afree(ptrs[i], sizes[i]);
// In debug builds the freed space should be filled with badResourceValue
DEBUG_ONLY(ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], badResourceValue));
ptrs[i] = nullptr;
}
}
// Check test pattern again
for (int i = 0; i < num_allocs; i ++) {
ASSERT_RANGE_IS_MARKED_WITH(ptrs[i], sizes[i], canary(i));
}
// Free temp data
FREE_C_HEAP_ARRAY(void*, ptrs);
FREE_C_HEAP_ARRAY(size_t, sizes);
FREE_C_HEAP_ARRAY(size_t, alignments);
}
#ifndef LP64
// The tests below cover alignment issues when mixing Amalloc and AmallocWords.
// Since on 64-bit both APIs provide the same alignment, these only matter on 32-bit.
TEST_VM(Arena, mixed_alignment_allocation) {
// Test that mixed alignment allocations work and provide allocations with the correct
// alignment
Arena ar(mtTest);
void* p1 = ar.AmallocWords(BytesPerWord);
void* p2 = ar.Amalloc(BytesPerLong);
ASSERT_TRUE(is_aligned(p1, BytesPerWord));
ASSERT_TRUE(is_aligned(p2, ARENA_AMALLOC_ALIGNMENT));
}
TEST_VM(Arena, Arena_with_crooked_initial_size) {
// Test that an arena with a crooked, not 64-bit aligned initial size
// works
Arena ar(mtTest, 4097);
void* p1 = ar.AmallocWords(BytesPerWord);
void* p2 = ar.Amalloc(BytesPerLong);
ASSERT_TRUE(is_aligned(p1, BytesPerWord));
ASSERT_TRUE(is_aligned(p2, ARENA_AMALLOC_ALIGNMENT));
}
TEST_VM(Arena, Arena_grows_large_unaligned) {
// Test that if the arena grows with a large unaligned value, nothing bad happens.
// We trigger allocation of a new, large, unaligned chunk with a non-standard size
// (only possible on 32-bit when allocating with word alignment).
// Then we allocate some more. If Arena::grow() does not align correctly, an assert
// should fire at some point on 32-bit.
Arena ar(mtTest, 100); // first chunk is small
void* p = ar.AmallocWords(Chunk::size + BytesPerWord); // if Arena::grow() misaligns, this asserts
// some more allocations for good measure
for (int i = 0; i < 100; i ++) {
ar.Amalloc(1);
}
}
#endif // !LP64
static size_t random_arena_chunk_size() {
// Return a standard size with 50% probability, otherwise some random size.
if (os::random() % 10 < 5) {
static const size_t standard_sizes[4] = {
Chunk::tiny_size, Chunk::init_size, Chunk::size, Chunk::medium_size
};
return standard_sizes[os::random() % 4];
}
return ARENA_ALIGN(os::random() % 1024);
}
TEST_VM(Arena, different_chunk_sizes) {
// Test the creation/pooling of chunks; since ChunkPool is hidden, the
// only way to test this is to create/destroy arenas with different initial sizes,
// which determine the initial chunk size.
// Note that since the chunk pools are global and get cleaned out periodically,
// there is no safe way to actually test their occupancy here.
for (int i = 0; i < 1000; i ++) {
// Unfortunately, Arenas cannot be newed,
// so we are left with awkwardly placing a few on the stack.
Arena ar0(mtTest, random_arena_chunk_size());
Arena ar1(mtTest, random_arena_chunk_size());
Arena ar2(mtTest, random_arena_chunk_size());
Arena ar3(mtTest, random_arena_chunk_size());
Arena ar4(mtTest, random_arena_chunk_size());
Arena ar5(mtTest, random_arena_chunk_size());
Arena ar6(mtTest, random_arena_chunk_size());
Arena ar7(mtTest, random_arena_chunk_size());
}
}