#include "test/jemalloc_test.h"
#define arraylen(arr) (sizeof(arr)/sizeof(arr[0]))
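/*
 * ptrs[] collects the allocations made by do_allocs(); last_junked_ptr and
 * last_junked_usize record the most recent junk callback invocation.
 */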
static size_t ptr_ind;
static void *volatile ptrs[100];
static void *last_junked_ptr;
static size_t last_junked_usize;
static void
reset(void) {
ptr_ind = 0;
last_junked_ptr = NULL;
last_junked_usize = 0;
}
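
/* Junk callback: record the pointer and usable size that were junk-filled. */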
static void
test_junk(void *ptr, size_t usize) {
last_junked_ptr = ptr;
last_junked_usize = usize;
}
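
/*
 * Allocate size bytes through every entry point compatible with the requested
 * zeroing and lg_align, stashing each result in ptrs[].
 */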
static void
do_allocs(size_t size, bool zero, size_t lg_align) {
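/*
 * Evaluate the allocation expression, record the result, and (unless zeroed
 * memory was requested) check that the junk-alloc callback reported this
 * pointer and its usable size.
 */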
#define JUNK_ALLOC(...) \
do { \
assert(ptr_ind + 1 < arraylen(ptrs)); \
void *ptr = __VA_ARGS__; \
assert_ptr_not_null(ptr, ""); \
ptrs[ptr_ind++] = ptr; \
if (opt_junk_alloc && !zero) { \
expect_ptr_eq(ptr, last_junked_ptr, ""); \
expect_zu_eq(last_junked_usize, \
TEST_MALLOC_SIZE(ptr), ""); \
} \
} while (0)
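/*
 * malloc, aligned_alloc, memalign, and valloc have no way to request zeroed
 * memory, so they are only exercised when zero is false.
 */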
if (!zero && lg_align == 0) {
JUNK_ALLOC(malloc(size));
}
if (!zero) {
JUNK_ALLOC(aligned_alloc(1 << lg_align, size));
}
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
if (!zero) {
JUNK_ALLOC(je_memalign(1 << lg_align, size));
}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
if (!zero && lg_align == LG_PAGE) {
JUNK_ALLOC(je_valloc(size));
}
#endif
int zero_flag = zero ? MALLOCX_ZERO : 0;
JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)));
JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)
| MALLOCX_TCACHE_NONE));
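/* posix_memalign requires the alignment to be at least sizeof(void *). */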
if (lg_align >= LG_SIZEOF_PTR) {
void *memalign_result;
int err = posix_memalign(&memalign_result, (1 << lg_align),
size);
assert_d_eq(err, 0, "");
JUNK_ALLOC(memalign_result);
}
}
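
/*
 * For every (zero, size, alignment) combination, allocate through each entry
 * point and deallocate through each entry point, checking that the junk-free
 * callback reported the freed pointer and its usable size.
 */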
TEST_BEGIN(test_junk_alloc_free) {
bool zerovals[] = {false, true};
size_t sizevals[] = {
1, 8, 100, 1000, 100*1000
/*
* Memory allocation failure is a real possibility in 32-bit mode.
* Rather than try to check in the face of resource exhaustion, we just
* rely more on the 64-bit tests. This is a little bit white-box-y in
* the sense that this is only a good test strategy if we know that the
* junk pathways don't interact with the allocation selection
* mechanisms; but this is in fact the case.
*/
#if LG_SIZEOF_PTR == 3
, 10 * 1000 * 1000
#endif
};
size_t lg_alignvals[] = {
0, 4, 10, 15, 16, LG_PAGE
#if LG_SIZEOF_PTR == 3
, 20, 24
#endif
};
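
/*
 * Allocate via do_allocs(), then free each recorded pointer with the given
 * expression and check that the junk-free callback saw that pointer and its
 * usize.
 */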
#define JUNK_FREE(...) \
do { \
do_allocs(size, zero, lg_align); \
for (size_t n = 0; n < ptr_ind; n++) { \
void *ptr = ptrs[n]; \
__VA_ARGS__; \
if (opt_junk_free) { \
assert_ptr_eq(ptr, last_junked_ptr, \
""); \
assert_zu_eq(usize, last_junked_usize, \
""); \
} \
reset(); \
} \
} while (0)
for (size_t i = 0; i < arraylen(zerovals); i++) {
for (size_t j = 0; j < arraylen(sizevals); j++) {
for (size_t k = 0; k < arraylen(lg_alignvals); k++) {
bool zero = zerovals[i];
size_t size = sizevals[j];
size_t lg_align = lg_alignvals[k];
size_t usize = nallocx(size,
MALLOCX_LG_ALIGN(lg_align));
JUNK_FREE(free(ptr));
JUNK_FREE(dallocx(ptr, 0));
JUNK_FREE(dallocx(ptr, MALLOCX_TCACHE_NONE));
JUNK_FREE(dallocx(ptr, MALLOCX_LG_ALIGN(
lg_align)));
JUNK_FREE(sdallocx(ptr, usize, MALLOCX_LG_ALIGN(
lg_align)));
JUNK_FREE(sdallocx(ptr, usize,
MALLOCX_TCACHE_NONE | MALLOCX_LG_ALIGN(lg_align)));
if (opt_zero_realloc_action
== zero_realloc_action_free) {
JUNK_FREE(realloc(ptr, 0));
}
}
}
}
}
TEST_END
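
/*
 * When realloc/rallocx grow an allocation, only the region beyond the old
 * usable size should be junk-filled; with MALLOCX_ZERO the junk-alloc
 * callback should not fire at all.
 */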
TEST_BEGIN(test_realloc_expand) {
char *volatile ptr;
char *volatile expanded;
test_skip_if(!opt_junk_alloc);
/* Realloc */
ptr = malloc(SC_SMALL_MAXCLASS);
expanded = realloc(ptr, SC_LARGE_MINCLASS);
expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
expect_zu_eq(last_junked_usize,
SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
free(expanded);
/* rallocx(..., 0) */
ptr = malloc(SC_SMALL_MAXCLASS);
expanded = rallocx(ptr, SC_LARGE_MINCLASS, 0);
expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
expect_zu_eq(last_junked_usize,
SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
free(expanded);
/* rallocx(..., nonzero) */
ptr = malloc(SC_SMALL_MAXCLASS);
expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
expect_zu_eq(last_junked_usize,
SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
free(expanded);
/* rallocx(..., MALLOCX_ZERO) */
ptr = malloc(SC_SMALL_MAXCLASS);
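/* Seed sentinels: a zeroed expansion must not trigger the junk callback. */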
last_junked_ptr = (void *)-1;
last_junked_usize = (size_t)-1;
expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_ZERO);
expect_ptr_eq(last_junked_ptr, (void *)-1, "");
expect_zu_eq(last_junked_usize, (size_t)-1, "");
free(expanded);
/*
* Unfortunately, testing xallocx reliably is difficult to do portably
* (since allocations can be expanded or not expanded differently on
* different platforms).  We rely on manual inspection there -- the
* xallocx pathway is easy to inspect, though.
*
* Likewise, we don't test the shrinking pathways. It's difficult to do
* so consistently (because of the risk of split failure or memory
* exhaustion, in which case no junking should happen). This is fine
* -- junking is a best-effort debug mechanism in the first place.
*/
}
TEST_END
int
main(void) {
junk_alloc_callback = &test_junk;
junk_free_callback = &test_junk;
/*
* We only check the last pointer junked; a reentrant call could overwrite
* that record with one of jemalloc's internal allocations.
*/
return test_no_reentrancy(
test_junk_alloc_free,
test_realloc_expand);
}