Mirror of https://github.com/fluencelabs/redis (synced 2025-06-13 01:01:22 +00:00)
deps/jemalloc/test/unit/a0.c (vendored): 19 lines changed
@@ -1,19 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_a0)
-{
-    void *p;
-
-    p = a0malloc(1);
-    assert_ptr_not_null(p, "Unexpected a0malloc() error");
-    a0dalloc(p);
-}
-TEST_END
-
-int
-main(void)
-{
-
-    return (test_no_malloc_init(
-        test_a0));
-}
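
Note: every file in this listing uses jemalloc's unit-test harness. As a rough hand-written sketch (illustrative only; the real TEST_BEGIN/TEST_END/test() macros in test/include/test/test.h also track skips and print per-test summaries, and the names test_status_t/run_one below are invented for this example), the shape those macros produce is approximately:

/* Illustrative sketch of the harness shape, not the actual macro expansion. */
#include <stdio.h>

typedef enum { test_status_pass, test_status_fail } test_status_t;

/* TEST_BEGIN(name) opens a function returning a status... */
static test_status_t
sketch_test(void)
{
    /* assertions in the body would record test_status_fail here */
    return (test_status_pass);
}
/* ...and TEST_END closes it. test(t0, t1, ...) then runs each function. */

static int
run_one(test_status_t (*t)(void))
{
    test_status_t s = t();

    printf("%s\n", (s == test_status_pass) ? "pass" : "fail");
    return ((s == test_status_pass) ? 0 : 1);
}

int
main(void)
{
    return (run_one(sketch_test));
}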
deps/jemalloc/test/unit/arena_reset.c (vendored): 159 lines changed
@@ -1,159 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_PROF
-const char *malloc_conf = "prof:true,lg_prof_sample:0";
-#endif
-
-static unsigned
-get_nsizes_impl(const char *cmd)
-{
-    unsigned ret;
-    size_t z;
-
-    z = sizeof(unsigned);
-    assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
-        "Unexpected mallctl(\"%s\", ...) failure", cmd);
-
-    return (ret);
-}
-
-static unsigned
-get_nsmall(void)
-{
-
-    return (get_nsizes_impl("arenas.nbins"));
-}
-
-static unsigned
-get_nlarge(void)
-{
-
-    return (get_nsizes_impl("arenas.nlruns"));
-}
-
-static unsigned
-get_nhuge(void)
-{
-
-    return (get_nsizes_impl("arenas.nhchunks"));
-}
-
-static size_t
-get_size_impl(const char *cmd, size_t ind)
-{
-    size_t ret;
-    size_t z;
-    size_t mib[4];
-    size_t miblen = 4;
-
-    z = sizeof(size_t);
-    assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
-        0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
-    mib[2] = ind;
-    z = sizeof(size_t);
-    assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
-        0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
-
-    return (ret);
-}
-
-static size_t
-get_small_size(size_t ind)
-{
-
-    return (get_size_impl("arenas.bin.0.size", ind));
-}
-
-static size_t
-get_large_size(size_t ind)
-{
-
-    return (get_size_impl("arenas.lrun.0.size", ind));
-}
-
-static size_t
-get_huge_size(size_t ind)
-{
-
-    return (get_size_impl("arenas.hchunk.0.size", ind));
-}
-
-TEST_BEGIN(test_arena_reset)
-{
-#define NHUGE 4
-    unsigned arena_ind, nsmall, nlarge, nhuge, nptrs, i;
-    size_t sz, miblen;
-    void **ptrs;
-    int flags;
-    size_t mib[3];
-    tsdn_t *tsdn;
-
-    test_skip_if((config_valgrind && unlikely(in_valgrind)) || (config_fill
-        && unlikely(opt_quarantine)));
-
-    sz = sizeof(unsigned);
-    assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
-        0, "Unexpected mallctl() failure");
-
-    flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
-
-    nsmall = get_nsmall();
-    nlarge = get_nlarge();
-    nhuge = get_nhuge() > NHUGE ? NHUGE : get_nhuge();
-    nptrs = nsmall + nlarge + nhuge;
-    ptrs = (void **)malloc(nptrs * sizeof(void *));
-    assert_ptr_not_null(ptrs, "Unexpected malloc() failure");
-
-    /* Allocate objects with a wide range of sizes. */
-    for (i = 0; i < nsmall; i++) {
-        sz = get_small_size(i);
-        ptrs[i] = mallocx(sz, flags);
-        assert_ptr_not_null(ptrs[i],
-            "Unexpected mallocx(%zu, %#x) failure", sz, flags);
-    }
-    for (i = 0; i < nlarge; i++) {
-        sz = get_large_size(i);
-        ptrs[nsmall + i] = mallocx(sz, flags);
-        assert_ptr_not_null(ptrs[nsmall + i],
-            "Unexpected mallocx(%zu, %#x) failure", sz, flags);
-    }
-    for (i = 0; i < nhuge; i++) {
-        sz = get_huge_size(i);
-        ptrs[nsmall + nlarge + i] = mallocx(sz, flags);
-        assert_ptr_not_null(ptrs[nsmall + nlarge + i],
-            "Unexpected mallocx(%zu, %#x) failure", sz, flags);
-    }
-
-    tsdn = tsdn_fetch();
-
-    /* Verify allocations. */
-    for (i = 0; i < nptrs; i++) {
-        assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0,
-            "Allocation should have queryable size");
-    }
-
-    /* Reset. */
-    miblen = sizeof(mib)/sizeof(size_t);
-    assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
-        "Unexpected mallctlnametomib() failure");
-    mib[1] = (size_t)arena_ind;
-    assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
-        "Unexpected mallctlbymib() failure");
-
-    /* Verify allocations no longer exist. */
-    for (i = 0; i < nptrs; i++) {
-        assert_zu_eq(ivsalloc(tsdn, ptrs[i], false), 0,
-            "Allocation should no longer exist");
-    }
-
-    free(ptrs);
-}
-TEST_END
-
-int
-main(void)
-{
-
-    return (test(
-        test_arena_reset));
-}
deps/jemalloc/test/unit/bitmap.c (vendored): 26 lines changed
@@ -6,11 +6,7 @@ TEST_BEGIN(test_bitmap_size)

 	prev_size = 0;
 	for (i = 1; i <= BITMAP_MAXBITS; i++) {
-		bitmap_info_t binfo;
-		size_t size;
-
-		bitmap_info_init(&binfo, i);
-		size = bitmap_size(&binfo);
+		size_t size = bitmap_size(i);
 		assert_true(size >= prev_size,
 		    "Bitmap size is smaller than expected");
 		prev_size = size;
@@ -27,8 +23,8 @@ TEST_BEGIN(test_bitmap_init)
 		bitmap_info_init(&binfo, i);
 		{
 			size_t j;
-			bitmap_t *bitmap = (bitmap_t *)malloc(
-			    bitmap_size(&binfo));
+			bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
+			    bitmap_info_ngroups(&binfo));
 			bitmap_init(bitmap, &binfo);

 			for (j = 0; j < i; j++) {
@@ -50,8 +46,8 @@ TEST_BEGIN(test_bitmap_set)
 		bitmap_info_init(&binfo, i);
 		{
 			size_t j;
-			bitmap_t *bitmap = (bitmap_t *)malloc(
-			    bitmap_size(&binfo));
+			bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
+			    bitmap_info_ngroups(&binfo));
 			bitmap_init(bitmap, &binfo);

 			for (j = 0; j < i; j++)
@@ -73,8 +69,8 @@ TEST_BEGIN(test_bitmap_unset)
 		bitmap_info_init(&binfo, i);
 		{
 			size_t j;
-			bitmap_t *bitmap = (bitmap_t *)malloc(
-			    bitmap_size(&binfo));
+			bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
+			    bitmap_info_ngroups(&binfo));
 			bitmap_init(bitmap, &binfo);

 			for (j = 0; j < i; j++)
@@ -101,9 +97,9 @@ TEST_BEGIN(test_bitmap_sfu)
 		bitmap_info_t binfo;
 		bitmap_info_init(&binfo, i);
 		{
-			size_t j;
-			bitmap_t *bitmap = (bitmap_t *)malloc(
-			    bitmap_size(&binfo));
+			ssize_t j;
+			bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
+			    bitmap_info_ngroups(&binfo));
 			bitmap_init(bitmap, &binfo);

 			/* Iteratively set bits starting at the beginning. */
@@ -119,7 +115,7 @@ TEST_BEGIN(test_bitmap_sfu)
 			 * Iteratively unset bits starting at the end, and
 			 * verify that bitmap_sfu() reaches the unset bits.
 			 */
-			for (j = i - 1; j < i; j--) { /* (i..0] */
+			for (j = i - 1; j >= 0; j--) {
 				bitmap_unset(bitmap, &binfo, j);
 				assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
 				    "First unset bit should be the bit previously "
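
The bitmap hunks above swap the newer bitmap_size(&binfo) call (the info struct reports its total byte size) for the older sizeof(bitmap_t) * bitmap_info_ngroups(&binfo) idiom. A self-contained sketch of the underlying group arithmetic, using plain uint64_t words and ignoring the multi-level summary groups the newer implementation adds on top:

/* Standalone sketch; GROUP_NBITS and ngroups() are stand-ins for the
 * jemalloc internals, not the library's actual API. */
#include <stdint.h>
#include <stdlib.h>

#define GROUP_NBITS 64	/* one group per 64 bits, as with bitmap_t words */

static size_t
ngroups(size_t nbits)
{
    /* Round up: equivalent of bitmap_info_ngroups() for a flat bitmap. */
    return ((nbits + GROUP_NBITS - 1) / GROUP_NBITS);
}

int
main(void)
{
    size_t nbits = 1000;
    /* Analogue of malloc(sizeof(bitmap_t) * bitmap_info_ngroups(&binfo)). */
    uint64_t *bitmap = (uint64_t *)calloc(ngroups(nbits), sizeof(uint64_t));

    free(bitmap);
    return (0);
}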
deps/jemalloc/test/unit/ckh.c (vendored): 8 lines changed
@@ -7,8 +7,8 @@ TEST_BEGIN(test_new_delete)

 	tsd = tsd_fetch();

-	assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
-	    ckh_string_keycomp), "Unexpected ckh_new() error");
+	assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp),
+	    "Unexpected ckh_new() error");
 	ckh_delete(tsd, &ckh);

 	assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
@@ -32,8 +32,8 @@ TEST_BEGIN(test_count_insert_search_remove)

 	tsd = tsd_fetch();

-	assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
-	    ckh_string_keycomp), "Unexpected ckh_new() error");
+	assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp),
+	    "Unexpected ckh_new() error");
 	assert_zu_eq(ckh_count(&ckh), 0,
 	    "ckh_count() should return %zu, but it returned %zu", ZU(0),
 	    ckh_count(&ckh));
deps/jemalloc/test/unit/decay.c (vendored): 374 lines changed
@@ -1,374 +0,0 @@
-#include "test/jemalloc_test.h"
-
-const char *malloc_conf = "purge:decay,decay_time:1";
-
-static nstime_monotonic_t *nstime_monotonic_orig;
-static nstime_update_t *nstime_update_orig;
-
-static unsigned nupdates_mock;
-static nstime_t time_mock;
-static bool monotonic_mock;
-
-static bool
-nstime_monotonic_mock(void)
-{
-
-    return (monotonic_mock);
-}
-
-static bool
-nstime_update_mock(nstime_t *time)
-{
-
-    nupdates_mock++;
-    if (monotonic_mock)
-        nstime_copy(time, &time_mock);
-    return (!monotonic_mock);
-}
-
-TEST_BEGIN(test_decay_ticks)
-{
-    ticker_t *decay_ticker;
-    unsigned tick0, tick1;
-    size_t sz, huge0, large0;
-    void *p;
-
-    test_skip_if(opt_purge != purge_mode_decay);
-
-    decay_ticker = decay_ticker_get(tsd_fetch(), 0);
-    assert_ptr_not_null(decay_ticker,
-        "Unexpected failure getting decay ticker");
-
-    sz = sizeof(size_t);
-    assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL,
-        0), 0, "Unexpected mallctl failure");
-    assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL,
-        0), 0, "Unexpected mallctl failure");
-
-    /*
-     * Test the standard APIs using a huge size class, since we can't
-     * control tcache interactions (except by completely disabling tcache
-     * for the entire test program).
-     */
-
-    /* malloc(). */
-    tick0 = ticker_read(decay_ticker);
-    p = malloc(huge0);
-    assert_ptr_not_null(p, "Unexpected malloc() failure");
-    tick1 = ticker_read(decay_ticker);
-    assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
-    /* free(). */
-    tick0 = ticker_read(decay_ticker);
-    free(p);
-    tick1 = ticker_read(decay_ticker);
-    assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
-
-    /* calloc(). */
-    tick0 = ticker_read(decay_ticker);
-    p = calloc(1, huge0);
-    assert_ptr_not_null(p, "Unexpected calloc() failure");
-    tick1 = ticker_read(decay_ticker);
-    assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
-    free(p);
-
-    /* posix_memalign(). */
-    tick0 = ticker_read(decay_ticker);
-    assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0,
-        "Unexpected posix_memalign() failure");
-    tick1 = ticker_read(decay_ticker);
-    assert_u32_ne(tick1, tick0,
-        "Expected ticker to tick during posix_memalign()");
-    free(p);
-
-    /* aligned_alloc(). */
-    tick0 = ticker_read(decay_ticker);
-    p = aligned_alloc(sizeof(size_t), huge0);
-    assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
-    tick1 = ticker_read(decay_ticker);
-    assert_u32_ne(tick1, tick0,
-        "Expected ticker to tick during aligned_alloc()");
-    free(p);
-
-    /* realloc(). */
-    /* Allocate. */
-    tick0 = ticker_read(decay_ticker);
-    p = realloc(NULL, huge0);
-    assert_ptr_not_null(p, "Unexpected realloc() failure");
-    tick1 = ticker_read(decay_ticker);
-    assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
-    /* Reallocate. */
-    tick0 = ticker_read(decay_ticker);
-    p = realloc(p, huge0);
-    assert_ptr_not_null(p, "Unexpected realloc() failure");
-    tick1 = ticker_read(decay_ticker);
-    assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
-    /* Deallocate. */
-    tick0 = ticker_read(decay_ticker);
-    realloc(p, 0);
-    tick1 = ticker_read(decay_ticker);
-    assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
-
-    /*
-     * Test the *allocx() APIs using huge, large, and small size classes,
-     * with tcache explicitly disabled.
-     */
-    {
-        unsigned i;
-        size_t allocx_sizes[3];
-        allocx_sizes[0] = huge0;
-        allocx_sizes[1] = large0;
-        allocx_sizes[2] = 1;
-
-        for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
-            sz = allocx_sizes[i];
-
-            /* mallocx(). */
-            tick0 = ticker_read(decay_ticker);
-            p = mallocx(sz, MALLOCX_TCACHE_NONE);
-            assert_ptr_not_null(p, "Unexpected mallocx() failure");
-            tick1 = ticker_read(decay_ticker);
-            assert_u32_ne(tick1, tick0,
-                "Expected ticker to tick during mallocx() (sz=%zu)",
-                sz);
-            /* rallocx(). */
-            tick0 = ticker_read(decay_ticker);
-            p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
-            assert_ptr_not_null(p, "Unexpected rallocx() failure");
-            tick1 = ticker_read(decay_ticker);
-            assert_u32_ne(tick1, tick0,
-                "Expected ticker to tick during rallocx() (sz=%zu)",
-                sz);
-            /* xallocx(). */
-            tick0 = ticker_read(decay_ticker);
-            xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
-            tick1 = ticker_read(decay_ticker);
-            assert_u32_ne(tick1, tick0,
-                "Expected ticker to tick during xallocx() (sz=%zu)",
-                sz);
-            /* dallocx(). */
-            tick0 = ticker_read(decay_ticker);
-            dallocx(p, MALLOCX_TCACHE_NONE);
-            tick1 = ticker_read(decay_ticker);
-            assert_u32_ne(tick1, tick0,
-                "Expected ticker to tick during dallocx() (sz=%zu)",
-                sz);
-            /* sdallocx(). */
-            p = mallocx(sz, MALLOCX_TCACHE_NONE);
-            assert_ptr_not_null(p, "Unexpected mallocx() failure");
-            tick0 = ticker_read(decay_ticker);
-            sdallocx(p, sz, MALLOCX_TCACHE_NONE);
-            tick1 = ticker_read(decay_ticker);
-            assert_u32_ne(tick1, tick0,
-                "Expected ticker to tick during sdallocx() "
-                "(sz=%zu)", sz);
-        }
-    }
-
-    /*
-     * Test tcache fill/flush interactions for large and small size classes,
-     * using an explicit tcache.
-     */
-    if (config_tcache) {
-        unsigned tcache_ind, i;
-        size_t tcache_sizes[2];
-        tcache_sizes[0] = large0;
-        tcache_sizes[1] = 1;
-
-        sz = sizeof(unsigned);
-        assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
-            NULL, 0), 0, "Unexpected mallctl failure");
-
-        for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
-            sz = tcache_sizes[i];
-
-            /* tcache fill. */
-            tick0 = ticker_read(decay_ticker);
-            p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
-            assert_ptr_not_null(p, "Unexpected mallocx() failure");
-            tick1 = ticker_read(decay_ticker);
-            assert_u32_ne(tick1, tick0,
-                "Expected ticker to tick during tcache fill "
-                "(sz=%zu)", sz);
-            /* tcache flush. */
-            dallocx(p, MALLOCX_TCACHE(tcache_ind));
-            tick0 = ticker_read(decay_ticker);
-            assert_d_eq(mallctl("tcache.flush", NULL, NULL,
-                (void *)&tcache_ind, sizeof(unsigned)), 0,
-                "Unexpected mallctl failure");
-            tick1 = ticker_read(decay_ticker);
-            assert_u32_ne(tick1, tick0,
-                "Expected ticker to tick during tcache flush "
-                "(sz=%zu)", sz);
-        }
-    }
-}
-TEST_END
-
-TEST_BEGIN(test_decay_ticker)
-{
-#define NPS 1024
-    int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
-    void *ps[NPS];
-    uint64_t epoch;
-    uint64_t npurge0 = 0;
-    uint64_t npurge1 = 0;
-    size_t sz, large;
-    unsigned i, nupdates0;
-    nstime_t time, decay_time, deadline;
-
-    test_skip_if(opt_purge != purge_mode_decay);
-
-    /*
-     * Allocate a bunch of large objects, pause the clock, deallocate the
-     * objects, restore the clock, then [md]allocx() in a tight loop to
-     * verify the ticker triggers purging.
-     */
-
-    if (config_tcache) {
-        size_t tcache_max;
-
-        sz = sizeof(size_t);
-        assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
-            &sz, NULL, 0), 0, "Unexpected mallctl failure");
-        large = nallocx(tcache_max + 1, flags);
-    } else {
-        sz = sizeof(size_t);
-        assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large, &sz,
-            NULL, 0), 0, "Unexpected mallctl failure");
-    }
-
-    assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
-        "Unexpected mallctl failure");
-    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-        sizeof(uint64_t)), 0, "Unexpected mallctl failure");
-    sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
-        NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
-
-    for (i = 0; i < NPS; i++) {
-        ps[i] = mallocx(large, flags);
-        assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
-    }
-
-    nupdates_mock = 0;
-    nstime_init(&time_mock, 0);
-    nstime_update(&time_mock);
-    monotonic_mock = true;
-
-    nstime_monotonic_orig = nstime_monotonic;
-    nstime_update_orig = nstime_update;
-    nstime_monotonic = nstime_monotonic_mock;
-    nstime_update = nstime_update_mock;
-
-    for (i = 0; i < NPS; i++) {
-        dallocx(ps[i], flags);
-        nupdates0 = nupdates_mock;
-        assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
-            "Unexpected arena.0.decay failure");
-        assert_u_gt(nupdates_mock, nupdates0,
-            "Expected nstime_update() to be called");
-    }
-
-    nstime_monotonic = nstime_monotonic_orig;
-    nstime_update = nstime_update_orig;
-
-    nstime_init(&time, 0);
-    nstime_update(&time);
-    nstime_init2(&decay_time, opt_decay_time, 0);
-    nstime_copy(&deadline, &time);
-    nstime_add(&deadline, &decay_time);
-    do {
-        for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) {
-            void *p = mallocx(1, flags);
-            assert_ptr_not_null(p, "Unexpected mallocx() failure");
-            dallocx(p, flags);
-        }
-        assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-            sizeof(uint64_t)), 0, "Unexpected mallctl failure");
-        sz = sizeof(uint64_t);
-        assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1,
-            &sz, NULL, 0), config_stats ? 0 : ENOENT,
-            "Unexpected mallctl result");
-
-        nstime_update(&time);
-    } while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0);
-
-    if (config_stats)
-        assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
-#undef NPS
-}
-TEST_END
-
-TEST_BEGIN(test_decay_nonmonotonic)
-{
-#define NPS (SMOOTHSTEP_NSTEPS + 1)
-    int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
-    void *ps[NPS];
-    uint64_t epoch;
-    uint64_t npurge0 = 0;
-    uint64_t npurge1 = 0;
-    size_t sz, large0;
-    unsigned i, nupdates0;
-
-    test_skip_if(opt_purge != purge_mode_decay);
-
-    sz = sizeof(size_t);
-    assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL,
-        0), 0, "Unexpected mallctl failure");
-
-    assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
-        "Unexpected mallctl failure");
-    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-        sizeof(uint64_t)), 0, "Unexpected mallctl failure");
-    sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
-        NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
-
-    nupdates_mock = 0;
-    nstime_init(&time_mock, 0);
-    nstime_update(&time_mock);
-    monotonic_mock = false;
-
-    nstime_monotonic_orig = nstime_monotonic;
-    nstime_update_orig = nstime_update;
-    nstime_monotonic = nstime_monotonic_mock;
-    nstime_update = nstime_update_mock;
-
-    for (i = 0; i < NPS; i++) {
-        ps[i] = mallocx(large0, flags);
-        assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
-    }
-
-    for (i = 0; i < NPS; i++) {
-        dallocx(ps[i], flags);
-        nupdates0 = nupdates_mock;
-        assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
-            "Unexpected arena.0.decay failure");
-        assert_u_gt(nupdates_mock, nupdates0,
-            "Expected nstime_update() to be called");
-    }
-
-    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-        sizeof(uint64_t)), 0, "Unexpected mallctl failure");
-    sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz,
-        NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
-
-    if (config_stats)
-        assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
-
-    nstime_monotonic = nstime_monotonic_orig;
-    nstime_update = nstime_update_orig;
-#undef NPS
-}
-TEST_END
-
-int
-main(void)
-{
-
-    return (test(
-        test_decay_ticks,
-        test_decay_ticker,
-        test_decay_nonmonotonic));
-}
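
The deleted decay.c test drives jemalloc 4.x's decay-based purging through the malloc_conf symbol it defines at the top. As a minimal sketch of the same opt-in mechanism in an application (assuming an unprefixed jemalloc build, as in this vendored copy; the purge and decay_time options exist only in the 4.x line being removed here):

#include <stdlib.h>

/* jemalloc reads this symbol at startup, the same mechanism decay.c uses. */
const char *malloc_conf = "purge:decay,decay_time:1";

int
main(void)
{
    void *p = malloc(4096);

    free(p);	/* freed dirty pages decay toward purged over ~1 second */
    return (0);
}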
deps/jemalloc/test/unit/fork.c (vendored): 64 lines changed
@@ -1,64 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifndef _WIN32
-#include <sys/wait.h>
-#endif
-
-TEST_BEGIN(test_fork)
-{
-#ifndef _WIN32
-    void *p;
-    pid_t pid;
-
-    p = malloc(1);
-    assert_ptr_not_null(p, "Unexpected malloc() failure");
-
-    pid = fork();
-
-    free(p);
-
-    p = malloc(64);
-    assert_ptr_not_null(p, "Unexpected malloc() failure");
-    free(p);
-
-    if (pid == -1) {
-        /* Error. */
-        test_fail("Unexpected fork() failure");
-    } else if (pid == 0) {
-        /* Child. */
-        _exit(0);
-    } else {
-        int status;
-
-        /* Parent. */
-        while (true) {
-            if (waitpid(pid, &status, 0) == -1)
-                test_fail("Unexpected waitpid() failure");
-            if (WIFSIGNALED(status)) {
-                test_fail("Unexpected child termination due to "
-                    "signal %d", WTERMSIG(status));
-                break;
-            }
-            if (WIFEXITED(status)) {
-                if (WEXITSTATUS(status) != 0) {
-                    test_fail(
-                        "Unexpected child exit value %d",
-                        WEXITSTATUS(status));
-                }
-                break;
-            }
-        }
-    }
-#else
-    test_skip("fork(2) is irrelevant to Windows");
-#endif
-}
-TEST_END
-
-int
-main(void)
-{
-
-    return (test(
-        test_fork));
-}
deps/jemalloc/test/unit/hash.c (vendored): 36 lines changed
@@ -35,7 +35,7 @@ typedef enum {
 	hash_variant_x64_128
 } hash_variant_t;

-static int
+static size_t
 hash_variant_bits(hash_variant_t variant)
 {

@@ -59,20 +59,19 @@ hash_variant_string(hash_variant_t variant)
 	}
 }

-#define KEY_SIZE 256
 static void
-hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
+hash_variant_verify(hash_variant_t variant)
 {
-	const int hashbytes = hash_variant_bits(variant) / 8;
-	const int hashes_size = hashbytes * 256;
-	VARIABLE_ARRAY(uint8_t, hashes, hashes_size);
+	const size_t hashbytes = hash_variant_bits(variant) / 8;
+	uint8_t key[256];
+	VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256);
 	VARIABLE_ARRAY(uint8_t, final, hashbytes);
 	unsigned i;
 	uint32_t computed, expected;

-	memset(key, 0, KEY_SIZE);
-	memset(hashes, 0, hashes_size);
-	memset(final, 0, hashbytes);
+	memset(key, 0, sizeof(key));
+	memset(hashes, 0, sizeof(hashes));
+	memset(final, 0, sizeof(final));

 	/*
 	 * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the
@@ -103,17 +102,17 @@ hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
 	/* Hash the result array. */
 	switch (variant) {
 	case hash_variant_x86_32: {
-		uint32_t out = hash_x86_32(hashes, hashes_size, 0);
+		uint32_t out = hash_x86_32(hashes, hashbytes*256, 0);
 		memcpy(final, &out, sizeof(out));
 		break;
 	} case hash_variant_x86_128: {
 		uint64_t out[2];
-		hash_x86_128(hashes, hashes_size, 0, out);
+		hash_x86_128(hashes, hashbytes*256, 0, out);
 		memcpy(final, out, sizeof(out));
 		break;
 	} case hash_variant_x64_128: {
 		uint64_t out[2];
-		hash_x64_128(hashes, hashes_size, 0, out);
+		hash_x64_128(hashes, hashbytes*256, 0, out);
 		memcpy(final, out, sizeof(out));
 		break;
 	} default: not_reached();
@@ -140,19 +139,6 @@ hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
 	    hash_variant_string(variant), expected, computed);
 }

-static void
-hash_variant_verify(hash_variant_t variant)
-{
-#define MAX_ALIGN 16
-	uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
-	unsigned i;
-
-	for (i = 0; i < MAX_ALIGN; i++)
-		hash_variant_verify_key(variant, &key[i]);
-#undef MAX_ALIGN
-}
-#undef KEY_SIZE
-
 TEST_BEGIN(test_hash_x86_32)
 {

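
The helper deleted in the last hash.c hunk existed to re-run the verification at every key offset from 0 to 15, checking that the hash functions behave identically for unaligned keys. A compact standalone sketch of that alignment-sweep pattern, with the real MurmurHash variants swapped for a trivial FNV-style stand-in (toy_hash is invented for this example):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_SIZE  256
#define MAX_ALIGN 16

/* Trivial stand-in for hash_x86_32() etc.; only the sweep matters here. */
static uint32_t
toy_hash(const uint8_t *key, size_t len)
{
    uint32_t h = 2166136261u;
    size_t i;

    for (i = 0; i < len; i++)
        h = (h ^ key[i]) * 16777619u;
    return (h);
}

int
main(void)
{
    /* Oversized buffer so the key can start at offsets 0..15. */
    uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
    unsigned i;

    memset(key, 0, sizeof(key));
    for (i = 0; i < MAX_ALIGN; i++)
        printf("align %u: %#x\n", i, toy_hash(&key[i], KEY_SIZE));
    return (0);
}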
deps/jemalloc/test/unit/junk.c (vendored): 17 lines changed
@@ -29,7 +29,7 @@ arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)

 	arena_dalloc_junk_small_orig(ptr, bin_info);
 	for (i = 0; i < bin_info->reg_size; i++) {
-		assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
+		assert_c_eq(((char *)ptr)[i], 0x5a,
 		    "Missing junk fill for byte %zu/%zu of deallocated region",
 		    i, bin_info->reg_size);
 	}
@@ -44,7 +44,7 @@ arena_dalloc_junk_large_intercept(void *ptr, size_t usize)

 	arena_dalloc_junk_large_orig(ptr, usize);
 	for (i = 0; i < usize; i++) {
-		assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
+		assert_c_eq(((char *)ptr)[i], 0x5a,
 		    "Missing junk fill for byte %zu/%zu of deallocated region",
 		    i, usize);
 	}
@@ -69,7 +69,7 @@ huge_dalloc_junk_intercept(void *ptr, size_t usize)
 static void
 test_junk(size_t sz_min, size_t sz_max)
 {
-	uint8_t *s;
+	char *s;
 	size_t sz_prev, sz, i;

 	if (opt_junk_free) {
@@ -82,23 +82,23 @@ test_junk(size_t sz_min, size_t sz_max)
 	}

 	sz_prev = 0;
-	s = (uint8_t *)mallocx(sz_min, 0);
+	s = (char *)mallocx(sz_min, 0);
 	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

 	for (sz = sallocx(s, 0); sz <= sz_max;
 	    sz_prev = sz, sz = sallocx(s, 0)) {
 		if (sz_prev > 0) {
-			assert_u_eq(s[0], 'a',
+			assert_c_eq(s[0], 'a',
 			    "Previously allocated byte %zu/%zu is corrupted",
 			    ZU(0), sz_prev);
-			assert_u_eq(s[sz_prev-1], 'a',
+			assert_c_eq(s[sz_prev-1], 'a',
 			    "Previously allocated byte %zu/%zu is corrupted",
 			    sz_prev-1, sz_prev);
 		}

 		for (i = sz_prev; i < sz; i++) {
 			if (opt_junk_alloc) {
-				assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
+				assert_c_eq(s[i], 0xa5,
 				    "Newly allocated byte %zu/%zu isn't "
 				    "junk-filled", i, sz);
 			}
@@ -107,7 +107,7 @@ test_junk(size_t sz_min, size_t sz_max)

 		if (xallocx(s, sz+1, 0, 0) == sz) {
 			watch_junking(s);
-			s = (uint8_t *)rallocx(s, sz+1, 0);
+			s = (char *)rallocx(s, sz+1, 0);
 			assert_ptr_not_null((void *)s,
 			    "Unexpected rallocx() failure");
 			assert_true(!opt_junk_free || saw_junking,
@@ -244,6 +244,7 @@ int
 main(void)
 {

+	assert(!config_fill || opt_junk_alloc || opt_junk_free);
 	return (test(
 	    test_junk_small,
 	    test_junk_large,
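
For context on the assertions above: with junk filling enabled, jemalloc paints newly allocated bytes with 0xa5 and freed bytes with 0x5a; the 4.x side names these JEMALLOC_ALLOC_JUNK and JEMALLOC_FREE_JUNK, while the 3.6-era side uses the literals. A standalone sketch of the property the test verifies (plain C, no jemalloc required):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define ALLOC_JUNK 0xa5	/* pattern left in fresh allocations */
#define FREE_JUNK  0x5a	/* pattern left in deallocated memory */

int
main(void)
{
    uint8_t buf[16];

    memset(buf, ALLOC_JUNK, sizeof(buf));	/* what mallocx() would leave */
    assert(buf[0] == ALLOC_JUNK && buf[15] == ALLOC_JUNK);

    memset(buf, FREE_JUNK, sizeof(buf));	/* what dallocx() would leave */
    assert(buf[0] == FREE_JUNK && buf[15] == FREE_JUNK);
    return (0);
}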
deps/jemalloc/test/unit/junk_alloc.c (vendored): 2 lines changed
@@ -1,3 +1,3 @@
-#define	JEMALLOC_TEST_JUNK_OPT	"junk:alloc"
+#define JEMALLOC_TEST_JUNK_OPT "junk:alloc"
 #include "junk.c"
 #undef JEMALLOC_TEST_JUNK_OPT
deps/jemalloc/test/unit/junk_free.c (vendored): 2 lines changed
@@ -1,3 +1,3 @@
-#define	JEMALLOC_TEST_JUNK_OPT	"junk:free"
+#define JEMALLOC_TEST_JUNK_OPT "junk:free"
 #include "junk.c"
 #undef JEMALLOC_TEST_JUNK_OPT
deps/jemalloc/test/unit/mallctl.c (vendored): 319 lines changed; mode changed from executable file to normal file
@@ -12,18 +12,16 @@ TEST_BEGIN(test_mallctl_errors)
 	    EPERM, "mallctl() should return EPERM on attempt to write "
 	    "read-only value");

-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-	    sizeof(epoch)-1), EINVAL,
-	    "mallctl() should return EINVAL for input size mismatch");
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
-	    sizeof(epoch)+1), EINVAL,
-	    "mallctl() should return EINVAL for input size mismatch");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1),
+	    EINVAL, "mallctl() should return EINVAL for input size mismatch");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1),
+	    EINVAL, "mallctl() should return EINVAL for input size mismatch");

 	sz = sizeof(epoch)-1;
-	assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
+	assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
 	    "mallctl() should return EINVAL for output size mismatch");
 	sz = sizeof(epoch)+1;
-	assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
+	assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
 	    "mallctl() should return EINVAL for output size mismatch");
 }
 TEST_END
@@ -58,20 +56,18 @@ TEST_BEGIN(test_mallctlbymib_errors)
 	assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
 	    "Unexpected mallctlnametomib() failure");

-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
+	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
 	    sizeof(epoch)-1), EINVAL,
 	    "mallctlbymib() should return EINVAL for input size mismatch");
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
+	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
 	    sizeof(epoch)+1), EINVAL,
 	    "mallctlbymib() should return EINVAL for input size mismatch");

 	sz = sizeof(epoch)-1;
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
-	    EINVAL,
+	assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
 	    "mallctlbymib() should return EINVAL for output size mismatch");
 	sz = sizeof(epoch)+1;
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
-	    EINVAL,
+	assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
 	    "mallctlbymib() should return EINVAL for output size mismatch");
 }
 TEST_END
@@ -87,19 +83,18 @@ TEST_BEGIN(test_mallctl_read_write)
 	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");

 	/* Read. */
-	assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
 	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");

 	/* Write. */
-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
-	    sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)),
+	    0, "Unexpected mallctl() failure");
 	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");

 	/* Read+write. */
-	assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
-	    (void *)&new_epoch, sizeof(new_epoch)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch,
+	    sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
 	assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
 }
 TEST_END
@@ -122,30 +117,29 @@ TEST_END
 TEST_BEGIN(test_mallctl_config)
 {

-#define TEST_MALLCTL_CONFIG(config, t) do { \
-	t oldval; \
+#define TEST_MALLCTL_CONFIG(config) do { \
+	bool oldval; \
 	size_t sz = sizeof(oldval); \
-	assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \
-	    NULL, 0), 0, "Unexpected mallctl() failure"); \
+	assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \
+	    0, "Unexpected mallctl() failure"); \
 	assert_b_eq(oldval, config_##config, "Incorrect config value"); \
 	assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
 } while (0)

-	TEST_MALLCTL_CONFIG(cache_oblivious, bool);
-	TEST_MALLCTL_CONFIG(debug, bool);
-	TEST_MALLCTL_CONFIG(fill, bool);
-	TEST_MALLCTL_CONFIG(lazy_lock, bool);
-	TEST_MALLCTL_CONFIG(malloc_conf, const char *);
-	TEST_MALLCTL_CONFIG(munmap, bool);
-	TEST_MALLCTL_CONFIG(prof, bool);
-	TEST_MALLCTL_CONFIG(prof_libgcc, bool);
-	TEST_MALLCTL_CONFIG(prof_libunwind, bool);
-	TEST_MALLCTL_CONFIG(stats, bool);
-	TEST_MALLCTL_CONFIG(tcache, bool);
-	TEST_MALLCTL_CONFIG(tls, bool);
-	TEST_MALLCTL_CONFIG(utrace, bool);
-	TEST_MALLCTL_CONFIG(valgrind, bool);
-	TEST_MALLCTL_CONFIG(xmalloc, bool);
+	TEST_MALLCTL_CONFIG(cache_oblivious);
+	TEST_MALLCTL_CONFIG(debug);
+	TEST_MALLCTL_CONFIG(fill);
+	TEST_MALLCTL_CONFIG(lazy_lock);
+	TEST_MALLCTL_CONFIG(munmap);
+	TEST_MALLCTL_CONFIG(prof);
+	TEST_MALLCTL_CONFIG(prof_libgcc);
+	TEST_MALLCTL_CONFIG(prof_libunwind);
+	TEST_MALLCTL_CONFIG(stats);
+	TEST_MALLCTL_CONFIG(tcache);
+	TEST_MALLCTL_CONFIG(tls);
+	TEST_MALLCTL_CONFIG(utrace);
+	TEST_MALLCTL_CONFIG(valgrind);
+	TEST_MALLCTL_CONFIG(xmalloc);

 #undef TEST_MALLCTL_CONFIG
 }
@@ -159,8 +153,7 @@ TEST_BEGIN(test_mallctl_opt)
 	t oldval; \
 	size_t sz = sizeof(oldval); \
 	int expected = config_##config ? 0 : ENOENT; \
-	int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \
-	    0); \
+	int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \
 	assert_d_eq(result, expected, \
 	    "Unexpected mallctl() result for opt."#opt); \
 	assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
@@ -169,10 +162,8 @@ TEST_BEGIN(test_mallctl_opt)
 	TEST_MALLCTL_OPT(bool, abort, always);
 	TEST_MALLCTL_OPT(size_t, lg_chunk, always);
 	TEST_MALLCTL_OPT(const char *, dss, always);
-	TEST_MALLCTL_OPT(unsigned, narenas, always);
-	TEST_MALLCTL_OPT(const char *, purge, always);
+	TEST_MALLCTL_OPT(size_t, narenas, always);
 	TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always);
-	TEST_MALLCTL_OPT(ssize_t, decay_time, always);
 	TEST_MALLCTL_OPT(bool, stats_print, always);
 	TEST_MALLCTL_OPT(const char *, junk, fill);
 	TEST_MALLCTL_OPT(size_t, quarantine, fill);
@@ -203,7 +194,7 @@ TEST_BEGIN(test_manpage_example)
 	size_t len, miblen;

 	len = sizeof(nbins);
-	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
+	assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
 	    "Unexpected mallctl() failure");

 	miblen = 4;
@@ -214,8 +205,8 @@ TEST_BEGIN(test_manpage_example)

 		mib[2] = i;
 		len = sizeof(bin_size);
-		assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
-		    NULL, 0), 0, "Unexpected mallctlbymib() failure");
+		assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0),
+		    0, "Unexpected mallctlbymib() failure");
 		/* Do something with bin_size... */
 	}
 }
@@ -264,25 +255,25 @@ TEST_BEGIN(test_tcache)
 	/* Create tcaches. */
 	for (i = 0; i < NTCACHES; i++) {
 		sz = sizeof(unsigned);
-		assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
-		    0), 0, "Unexpected mallctl() failure, i=%u", i);
+		assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
+		    "Unexpected mallctl() failure, i=%u", i);
 	}

 	/* Exercise tcache ID recycling. */
 	for (i = 0; i < NTCACHES; i++) {
-		assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
-		    (void *)&tis[i], sizeof(unsigned)), 0,
-		    "Unexpected mallctl() failure, i=%u", i);
+		assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
+		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+		    i);
 	}
 	for (i = 0; i < NTCACHES; i++) {
 		sz = sizeof(unsigned);
-		assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
-		    0), 0, "Unexpected mallctl() failure, i=%u", i);
+		assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
+		    "Unexpected mallctl() failure, i=%u", i);
 	}

 	/* Flush empty tcaches. */
 	for (i = 0; i < NTCACHES; i++) {
-		assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
+		assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
 		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
 		    i);
 	}
@@ -327,16 +318,16 @@ TEST_BEGIN(test_tcache)

 	/* Flush some non-empty tcaches. */
 	for (i = 0; i < NTCACHES/2; i++) {
-		assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
+		assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
 		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
 		    i);
 	}

 	/* Destroy tcaches. */
 	for (i = 0; i < NTCACHES; i++) {
-		assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
-		    (void *)&tis[i], sizeof(unsigned)), 0,
-		    "Unexpected mallctl() failure, i=%u", i);
+		assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
+		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+		    i);
 	}
 }
 TEST_END
@@ -346,17 +337,15 @@ TEST_BEGIN(test_thread_arena)
 {
 	unsigned arena_old, arena_new, narenas;
 	size_t sz = sizeof(unsigned);

-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
 	assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
 	arena_new = narenas - 1;
-	assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz,
-	    (void *)&arena_new, sizeof(unsigned)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new,
+	    sizeof(unsigned)), 0, "Unexpected mallctl() failure");
 	arena_new = 0;
-	assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz,
-	    (void *)&arena_new, sizeof(unsigned)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new,
+	    sizeof(unsigned)), 0, "Unexpected mallctl() failure");
 }
 TEST_END
@@ -365,20 +354,17 @@ TEST_BEGIN(test_arena_i_lg_dirty_mult)
 	ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
 	size_t sz = sizeof(ssize_t);

-	test_skip_if(opt_purge != purge_mode_ratio);
-
-	assert_d_eq(mallctl("arena.0.lg_dirty_mult",
-	    (void *)&orig_lg_dirty_mult, &sz, NULL, 0), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
+	    NULL, 0), 0, "Unexpected mallctl() failure");

 	lg_dirty_mult = -2;
 	assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
-	    (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+	    &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
 	    "Unexpected mallctl() success");

 	lg_dirty_mult = (sizeof(size_t) << 3);
 	assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
-	    (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+	    &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
 	    "Unexpected mallctl() success");

 	for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
@@ -386,48 +372,15 @@ TEST_BEGIN(test_arena_i_lg_dirty_mult)
 	    = lg_dirty_mult, lg_dirty_mult++) {
 		ssize_t old_lg_dirty_mult;

-		assert_d_eq(mallctl("arena.0.lg_dirty_mult",
-		    (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult,
-		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+		assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult,
+		    &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
+		    "Unexpected mallctl() failure");
 		assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
 		    "Unexpected old arena.0.lg_dirty_mult");
 	}
 }
 TEST_END

-TEST_BEGIN(test_arena_i_decay_time)
-{
-	ssize_t decay_time, orig_decay_time, prev_decay_time;
-	size_t sz = sizeof(ssize_t);
-
-	test_skip_if(opt_purge != purge_mode_decay);
-
-	assert_d_eq(mallctl("arena.0.decay_time", (void *)&orig_decay_time, &sz,
-	    NULL, 0), 0, "Unexpected mallctl() failure");
-
-	decay_time = -2;
-	assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
-	    (void *)&decay_time, sizeof(ssize_t)), EFAULT,
-	    "Unexpected mallctl() success");
-
-	decay_time = 0x7fffffff;
-	assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
-	    (void *)&decay_time, sizeof(ssize_t)), 0,
-	    "Unexpected mallctl() failure");
-
-	for (prev_decay_time = decay_time, decay_time = -1;
-	    decay_time < 20; prev_decay_time = decay_time, decay_time++) {
-		ssize_t old_decay_time;
-
-		assert_d_eq(mallctl("arena.0.decay_time", (void *)&old_decay_time,
-		    &sz, (void *)&decay_time, sizeof(ssize_t)), 0,
-		    "Unexpected mallctl() failure");
-		assert_zd_eq(old_decay_time, prev_decay_time,
-		    "Unexpected old arena.0.decay_time");
-	}
-}
-TEST_END
-
 TEST_BEGIN(test_arena_i_purge)
 {
 	unsigned narenas;
@@ -438,29 +391,9 @@ TEST_BEGIN(test_arena_i_purge)
 	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
 	    "Unexpected mallctl() failure");

-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
-	    0, "Unexpected mallctl() failure");
-	assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	mib[1] = narenas;
-	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctlbymib() failure");
-}
-TEST_END
-
-TEST_BEGIN(test_arena_i_decay)
-{
-	unsigned narenas;
-	size_t sz = sizeof(unsigned);
-	size_t mib[3];
-	size_t miblen = 3;
-
-	assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+	assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
-
-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL,
-	    0), 0, "Unexpected mallctl() failure");
-	assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
+	assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
 	    "Unexpected mallctlnametomib() failure");
 	mib[1] = narenas;
 	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
@@ -480,35 +413,31 @@ TEST_BEGIN(test_arena_i_dss)
 	    "Unexpected mallctlnametomib() error");

 	dss_prec_new = "disabled";
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
-	    (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
+	    sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
 	assert_str_ne(dss_prec_old, "primary",
 	    "Unexpected default for dss precedence");

-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
-	    (void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
+	    sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure");

-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
-	    0), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
 	assert_str_ne(dss_prec_old, "primary",
 	    "Unexpected value for dss precedence");

 	mib[1] = narenas_total_get();
 	dss_prec_new = "disabled";
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
-	    (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
+	    sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
 	assert_str_ne(dss_prec_old, "primary",
 	    "Unexpected default for dss precedence");

-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
-	    (void *)&dss_prec_old, sizeof(dss_prec_new)), 0,
-	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
+	    sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");

-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
-	    0), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
 	assert_str_ne(dss_prec_old, "primary",
 	    "Unexpected value for dss precedence");
 }
@@ -519,14 +448,14 @@ TEST_BEGIN(test_arenas_initialized)
 	unsigned narenas;
 	size_t sz = sizeof(narenas);

-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
 	{
 		VARIABLE_ARRAY(bool, initialized, narenas);

 		sz = narenas * sizeof(bool);
-		assert_d_eq(mallctl("arenas.initialized", (void *)initialized,
-		    &sz, NULL, 0), 0, "Unexpected mallctl() failure");
+		assert_d_eq(mallctl("arenas.initialized", initialized, &sz,
+		    NULL, 0), 0, "Unexpected mallctl() failure");
 	}
 }
 TEST_END
@@ -536,19 +465,17 @@ TEST_BEGIN(test_arenas_lg_dirty_mult)
 	ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
 	size_t sz = sizeof(ssize_t);

-	test_skip_if(opt_purge != purge_mode_ratio);
-
-	assert_d_eq(mallctl("arenas.lg_dirty_mult", (void *)&orig_lg_dirty_mult,
-	    &sz, NULL, 0), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
+	    NULL, 0), 0, "Unexpected mallctl() failure");

 	lg_dirty_mult = -2;
 	assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
-	    (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+	    &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
 	    "Unexpected mallctl() success");

 	lg_dirty_mult = (sizeof(size_t) << 3);
 	assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
-	    (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+	    &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
 	    "Unexpected mallctl() success");

 	for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
@@ -556,56 +483,23 @@ TEST_BEGIN(test_arenas_lg_dirty_mult)
 	    lg_dirty_mult, lg_dirty_mult++) {
 		ssize_t old_lg_dirty_mult;

-		assert_d_eq(mallctl("arenas.lg_dirty_mult",
-		    (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult,
-		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+		assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult,
+		    &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
+		    "Unexpected mallctl() failure");
 		assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
 		    "Unexpected old arenas.lg_dirty_mult");
 	}
 }
 TEST_END

-TEST_BEGIN(test_arenas_decay_time)
-{
-	ssize_t decay_time, orig_decay_time, prev_decay_time;
-	size_t sz = sizeof(ssize_t);
-
-	test_skip_if(opt_purge != purge_mode_decay);
-
-	assert_d_eq(mallctl("arenas.decay_time", (void *)&orig_decay_time, &sz,
-	    NULL, 0), 0, "Unexpected mallctl() failure");
-
-	decay_time = -2;
-	assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
-	    (void *)&decay_time, sizeof(ssize_t)), EFAULT,
-	    "Unexpected mallctl() success");
-
-	decay_time = 0x7fffffff;
-	assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
-	    (void *)&decay_time, sizeof(ssize_t)), 0,
-	    "Unexpected mallctl() failure");
-
-	for (prev_decay_time = decay_time, decay_time = -1;
-	    decay_time < 20; prev_decay_time = decay_time, decay_time++) {
-		ssize_t old_decay_time;
-
-		assert_d_eq(mallctl("arenas.decay_time",
-		    (void *)&old_decay_time, &sz, (void *)&decay_time,
-		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
-		assert_zd_eq(old_decay_time, prev_decay_time,
-		    "Unexpected old arenas.decay_time");
-	}
-}
-TEST_END
-
 TEST_BEGIN(test_arenas_constants)
 {

 #define TEST_ARENAS_CONSTANT(t, name, expected) do { \
 	t name; \
 	size_t sz = sizeof(t); \
-	assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \
-	    0), 0, "Unexpected mallctl() failure"); \
+	assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0, \
+	    "Unexpected mallctl() failure"); \
 	assert_zu_eq(name, expected, "Incorrect "#name" size"); \
 } while (0)

@@ -625,8 +519,8 @@ TEST_BEGIN(test_arenas_bin_constants)
 #define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
 	t name; \
 	size_t sz = sizeof(t); \
-	assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \
-	    NULL, 0), 0, "Unexpected mallctl() failure"); \
+	assert_d_eq(mallctl("arenas.bin.0."#name, &name, &sz, NULL, 0), \
+	    0, "Unexpected mallctl() failure"); \
 	assert_zu_eq(name, expected, "Incorrect "#name" size"); \
 } while (0)

@@ -644,8 +538,8 @@ TEST_BEGIN(test_arenas_lrun_constants)
 #define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \
 	t name; \
 	size_t sz = sizeof(t); \
-	assert_d_eq(mallctl("arenas.lrun.0."#name, (void *)&name, &sz, \
-	    NULL, 0), 0, "Unexpected mallctl() failure"); \
+	assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \
+	    0), 0, "Unexpected mallctl() failure"); \
 	assert_zu_eq(name, expected, "Incorrect "#name" size"); \
 } while (0)

@@ -661,8 +555,8 @@ TEST_BEGIN(test_arenas_hchunk_constants)
 #define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \
 	t name; \
 	size_t sz = sizeof(t); \
-	assert_d_eq(mallctl("arenas.hchunk.0."#name, (void *)&name, \
-	    &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \
+	assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL, \
+	    0), 0, "Unexpected mallctl() failure"); \
 	assert_zu_eq(name, expected, "Incorrect "#name" size"); \
 } while (0)

@@ -677,12 +571,12 @@ TEST_BEGIN(test_arenas_extend)
 	unsigned narenas_before, arena, narenas_after;
 	size_t sz = sizeof(unsigned);

-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
-	    NULL, 0), 0, "Unexpected mallctl() failure");
-	assert_d_eq(mallctl("arenas.extend", (void *)&arena, &sz, NULL, 0), 0,
+	assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
-	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
-	    0), 0, "Unexpected mallctl() failure");

 	assert_u_eq(narenas_before+1, narenas_after,
 	    "Unexpected number of arenas before versus after extension");
@@ -696,14 +590,12 @@ TEST_BEGIN(test_stats_arenas)
 #define TEST_STATS_ARENAS(t, name) do { \
 	t name; \
 	size_t sz = sizeof(t); \
-	assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \
-	    NULL, 0), 0, "Unexpected mallctl() failure"); \
+	assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL, \
+	    0), 0, "Unexpected mallctl() failure"); \
 } while (0)

-	TEST_STATS_ARENAS(unsigned, nthreads);
 	TEST_STATS_ARENAS(const char *, dss);
-	TEST_STATS_ARENAS(ssize_t, lg_dirty_mult);
-	TEST_STATS_ARENAS(ssize_t, decay_time);
+	TEST_STATS_ARENAS(unsigned, nthreads);
 	TEST_STATS_ARENAS(size_t, pactive);
 	TEST_STATS_ARENAS(size_t, pdirty);

@@ -728,13 +620,10 @@ main(void)
 	    test_tcache,
 	    test_thread_arena,
 	    test_arena_i_lg_dirty_mult,
-	    test_arena_i_decay_time,
 	    test_arena_i_purge,
-	    test_arena_i_decay,
 	    test_arena_i_dss,
 	    test_arenas_initialized,
 	    test_arenas_lg_dirty_mult,
-	    test_arenas_decay_time,
 	    test_arenas_constants,
 	    test_arenas_bin_constants,
 	    test_arenas_lrun_constants,
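
Nearly every hunk in mallctl.c above is the same cosmetic change: the removed side passes (void *)&x for mallctl's oldp/newp parameters, while the kept side passes &x directly. A minimal sketch of the read/write convention both sides share (assumes linking against an unprefixed jemalloc build with its <jemalloc/jemalloc.h> header, as in this vendored copy):

#include <stdint.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    unsigned narenas;
    uint64_t epoch = 1;
    size_t sz;

    /* Write-only: bump the stats epoch (newp/newlen carry the input). */
    mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch));

    /* Read-only: oldp/oldlenp receive the value and its size. */
    sz = sizeof(narenas);
    mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0);
    return (0);
}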
deps/jemalloc/test/unit/math.c (vendored): 4 lines changed
@@ -5,10 +5,6 @@

 #include <float.h>

-#ifdef __PGI
-#undef INFINITY
-#endif
-
 #ifndef INFINITY
 #define INFINITY (DBL_MAX + DBL_MAX)
 #endif
deps/jemalloc/test/unit/nstime.c (vendored): 227 lines changed
@@ -1,227 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define BILLION UINT64_C(1000000000)
-
-TEST_BEGIN(test_nstime_init)
-{
-    nstime_t nst;
-
-    nstime_init(&nst, 42000000043);
-    assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
-    assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
-    assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_init2)
-{
-    nstime_t nst;
-
-    nstime_init2(&nst, 42, 43);
-    assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
-    assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_copy)
-{
-    nstime_t nsta, nstb;
-
-    nstime_init2(&nsta, 42, 43);
-    nstime_init(&nstb, 0);
-    nstime_copy(&nstb, &nsta);
-    assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
-    assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_compare)
-{
-    nstime_t nsta, nstb;
-
-    nstime_init2(&nsta, 42, 43);
-    nstime_copy(&nstb, &nsta);
-    assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
-    assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
-
-    nstime_init2(&nstb, 42, 42);
-    assert_d_eq(nstime_compare(&nsta, &nstb), 1,
-        "nsta should be greater than nstb");
-    assert_d_eq(nstime_compare(&nstb, &nsta), -1,
-        "nstb should be less than nsta");
-
-    nstime_init2(&nstb, 42, 44);
-    assert_d_eq(nstime_compare(&nsta, &nstb), -1,
-        "nsta should be less than nstb");
-    assert_d_eq(nstime_compare(&nstb, &nsta), 1,
-        "nstb should be greater than nsta");
-
-    nstime_init2(&nstb, 41, BILLION - 1);
-    assert_d_eq(nstime_compare(&nsta, &nstb), 1,
-        "nsta should be greater than nstb");
-    assert_d_eq(nstime_compare(&nstb, &nsta), -1,
-        "nstb should be less than nsta");
-
-    nstime_init2(&nstb, 43, 0);
-    assert_d_eq(nstime_compare(&nsta, &nstb), -1,
-        "nsta should be less than nstb");
-    assert_d_eq(nstime_compare(&nstb, &nsta), 1,
-        "nstb should be greater than nsta");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_add)
-{
-    nstime_t nsta, nstb;
-
-    nstime_init2(&nsta, 42, 43);
-    nstime_copy(&nstb, &nsta);
-    nstime_add(&nsta, &nstb);
-    nstime_init2(&nstb, 84, 86);
-    assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-        "Incorrect addition result");
-
-    nstime_init2(&nsta, 42, BILLION - 1);
-    nstime_copy(&nstb, &nsta);
-    nstime_add(&nsta, &nstb);
-    nstime_init2(&nstb, 85, BILLION - 2);
-    assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-        "Incorrect addition result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_subtract)
-{
-    nstime_t nsta, nstb;
-
-    nstime_init2(&nsta, 42, 43);
-    nstime_copy(&nstb, &nsta);
-    nstime_subtract(&nsta, &nstb);
-    nstime_init(&nstb, 0);
-    assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-        "Incorrect subtraction result");
-
-    nstime_init2(&nsta, 42, 43);
-    nstime_init2(&nstb, 41, 44);
-    nstime_subtract(&nsta, &nstb);
-    nstime_init2(&nstb, 0, BILLION - 1);
-    assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-        "Incorrect subtraction result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_imultiply)
-{
-    nstime_t nsta, nstb;
-
-    nstime_init2(&nsta, 42, 43);
-    nstime_imultiply(&nsta, 10);
-    nstime_init2(&nstb, 420, 430);
-    assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-        "Incorrect multiplication result");
-
-    nstime_init2(&nsta, 42, 666666666);
-    nstime_imultiply(&nsta, 3);
-    nstime_init2(&nstb, 127, 999999998);
-    assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-        "Incorrect multiplication result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_idivide)
-{
-    nstime_t nsta, nstb;
-
-    nstime_init2(&nsta, 42, 43);
-    nstime_copy(&nstb, &nsta);
-    nstime_imultiply(&nsta, 10);
-    nstime_idivide(&nsta, 10);
-    assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-        "Incorrect division result");
-
-    nstime_init2(&nsta, 42, 666666666);
-    nstime_copy(&nstb, &nsta);
-    nstime_imultiply(&nsta, 3);
-    nstime_idivide(&nsta, 3);
-    assert_d_eq(nstime_compare(&nsta, &nstb), 0,
-        "Incorrect division result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_divide)
-{
-    nstime_t nsta, nstb, nstc;
-
-    nstime_init2(&nsta, 42, 43);
-    nstime_copy(&nstb, &nsta);
-    nstime_imultiply(&nsta, 10);
-    assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
-        "Incorrect division result");
-
-    nstime_init2(&nsta, 42, 43);
-    nstime_copy(&nstb, &nsta);
-    nstime_imultiply(&nsta, 10);
-    nstime_init(&nstc, 1);
-    nstime_add(&nsta, &nstc);
-    assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
-        "Incorrect division result");
-
-    nstime_init2(&nsta, 42, 43);
-    nstime_copy(&nstb, &nsta);
-    nstime_imultiply(&nsta, 10);
-    nstime_init(&nstc, 1);
-    nstime_subtract(&nsta, &nstc);
-    assert_u64_eq(nstime_divide(&nsta, &nstb), 9,
-        "Incorrect division result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_monotonic)
-{
-
-    nstime_monotonic();
|
||||
}
|
||||
TEST_END
|
||||
|
||||
TEST_BEGIN(test_nstime_update)
|
||||
{
|
||||
nstime_t nst;
|
||||
|
||||
nstime_init(&nst, 0);
|
||||
|
||||
assert_false(nstime_update(&nst), "Basic time update failed.");
|
||||
|
||||
/* Only Rip Van Winkle sleeps this long. */
|
||||
{
|
||||
nstime_t addend;
|
||||
nstime_init2(&addend, 631152000, 0);
|
||||
nstime_add(&nst, &addend);
|
||||
}
|
||||
{
|
||||
nstime_t nst0;
|
||||
nstime_copy(&nst0, &nst);
|
||||
assert_true(nstime_update(&nst),
|
||||
"Update should detect time roll-back.");
|
||||
assert_d_eq(nstime_compare(&nst, &nst0), 0,
|
||||
"Time should not have been modified");
|
||||
}
|
||||
}
|
||||
TEST_END
|
||||
|
||||
int
|
||||
main(void)
|
||||
{
|
||||
|
||||
return (test(
|
||||
test_nstime_init,
|
||||
test_nstime_init2,
|
||||
test_nstime_copy,
|
||||
test_nstime_compare,
|
||||
test_nstime_add,
|
||||
test_nstime_subtract,
|
||||
test_nstime_imultiply,
|
||||
test_nstime_idivide,
|
||||
test_nstime_divide,
|
||||
test_nstime_monotonic,
|
||||
test_nstime_update));
|
||||
}
|
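The nstime tests above pin down carry behavior at the one-billion boundary, e.g. (42 s, 999999999 ns) doubled must equal (85 s, 999999998 ns). A sketch of that arithmetic on a two-field sec/nsec pair (illustrative only; it does not claim to mirror nstime_t's actual internal layout):

#include <stdint.h>

typedef struct { uint64_t sec, nsec; } ts_t;

static void
ts_add(ts_t *a, const ts_t *b)
{
    a->sec += b->sec;
    a->nsec += b->nsec;
    if (a->nsec >= 1000000000) { /* carry at one billion nanoseconds */
        a->sec++;
        a->nsec -= 1000000000;
    }
}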
206
deps/jemalloc/test/unit/pack.c
vendored
@ -1,206 +0,0 @@
#include "test/jemalloc_test.h"

const char *malloc_conf =
    /* Use smallest possible chunk size. */
    "lg_chunk:0"
    /* Immediately purge to minimize fragmentation. */
    ",lg_dirty_mult:-1"
    ",decay_time:-1"
    ;

/*
 * Size class that is a divisor of the page size, ideally 4+ regions per run.
 */
#if LG_PAGE <= 14
#define SZ (ZU(1) << (LG_PAGE - 2))
#else
#define SZ 4096
#endif

/*
 * Number of chunks to consume at high water mark. Should be at least 2 so that
 * if mmap()ed memory grows downward, downward growth of mmap()ed memory is
 * tested.
 */
#define NCHUNKS 8

static unsigned
binind_compute(void)
{
    size_t sz;
    unsigned nbins, i;

    sz = sizeof(nbins);
    assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
        "Unexpected mallctl failure");

    for (i = 0; i < nbins; i++) {
        size_t mib[4];
        size_t miblen = sizeof(mib)/sizeof(size_t);
        size_t size;

        assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
            &miblen), 0, "Unexpected mallctlnametomb failure");
        mib[2] = (size_t)i;

        sz = sizeof(size);
        assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
            0), 0, "Unexpected mallctlbymib failure");
        if (size == SZ)
            return (i);
    }

    test_fail("Unable to compute nregs_per_run");
    return (0);
}

static size_t
nregs_per_run_compute(void)
{
    uint32_t nregs;
    size_t sz;
    unsigned binind = binind_compute();
    size_t mib[4];
    size_t miblen = sizeof(mib)/sizeof(size_t);

    assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
        "Unexpected mallctlnametomb failure");
    mib[2] = (size_t)binind;
    sz = sizeof(nregs);
    assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
        0), 0, "Unexpected mallctlbymib failure");
    return (nregs);
}

static size_t
npages_per_run_compute(void)
{
    size_t sz;
    unsigned binind = binind_compute();
    size_t mib[4];
    size_t miblen = sizeof(mib)/sizeof(size_t);
    size_t run_size;

    assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
        "Unexpected mallctlnametomb failure");
    mib[2] = (size_t)binind;
    sz = sizeof(run_size);
    assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, NULL,
        0), 0, "Unexpected mallctlbymib failure");
    return (run_size >> LG_PAGE);
}

static size_t
npages_per_chunk_compute(void)
{

    return ((chunksize >> LG_PAGE) - map_bias);
}

static size_t
nruns_per_chunk_compute(void)
{

    return (npages_per_chunk_compute() / npages_per_run_compute());
}

static unsigned
arenas_extend_mallctl(void)
{
    unsigned arena_ind;
    size_t sz;

    sz = sizeof(arena_ind);
    assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
        0, "Error in arenas.extend");

    return (arena_ind);
}

static void
arena_reset_mallctl(unsigned arena_ind)
{
    size_t mib[3];
    size_t miblen = sizeof(mib)/sizeof(size_t);

    assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
        "Unexpected mallctlnametomib() failure");
    mib[1] = (size_t)arena_ind;
    assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
        "Unexpected mallctlbymib() failure");
}

TEST_BEGIN(test_pack)
{
    unsigned arena_ind = arenas_extend_mallctl();
    size_t nregs_per_run = nregs_per_run_compute();
    size_t nruns_per_chunk = nruns_per_chunk_compute();
    size_t nruns = nruns_per_chunk * NCHUNKS;
    size_t nregs = nregs_per_run * nruns;
    VARIABLE_ARRAY(void *, ptrs, nregs);
    size_t i, j, offset;

    /* Fill matrix. */
    for (i = offset = 0; i < nruns; i++) {
        for (j = 0; j < nregs_per_run; j++) {
            void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
                MALLOCX_TCACHE_NONE);
            assert_ptr_not_null(p,
                "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
                " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
                SZ, arena_ind, i, j);
            ptrs[(i * nregs_per_run) + j] = p;
        }
    }

    /*
     * Free all but one region of each run, but rotate which region is
     * preserved, so that subsequent allocations exercise the within-run
     * layout policy.
     */
    offset = 0;
    for (i = offset = 0;
        i < nruns;
        i++, offset = (offset + 1) % nregs_per_run) {
        for (j = 0; j < nregs_per_run; j++) {
            void *p = ptrs[(i * nregs_per_run) + j];
            if (offset == j)
                continue;
            dallocx(p, MALLOCX_ARENA(arena_ind) |
                MALLOCX_TCACHE_NONE);
        }
    }

    /*
     * Logically refill matrix, skipping preserved regions and verifying
     * that the matrix is unmodified.
     */
    offset = 0;
    for (i = offset = 0;
        i < nruns;
        i++, offset = (offset + 1) % nregs_per_run) {
        for (j = 0; j < nregs_per_run; j++) {
            void *p;

            if (offset == j)
                continue;
            p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
                MALLOCX_TCACHE_NONE);
            assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
                "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
                i, j);
        }
    }

    /* Clean up. */
    arena_reset_mallctl(arena_ind);
}
TEST_END

int
main(void)
{

    return (test(
        test_pack));
}
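A note on the rotation in test_pack above: offset advances by (offset + 1) % nregs_per_run once per run, so every region index eventually gets its turn as the preserved survivor, which is what forces the allocator to demonstrate a deterministic within-run placement policy on refill. Worked out for a hypothetical nregs_per_run of 4:

/* run:    0  1  2  3  4  5  ... */
/* keeps:  0  1  2  3  0  1  ... (survivor region index, wrapping mod 4) */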
27
deps/jemalloc/test/unit/pages.c
vendored
@ -1,27 +0,0 @@
#include "test/jemalloc_test.h"

TEST_BEGIN(test_pages_huge)
{
    bool commit;
    void *pages;

    commit = true;
    pages = pages_map(NULL, PAGE, &commit);
    assert_ptr_not_null(pages, "Unexpected pages_map() error");

    assert_false(pages_huge(pages, PAGE),
        "Unexpected pages_huge() result");
    assert_false(pages_nohuge(pages, PAGE),
        "Unexpected pages_nohuge() result");

    pages_unmap(pages, PAGE);
}
TEST_END

int
main(void)
{

    return (test(
        test_pages_huge));
}
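pages_huge() and pages_nohuge() return false on success, as the asserts above encode. On Linux builds these helpers amount to transparent-huge-page hints; a sketch of that mechanism (a platform assumption for illustration, not a copy of jemalloc's implementation):

#include <stddef.h>
#include <stdbool.h>
#include <sys/mman.h>

static bool
hint_huge(void *addr, size_t size, bool enable)
{
#ifdef MADV_HUGEPAGE
    /* true (failure) iff madvise rejects the hint */
    return (madvise(addr, size,
        enable ? MADV_HUGEPAGE : MADV_NOHUGEPAGE) != 0);
#else
    (void)addr; (void)size; (void)enable;
    return (true); /* hint unsupported on this platform */
#endif
}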
290
deps/jemalloc/test/unit/ph.c
vendored
@ -1,290 +0,0 @@
#include "test/jemalloc_test.h"

typedef struct node_s node_t;

struct node_s {
#define NODE_MAGIC 0x9823af7e
    uint32_t magic;
    phn(node_t) link;
    uint64_t key;
};

static int
node_cmp(const node_t *a, const node_t *b)
{
    int ret;

    ret = (a->key > b->key) - (a->key < b->key);
    if (ret == 0) {
        /*
         * Duplicates are not allowed in the heap, so force an
         * arbitrary ordering for non-identical items with equal keys.
         */
        ret = (((uintptr_t)a) > ((uintptr_t)b))
            - (((uintptr_t)a) < ((uintptr_t)b));
    }
    return (ret);
}

static int
node_cmp_magic(const node_t *a, const node_t *b) {

    assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
    assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");

    return (node_cmp(a, b));
}

typedef ph(node_t) heap_t;
ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic);

static void
node_print(const node_t *node, unsigned depth)
{
    unsigned i;
    node_t *leftmost_child, *sibling;

    for (i = 0; i < depth; i++)
        malloc_printf("\t");
    malloc_printf("%2"FMTu64"\n", node->key);

    leftmost_child = phn_lchild_get(node_t, link, node);
    if (leftmost_child == NULL)
        return;
    node_print(leftmost_child, depth + 1);

    for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
        NULL; sibling = phn_next_get(node_t, link, sibling)) {
        node_print(sibling, depth + 1);
    }
}

static void
heap_print(const heap_t *heap)
{
    node_t *auxelm;

    malloc_printf("vvv heap %p vvv\n", heap);
    if (heap->ph_root == NULL)
        goto label_return;

    node_print(heap->ph_root, 0);

    for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
        auxelm = phn_next_get(node_t, link, auxelm)) {
        assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
            link, auxelm)), auxelm,
            "auxelm's prev doesn't link to auxelm");
        node_print(auxelm, 0);
    }

label_return:
    malloc_printf("^^^ heap %p ^^^\n", heap);
}

static unsigned
node_validate(const node_t *node, const node_t *parent)
{
    unsigned nnodes = 1;
    node_t *leftmost_child, *sibling;

    if (parent != NULL) {
        assert_d_ge(node_cmp_magic(node, parent), 0,
            "Child is less than parent");
    }

    leftmost_child = phn_lchild_get(node_t, link, node);
    if (leftmost_child == NULL)
        return (nnodes);
    assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child),
        (void *)node, "Leftmost child does not link to node");
    nnodes += node_validate(leftmost_child, node);

    for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
        NULL; sibling = phn_next_get(node_t, link, sibling)) {
        assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
            link, sibling)), sibling,
            "sibling's prev doesn't link to sibling");
        nnodes += node_validate(sibling, node);
    }
    return (nnodes);
}

static unsigned
heap_validate(const heap_t *heap)
{
    unsigned nnodes = 0;
    node_t *auxelm;

    if (heap->ph_root == NULL)
        goto label_return;

    nnodes += node_validate(heap->ph_root, NULL);

    for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
        auxelm = phn_next_get(node_t, link, auxelm)) {
        assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
            link, auxelm)), auxelm,
            "auxelm's prev doesn't link to auxelm");
        nnodes += node_validate(auxelm, NULL);
    }

label_return:
    if (false)
        heap_print(heap);
    return (nnodes);
}

TEST_BEGIN(test_ph_empty)
{
    heap_t heap;

    heap_new(&heap);
    assert_true(heap_empty(&heap), "Heap should be empty");
    assert_ptr_null(heap_first(&heap), "Unexpected node");
}
TEST_END

static void
node_remove(heap_t *heap, node_t *node)
{

    heap_remove(heap, node);

    node->magic = 0;
}

static node_t *
node_remove_first(heap_t *heap)
{
    node_t *node = heap_remove_first(heap);
    node->magic = 0;
    return (node);
}

TEST_BEGIN(test_ph_random)
{
#define NNODES 25
#define NBAGS 250
#define SEED 42
    sfmt_t *sfmt;
    uint64_t bag[NNODES];
    heap_t heap;
    node_t nodes[NNODES];
    unsigned i, j, k;

    sfmt = init_gen_rand(SEED);
    for (i = 0; i < NBAGS; i++) {
        switch (i) {
        case 0:
            /* Insert in order. */
            for (j = 0; j < NNODES; j++)
                bag[j] = j;
            break;
        case 1:
            /* Insert in reverse order. */
            for (j = 0; j < NNODES; j++)
                bag[j] = NNODES - j - 1;
            break;
        default:
            for (j = 0; j < NNODES; j++)
                bag[j] = gen_rand64_range(sfmt, NNODES);
        }

        for (j = 1; j <= NNODES; j++) {
            /* Initialize heap and nodes. */
            heap_new(&heap);
            assert_u_eq(heap_validate(&heap), 0,
                "Incorrect node count");
            for (k = 0; k < j; k++) {
                nodes[k].magic = NODE_MAGIC;
                nodes[k].key = bag[k];
            }

            /* Insert nodes. */
            for (k = 0; k < j; k++) {
                heap_insert(&heap, &nodes[k]);
                if (i % 13 == 12) {
                    /* Trigger merging. */
                    assert_ptr_not_null(heap_first(&heap),
                        "Heap should not be empty");
                }
                assert_u_eq(heap_validate(&heap), k + 1,
                    "Incorrect node count");
            }

            assert_false(heap_empty(&heap),
                "Heap should not be empty");

            /* Remove nodes. */
            switch (i % 4) {
            case 0:
                for (k = 0; k < j; k++) {
                    assert_u_eq(heap_validate(&heap), j - k,
                        "Incorrect node count");
                    node_remove(&heap, &nodes[k]);
                    assert_u_eq(heap_validate(&heap), j - k
                        - 1, "Incorrect node count");
                }
                break;
            case 1:
                for (k = j; k > 0; k--) {
                    node_remove(&heap, &nodes[k-1]);
                    assert_u_eq(heap_validate(&heap), k - 1,
                        "Incorrect node count");
                }
                break;
            case 2: {
                node_t *prev = NULL;
                for (k = 0; k < j; k++) {
                    node_t *node = node_remove_first(&heap);
                    assert_u_eq(heap_validate(&heap), j - k
                        - 1, "Incorrect node count");
                    if (prev != NULL) {
                        assert_d_ge(node_cmp(node,
                            prev), 0,
                            "Bad removal order");
                    }
                    prev = node;
                }
                break;
            } case 3: {
                node_t *prev = NULL;
                for (k = 0; k < j; k++) {
                    node_t *node = heap_first(&heap);
                    assert_u_eq(heap_validate(&heap), j - k,
                        "Incorrect node count");
                    if (prev != NULL) {
                        assert_d_ge(node_cmp(node,
                            prev), 0,
                            "Bad removal order");
                    }
                    node_remove(&heap, node);
                    assert_u_eq(heap_validate(&heap), j - k
                        - 1, "Incorrect node count");
                    prev = node;
                }
                break;
            } default:
                not_reached();
            }

            assert_ptr_null(heap_first(&heap),
                "Heap should be empty");
            assert_true(heap_empty(&heap), "Heap should be empty");
        }
    }
    fini_gen_rand(sfmt);
#undef NNODES
#undef SEED
}
TEST_END

int
main(void)
{

    return (test(
        test_ph_empty,
        test_ph_random));
}
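Two details of node_cmp() above are worth calling out: the branchless "(a > b) - (a < b)" idiom yields -1/0/1 without any overflow risk, and the pointer-identity tie-break guarantees a total order even when keys collide, which the pairing heap requires. The idiom in isolation:

#include <stdint.h>

static int
cmp_u64(uint64_t a, uint64_t b)
{
    return ((a > b) - (a < b)); /* -1 if a < b, 0 if equal, 1 if a > b */
}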
263
deps/jemalloc/test/unit/prng.c
vendored
@ -1,263 +0,0 @@
#include "test/jemalloc_test.h"

static void
test_prng_lg_range_u32(bool atomic)
{
    uint32_t sa, sb, ra, rb;
    unsigned lg_range;

    sa = 42;
    ra = prng_lg_range_u32(&sa, 32, atomic);
    sa = 42;
    rb = prng_lg_range_u32(&sa, 32, atomic);
    assert_u32_eq(ra, rb,
        "Repeated generation should produce repeated results");

    sb = 42;
    rb = prng_lg_range_u32(&sb, 32, atomic);
    assert_u32_eq(ra, rb,
        "Equivalent generation should produce equivalent results");

    sa = 42;
    ra = prng_lg_range_u32(&sa, 32, atomic);
    rb = prng_lg_range_u32(&sa, 32, atomic);
    assert_u32_ne(ra, rb,
        "Full-width results must not immediately repeat");

    sa = 42;
    ra = prng_lg_range_u32(&sa, 32, atomic);
    for (lg_range = 31; lg_range > 0; lg_range--) {
        sb = 42;
        rb = prng_lg_range_u32(&sb, lg_range, atomic);
        assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
            0, "High order bits should be 0, lg_range=%u", lg_range);
        assert_u32_eq(rb, (ra >> (32 - lg_range)),
            "Expected high order bits of full-width result, "
            "lg_range=%u", lg_range);
    }
}

static void
test_prng_lg_range_u64(void)
{
    uint64_t sa, sb, ra, rb;
    unsigned lg_range;

    sa = 42;
    ra = prng_lg_range_u64(&sa, 64);
    sa = 42;
    rb = prng_lg_range_u64(&sa, 64);
    assert_u64_eq(ra, rb,
        "Repeated generation should produce repeated results");

    sb = 42;
    rb = prng_lg_range_u64(&sb, 64);
    assert_u64_eq(ra, rb,
        "Equivalent generation should produce equivalent results");

    sa = 42;
    ra = prng_lg_range_u64(&sa, 64);
    rb = prng_lg_range_u64(&sa, 64);
    assert_u64_ne(ra, rb,
        "Full-width results must not immediately repeat");

    sa = 42;
    ra = prng_lg_range_u64(&sa, 64);
    for (lg_range = 63; lg_range > 0; lg_range--) {
        sb = 42;
        rb = prng_lg_range_u64(&sb, lg_range);
        assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
            0, "High order bits should be 0, lg_range=%u", lg_range);
        assert_u64_eq(rb, (ra >> (64 - lg_range)),
            "Expected high order bits of full-width result, "
            "lg_range=%u", lg_range);
    }
}

static void
test_prng_lg_range_zu(bool atomic)
{
    size_t sa, sb, ra, rb;
    unsigned lg_range;

    sa = 42;
    ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
    sa = 42;
    rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
    assert_zu_eq(ra, rb,
        "Repeated generation should produce repeated results");

    sb = 42;
    rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
    assert_zu_eq(ra, rb,
        "Equivalent generation should produce equivalent results");

    sa = 42;
    ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
    rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
    assert_zu_ne(ra, rb,
        "Full-width results must not immediately repeat");

    sa = 42;
    ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
    for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
        lg_range--) {
        sb = 42;
        rb = prng_lg_range_zu(&sb, lg_range, atomic);
        assert_zu_eq((rb & (SIZE_T_MAX << lg_range)),
            0, "High order bits should be 0, lg_range=%u", lg_range);
        assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
            lg_range)), "Expected high order bits of full-width "
            "result, lg_range=%u", lg_range);
    }
}

TEST_BEGIN(test_prng_lg_range_u32_nonatomic)
{

    test_prng_lg_range_u32(false);
}
TEST_END

TEST_BEGIN(test_prng_lg_range_u32_atomic)
{

    test_prng_lg_range_u32(true);
}
TEST_END

TEST_BEGIN(test_prng_lg_range_u64_nonatomic)
{

    test_prng_lg_range_u64();
}
TEST_END

TEST_BEGIN(test_prng_lg_range_zu_nonatomic)
{

    test_prng_lg_range_zu(false);
}
TEST_END

TEST_BEGIN(test_prng_lg_range_zu_atomic)
{

    test_prng_lg_range_zu(true);
}
TEST_END

static void
test_prng_range_u32(bool atomic)
{
    uint32_t range;
#define MAX_RANGE 10000000
#define RANGE_STEP 97
#define NREPS 10

    for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
        uint32_t s;
        unsigned rep;

        s = range;
        for (rep = 0; rep < NREPS; rep++) {
            uint32_t r = prng_range_u32(&s, range, atomic);

            assert_u32_lt(r, range, "Out of range");
        }
    }
}

static void
test_prng_range_u64(void)
{
    uint64_t range;
#define MAX_RANGE 10000000
#define RANGE_STEP 97
#define NREPS 10

    for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
        uint64_t s;
        unsigned rep;

        s = range;
        for (rep = 0; rep < NREPS; rep++) {
            uint64_t r = prng_range_u64(&s, range);

            assert_u64_lt(r, range, "Out of range");
        }
    }
}

static void
test_prng_range_zu(bool atomic)
{
    size_t range;
#define MAX_RANGE 10000000
#define RANGE_STEP 97
#define NREPS 10

    for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
        size_t s;
        unsigned rep;

        s = range;
        for (rep = 0; rep < NREPS; rep++) {
            size_t r = prng_range_zu(&s, range, atomic);

            assert_zu_lt(r, range, "Out of range");
        }
    }
}

TEST_BEGIN(test_prng_range_u32_nonatomic)
{

    test_prng_range_u32(false);
}
TEST_END

TEST_BEGIN(test_prng_range_u32_atomic)
{

    test_prng_range_u32(true);
}
TEST_END

TEST_BEGIN(test_prng_range_u64_nonatomic)
{

    test_prng_range_u64();
}
TEST_END

TEST_BEGIN(test_prng_range_zu_nonatomic)
{

    test_prng_range_zu(false);
}
TEST_END

TEST_BEGIN(test_prng_range_zu_atomic)
{

    test_prng_range_zu(true);
}
TEST_END

int
main(void)
{

    return (test(
        test_prng_lg_range_u32_nonatomic,
        test_prng_lg_range_u32_atomic,
        test_prng_lg_range_u64_nonatomic,
        test_prng_lg_range_zu_nonatomic,
        test_prng_lg_range_zu_atomic,
        test_prng_range_u32_nonatomic,
        test_prng_range_u32_atomic,
        test_prng_range_u64_nonatomic,
        test_prng_range_zu_nonatomic,
        test_prng_range_zu_atomic));
}
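The lg_range assertions above encode a deliberate design point: an lg_range-bit draw equals the top lg_range bits of the full-width result (rb == ra >> (32 - lg_range)), because in an LCG-style generator the high-order state bits are the strongest. A sketch of that shape with arbitrary constants (not jemalloc's actual multiplier and addend):

#include <stdint.h>

static uint32_t
lcg_bits_u32(uint32_t *state, unsigned lg_range) /* 1 <= lg_range <= 32 */
{
    *state = *state * 747796405U + 2891336453U; /* arbitrary LCG step */
    return (*state >> (32 - lg_range));         /* keep the strong high bits */
}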
5
deps/jemalloc/test/unit/prof_accum.c
vendored
Executable file → Normal file
@ -68,9 +68,8 @@ TEST_BEGIN(test_idump)
    test_skip_if(!config_prof);

    active = true;
    assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
        sizeof(active)), 0,
        "Unexpected mallctl failure while activating profiling");
    assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
        0, "Unexpected mallctl failure while activating profiling");

    prof_dump_open = prof_dump_open_intercept;

5
deps/jemalloc/test/unit/prof_active.c
vendored
Executable file → Normal file
@ -12,7 +12,7 @@ mallctl_bool_get(const char *name, bool expected, const char *func, int line)
    size_t sz;

    sz = sizeof(old);
    assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
    assert_d_eq(mallctl(name, &old, &sz, NULL, 0), 0,
        "%s():%d: Unexpected mallctl failure reading %s", func, line, name);
    assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
        name);
@ -26,8 +26,7 @@ mallctl_bool_set(const char *name, bool old_expected, bool val_new,
    size_t sz;

    sz = sizeof(old);
    assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
        sizeof(val_new)), 0,
    assert_d_eq(mallctl(name, &old, &sz, &val_new, sizeof(val_new)), 0,
        "%s():%d: Unexpected mallctl failure reading/writing %s", func,
        line, name);
    assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
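All of the prof_* hunks above are the same mechanical change, dropping or adding explicit (void *) casts around mallctl()'s oldp/newp arguments; the call shape itself never changes. mallctl() reads a control into oldp/oldlenp, writes newp/newlen into it, or does both in one call, reading back the previous value while installing the new one. In outline, using jemalloc's public API:

bool active = true;
size_t sz = sizeof(active);
mallctl("prof.active", NULL, NULL, &active, sizeof(active)); /* write-only */
mallctl("prof.active", &active, &sz, NULL, 0);               /* read-only */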
13
deps/jemalloc/test/unit/prof_gdump.c
vendored
Executable file → Normal file
@ -28,9 +28,8 @@ TEST_BEGIN(test_gdump)
    test_skip_if(!config_prof);

    active = true;
    assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
        sizeof(active)), 0,
        "Unexpected mallctl failure while activating profiling");
    assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
        0, "Unexpected mallctl failure while activating profiling");

    prof_dump_open = prof_dump_open_intercept;

@ -46,8 +45,8 @@ TEST_BEGIN(test_gdump)

    gdump = false;
    sz = sizeof(gdump_old);
    assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
        (void *)&gdump, sizeof(gdump)), 0,
    assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
        sizeof(gdump)), 0,
        "Unexpected mallctl failure while disabling prof.gdump");
    assert(gdump_old);
    did_prof_dump_open = false;
@ -57,8 +56,8 @@ TEST_BEGIN(test_gdump)

    gdump = true;
    sz = sizeof(gdump_old);
    assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
        (void *)&gdump, sizeof(gdump)), 0,
    assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
        sizeof(gdump)), 0,
        "Unexpected mallctl failure while enabling prof.gdump");
    assert(!gdump_old);
    did_prof_dump_open = false;

5
deps/jemalloc/test/unit/prof_idump.c
vendored
Executable file → Normal file
@ -29,9 +29,8 @@ TEST_BEGIN(test_idump)
    test_skip_if(!config_prof);

    active = true;
    assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
        sizeof(active)), 0,
        "Unexpected mallctl failure while activating profiling");
    assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
        0, "Unexpected mallctl failure while activating profiling");

    prof_dump_open = prof_dump_open_intercept;

16
deps/jemalloc/test/unit/prof_reset.c
vendored
Executable file → Normal file
@ -20,8 +20,8 @@ static void
set_prof_active(bool active)
{

    assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
        sizeof(active)), 0, "Unexpected mallctl failure");
    assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
        0, "Unexpected mallctl failure");
}

static size_t
@ -30,8 +30,7 @@ get_lg_prof_sample(void)
    size_t lg_prof_sample;
    size_t sz = sizeof(size_t);

    assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz,
        NULL, 0), 0,
    assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
        "Unexpected mallctl failure while reading profiling sample rate");
    return (lg_prof_sample);
}
@ -40,7 +39,7 @@ static void
do_prof_reset(size_t lg_prof_sample)
{
    assert_d_eq(mallctl("prof.reset", NULL, NULL,
        (void *)&lg_prof_sample, sizeof(size_t)), 0,
        &lg_prof_sample, sizeof(size_t)), 0,
        "Unexpected mallctl failure while resetting profile data");
    assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
        "Expected profile sample rate change");
@ -55,8 +54,8 @@ TEST_BEGIN(test_prof_reset_basic)
    test_skip_if(!config_prof);

    sz = sizeof(size_t);
    assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
        &sz, NULL, 0), 0,
    assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz,
        NULL, 0), 0,
        "Unexpected mallctl failure while reading profiling sample rate");
    assert_zu_eq(lg_prof_sample_orig, 0,
        "Unexpected profiling sample rate");
@ -95,8 +94,7 @@ TEST_END
bool prof_dump_header_intercepted = false;
prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
static bool
prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err,
    const prof_cnt_t *cnt_all)
prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all)
{

    prof_dump_header_intercepted = true;
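prof_dump_open_intercept and prof_dump_header_intercept follow one testing pattern: the library exposes its dump hooks as writable function pointers, so a test can install a spy, trigger a dump, and assert the spy ran, without ever touching a real profile file. A sketch of that shape (the hook's exact signature here is inferred from the calls visible above, so treat it as an assumption):

#include <fcntl.h>
#include <stdbool.h>

static bool did_intercept = false;

static int
dump_open_intercept(bool propagate_err, const char *filename)
{
    (void)propagate_err; (void)filename;
    did_intercept = true;
    return (open("/dev/null", O_WRONLY)); /* harmless sink for the dump */
}

/* in a test: prof_dump_open = dump_open_intercept; ...; assert(did_intercept); */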
22
deps/jemalloc/test/unit/prof_thread_name.c
vendored
Executable file → Normal file
@ -12,9 +12,8 @@ mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
    size_t sz;

    sz = sizeof(thread_name_old);
    assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
        NULL, 0), 0,
        "%s():%d: Unexpected mallctl failure reading thread.prof.name",
    assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, NULL, 0),
        0, "%s():%d: Unexpected mallctl failure reading thread.prof.name",
        func, line);
    assert_str_eq(thread_name_old, thread_name_expected,
        "%s():%d: Unexpected thread.prof.name value", func, line);
@ -27,8 +26,8 @@ mallctl_thread_name_set_impl(const char *thread_name, const char *func,
    int line)
{

    assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
        (void *)&thread_name, sizeof(thread_name)), 0,
    assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
        sizeof(thread_name)), 0,
        "%s():%d: Unexpected mallctl failure reading thread.prof.name",
        func, line);
    mallctl_thread_name_get_impl(thread_name, func, line);
@ -47,15 +46,15 @@ TEST_BEGIN(test_prof_thread_name_validation)

    /* NULL input shouldn't be allowed. */
    thread_name = NULL;
    assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
        (void *)&thread_name, sizeof(thread_name)), EFAULT,
    assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
        sizeof(thread_name)), EFAULT,
        "Unexpected mallctl result writing \"%s\" to thread.prof.name",
        thread_name);

    /* '\n' shouldn't be allowed. */
    thread_name = "hi\nthere";
    assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
        (void *)&thread_name, sizeof(thread_name)), EFAULT,
    assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
        sizeof(thread_name)), EFAULT,
        "Unexpected mallctl result writing \"%s\" to thread.prof.name",
        thread_name);

@ -65,9 +64,8 @@ TEST_BEGIN(test_prof_thread_name_validation)
    size_t sz;

    sz = sizeof(thread_name_old);
    assert_d_eq(mallctl("thread.prof.name",
        (void *)&thread_name_old, &sz, (void *)&thread_name,
        sizeof(thread_name)), EPERM,
    assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz,
        &thread_name, sizeof(thread_name)), EPERM,
        "Unexpected mallctl result writing \"%s\" to "
        "thread.prof.name", thread_name);
}
60
deps/jemalloc/test/unit/rb.c
vendored
@ -3,7 +3,7 @@
#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do {        \
    a_type *rbp_bh_t;                                                   \
    for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0;                  \
        rbp_bh_t != NULL;                                               \
        rbp_bh_t != &(a_rbt)->rbt_nil;                                  \
        rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) {          \
        if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) {                 \
            (r_height)++;                                               \
@ -21,7 +21,7 @@ struct node_s {
};

static int
node_cmp(const node_t *a, const node_t *b) {
node_cmp(node_t *a, node_t *b) {
    int ret;

    assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
@ -68,43 +68,38 @@ TEST_BEGIN(test_rb_empty)
TEST_END

static unsigned
tree_recurse(node_t *node, unsigned black_height, unsigned black_depth)
tree_recurse(node_t *node, unsigned black_height, unsigned black_depth,
    node_t *nil)
{
    unsigned ret = 0;
    node_t *left_node;
    node_t *right_node;

    if (node == NULL)
        return (ret);

    left_node = rbtn_left_get(node_t, link, node);
    right_node = rbtn_right_get(node_t, link, node);
    node_t *left_node = rbtn_left_get(node_t, link, node);
    node_t *right_node = rbtn_right_get(node_t, link, node);

    if (!rbtn_red_get(node_t, link, node))
        black_depth++;

    /* Red nodes must be interleaved with black nodes. */
    if (rbtn_red_get(node_t, link, node)) {
        if (left_node != NULL)
            assert_false(rbtn_red_get(node_t, link, left_node),
                "Node should be black");
        if (right_node != NULL)
            assert_false(rbtn_red_get(node_t, link, right_node),
                "Node should be black");
        assert_false(rbtn_red_get(node_t, link, left_node),
            "Node should be black");
        assert_false(rbtn_red_get(node_t, link, right_node),
            "Node should be black");
    }

    if (node == nil)
        return (ret);
    /* Self. */
    assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");

    /* Left subtree. */
    if (left_node != NULL)
        ret += tree_recurse(left_node, black_height, black_depth);
    if (left_node != nil)
        ret += tree_recurse(left_node, black_height, black_depth, nil);
    else
        ret += (black_depth != black_height);

    /* Right subtree. */
    if (right_node != NULL)
        ret += tree_recurse(right_node, black_height, black_depth);
    if (right_node != nil)
        ret += tree_recurse(right_node, black_height, black_depth, nil);
    else
        ret += (black_depth != black_height);

@ -186,7 +181,8 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes)
    node->magic = 0;

    rbtn_black_height(node_t, link, tree, black_height);
    imbalances = tree_recurse(tree->rbt_root, black_height, 0);
    imbalances = tree_recurse(tree->rbt_root, black_height, 0,
        &(tree->rbt_nil));
    assert_u_eq(imbalances, 0, "Tree is unbalanced");
    assert_u_eq(tree_iterate(tree), nnodes-1,
        "Unexpected node iteration count");
@ -216,15 +212,6 @@ remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
    return (ret);
}

static void
destroy_cb(node_t *node, void *data)
{
    unsigned *nnodes = (unsigned *)data;

    assert_u_gt(*nnodes, 0, "Destruction removed too many nodes");
    (*nnodes)--;
}

TEST_BEGIN(test_rb_random)
{
#define NNODES 25
@ -257,6 +244,7 @@ TEST_BEGIN(test_rb_random)
        for (j = 1; j <= NNODES; j++) {
            /* Initialize tree and nodes. */
            tree_new(&tree);
            tree.rbt_nil.magic = 0;
            for (k = 0; k < j; k++) {
                nodes[k].magic = NODE_MAGIC;
                nodes[k].key = bag[k];
@ -269,7 +257,7 @@ TEST_BEGIN(test_rb_random)
                rbtn_black_height(node_t, link, &tree,
                    black_height);
                imbalances = tree_recurse(tree.rbt_root,
                    black_height, 0);
                    black_height, 0, &(tree.rbt_nil));
                assert_u_eq(imbalances, 0,
                    "Tree is unbalanced");

@ -290,7 +278,7 @@ TEST_BEGIN(test_rb_random)
            }

            /* Remove nodes. */
            switch (i % 5) {
            switch (i % 4) {
            case 0:
                for (k = 0; k < j; k++)
                    node_remove(&tree, &nodes[k], j - k);
@ -326,12 +314,6 @@ TEST_BEGIN(test_rb_random)
                assert_u_eq(nnodes, 0,
                    "Removal terminated early");
                break;
            } case 4: {
                unsigned nnodes = j;
                tree_destroy(&tree, destroy_cb, &nnodes);
                assert_u_eq(nnodes, 0,
                    "Destruction terminated early");
                break;
            } default:
                not_reached();
            }
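For orientation, the rb.c hunks above are one coherent change: the tree walks gain an explicit nil-sentinel parameter because this jemalloc version terminates branches at &tree->rbt_nil rather than at NULL. The invariant tree_recurse() enforces is the classic red-black pair: no red node has a red child, and every root-to-leaf path crosses exactly black_height black nodes. The per-leaf check in isolation:

static unsigned
leaf_imbalance(unsigned black_depth, unsigned black_height)
{
    return (black_depth != black_height); /* 1 iff this path is unbalanced */
}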
149
deps/jemalloc/test/unit/run_quantize.c
vendored
@ -1,149 +0,0 @@
#include "test/jemalloc_test.h"

TEST_BEGIN(test_small_run_size)
{
    unsigned nbins, i;
    size_t sz, run_size;
    size_t mib[4];
    size_t miblen = sizeof(mib) / sizeof(size_t);

    /*
     * Iterate over all small size classes, get their run sizes, and verify
     * that the quantized size is the same as the run size.
     */

    sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
        "Unexpected mallctl failure");

    assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
        "Unexpected mallctlnametomib failure");
    for (i = 0; i < nbins; i++) {
        mib[2] = i;
        sz = sizeof(size_t);
        assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz,
            NULL, 0), 0, "Unexpected mallctlbymib failure");
        assert_zu_eq(run_size, run_quantize_floor(run_size),
            "Small run quantization should be a no-op (run_size=%zu)",
            run_size);
        assert_zu_eq(run_size, run_quantize_ceil(run_size),
            "Small run quantization should be a no-op (run_size=%zu)",
            run_size);
    }
}
TEST_END

TEST_BEGIN(test_large_run_size)
{
    bool cache_oblivious;
    unsigned nlruns, i;
    size_t sz, run_size_prev, ceil_prev;
    size_t mib[4];
    size_t miblen = sizeof(mib) / sizeof(size_t);

    /*
     * Iterate over all large size classes, get their run sizes, and verify
     * that the quantized size is the same as the run size.
     */

    sz = sizeof(bool);
    assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious,
        &sz, NULL, 0), 0, "Unexpected mallctl failure");

    sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0,
        "Unexpected mallctl failure");

    assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
        "Unexpected mallctlnametomib failure");
    for (i = 0; i < nlruns; i++) {
        size_t lrun_size, run_size, floor, ceil;

        mib[2] = i;
        sz = sizeof(size_t);
        assert_d_eq(mallctlbymib(mib, miblen, (void *)&lrun_size, &sz,
            NULL, 0), 0, "Unexpected mallctlbymib failure");
        run_size = cache_oblivious ? lrun_size + PAGE : lrun_size;
        floor = run_quantize_floor(run_size);
        ceil = run_quantize_ceil(run_size);

        assert_zu_eq(run_size, floor,
            "Large run quantization should be a no-op for precise "
            "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
        assert_zu_eq(run_size, ceil,
            "Large run quantization should be a no-op for precise "
            "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);

        if (i > 0) {
            assert_zu_eq(run_size_prev, run_quantize_floor(run_size
                - PAGE), "Floor should be a precise size");
            if (run_size_prev < ceil_prev) {
                assert_zu_eq(ceil_prev, run_size,
                    "Ceiling should be a precise size "
                    "(run_size_prev=%zu, ceil_prev=%zu, "
                    "run_size=%zu)", run_size_prev, ceil_prev,
                    run_size);
            }
        }
        run_size_prev = floor;
        ceil_prev = run_quantize_ceil(run_size + PAGE);
    }
}
TEST_END

TEST_BEGIN(test_monotonic)
{
    unsigned nbins, nlruns, i;
    size_t sz, floor_prev, ceil_prev;

    /*
     * Iterate over all run sizes and verify that
     * run_quantize_{floor,ceil}() are monotonic.
     */

    sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
        "Unexpected mallctl failure");

    sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0,
        "Unexpected mallctl failure");

    floor_prev = 0;
    ceil_prev = 0;
    for (i = 1; i <= chunksize >> LG_PAGE; i++) {
        size_t run_size, floor, ceil;

        run_size = i << LG_PAGE;
        floor = run_quantize_floor(run_size);
        ceil = run_quantize_ceil(run_size);

        assert_zu_le(floor, run_size,
            "Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)",
            floor, run_size, ceil);
        assert_zu_ge(ceil, run_size,
            "Ceiling should be >= (floor=%zu, run_size=%zu, ceil=%zu)",
            floor, run_size, ceil);

        assert_zu_le(floor_prev, floor, "Floor should be monotonic "
            "(floor_prev=%zu, floor=%zu, run_size=%zu, ceil=%zu)",
            floor_prev, floor, run_size, ceil);
        assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
            "(floor=%zu, run_size=%zu, ceil_prev=%zu, ceil=%zu)",
            floor, run_size, ceil_prev, ceil);

        floor_prev = floor;
        ceil_prev = ceil;
    }
}
TEST_END

int
main(void)
{

    return (test(
        test_small_run_size,
        test_large_run_size,
        test_monotonic));
}
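run_quantize_floor()/run_quantize_ceil() are asserted to behave like rounding onto a grid of valid run sizes: floor(x) <= x <= ceil(x), both are no-ops on grid points, and both are monotonically non-decreasing. The same contract in miniature on a plain page-multiple grid (the real grid is the set of valid run sizes, which is coarser and non-uniform, so this is only the shape of the contract):

#include <stddef.h>

#define Q 4096 /* stand-in "page" quantum */

static size_t qfloor(size_t x) { return (x - (x % Q)); }
static size_t qceil(size_t x)  { return (qfloor(x + Q - 1)); }
/* qfloor(x) <= x <= qceil(x); both fix multiples of Q; both monotonic. */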
105
deps/jemalloc/test/unit/size_classes.c
vendored
Executable file → Normal file
@ -8,8 +8,8 @@ get_max_size_class(void)
    size_t sz, miblen, max_size_class;

    sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0),
        0, "Unexpected mallctl() error");
    assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
        "Unexpected mallctl() error");

    miblen = sizeof(mib) / sizeof(size_t);
    assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
@ -17,8 +17,8 @@ get_max_size_class(void)
    mib[2] = nhchunks - 1;

    sz = sizeof(size_t);
    assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
        NULL, 0), 0, "Unexpected mallctlbymib() error");
    assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
        "Unexpected mallctlbymib() error");

    return (max_size_class);
}
@ -80,105 +80,10 @@ TEST_BEGIN(test_size_classes)
}
TEST_END

TEST_BEGIN(test_psize_classes)
{
    size_t size_class, max_size_class;
    pszind_t pind, max_pind;

    max_size_class = get_max_size_class();
    max_pind = psz2ind(max_size_class);

    for (pind = 0, size_class = pind2sz(pind); pind < max_pind ||
        size_class < max_size_class; pind++, size_class =
        pind2sz(pind)) {
        assert_true(pind < max_pind,
            "Loop conditionals should be equivalent; pind=%u, "
            "size_class=%zu (%#zx)", pind, size_class, size_class);
        assert_true(size_class < max_size_class,
            "Loop conditionals should be equivalent; pind=%u, "
            "size_class=%zu (%#zx)", pind, size_class, size_class);

        assert_u_eq(pind, psz2ind(size_class),
            "psz2ind() does not reverse pind2sz(): pind=%u -->"
            " size_class=%zu --> pind=%u --> size_class=%zu", pind,
            size_class, psz2ind(size_class),
            pind2sz(psz2ind(size_class)));
        assert_zu_eq(size_class, pind2sz(psz2ind(size_class)),
            "pind2sz() does not reverse psz2ind(): pind=%u -->"
            " size_class=%zu --> pind=%u --> size_class=%zu", pind,
            size_class, psz2ind(size_class),
            pind2sz(psz2ind(size_class)));

        assert_u_eq(pind+1, psz2ind(size_class+1),
            "Next size_class does not round up properly");

        assert_zu_eq(size_class, (pind > 0) ?
            psz2u(pind2sz(pind-1)+1) : psz2u(1),
            "psz2u() does not round up to size class");
        assert_zu_eq(size_class, psz2u(size_class-1),
            "psz2u() does not round up to size class");
        assert_zu_eq(size_class, psz2u(size_class),
            "psz2u() does not compute same size class");
        assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1),
            "psz2u() does not round up to next size class");
    }

    assert_u_eq(pind, psz2ind(pind2sz(pind)),
        "psz2ind() does not reverse pind2sz()");
    assert_zu_eq(max_size_class, pind2sz(psz2ind(max_size_class)),
        "pind2sz() does not reverse psz2ind()");

    assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1),
        "psz2u() does not round up to size class");
    assert_zu_eq(size_class, psz2u(size_class-1),
        "psz2u() does not round up to size class");
    assert_zu_eq(size_class, psz2u(size_class),
        "psz2u() does not compute same size class");
}
TEST_END

TEST_BEGIN(test_overflow)
{
    size_t max_size_class;

    max_size_class = get_max_size_class();

    assert_u_eq(size2index(max_size_class+1), NSIZES,
        "size2index() should return NSIZES on overflow");
    assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
        "size2index() should return NSIZES on overflow");
    assert_u_eq(size2index(SIZE_T_MAX), NSIZES,
        "size2index() should return NSIZES on overflow");

    assert_zu_eq(s2u(max_size_class+1), 0,
        "s2u() should return 0 for unsupported size");
    assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0,
        "s2u() should return 0 for unsupported size");
    assert_zu_eq(s2u(SIZE_T_MAX), 0,
        "s2u() should return 0 on overflow");

    assert_u_eq(psz2ind(max_size_class+1), NPSIZES,
        "psz2ind() should return NPSIZES on overflow");
    assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
        "psz2ind() should return NPSIZES on overflow");
    assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES,
        "psz2ind() should return NPSIZES on overflow");

    assert_zu_eq(psz2u(max_size_class+1), 0,
        "psz2u() should return 0 for unsupported size");
    assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0,
        "psz2u() should return 0 for unsupported size");
    assert_zu_eq(psz2u(SIZE_T_MAX), 0,
        "psz2u() should return 0 on overflow");
}
TEST_END

int
main(void)
{

    return (test(
        test_size_classes,
        test_psize_classes,
        test_overflow));
        test_size_classes));
}
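The psz2ind()/pind2sz() loop above is checking that the two maps are near-inverses: index to size is exact, while size to index rounds up to the containing class. The same shape on a toy power-of-two classifier (jemalloc's real class spacing is finer than powers of two; toy_* names are hypothetical):

#include <stddef.h>

static size_t toy_ind2sz(unsigned ind) { return (((size_t)1) << ind); }

static unsigned
toy_sz2ind(size_t s) /* smallest class that can hold s; requires s >= 1 */
{
    unsigned ind = 0;
    while (toy_ind2sz(ind) < s)
        ind++;
    /* toy_sz2ind(toy_ind2sz(i)) == i and toy_ind2sz(toy_sz2ind(s)) >= s */
    return (ind);
}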
106
deps/jemalloc/test/unit/smoothstep.c
vendored
@ -1,106 +0,0 @@
#include "test/jemalloc_test.h"

static const uint64_t smoothstep_tab[] = {
#define STEP(step, h, x, y) \
    h,
    SMOOTHSTEP
#undef STEP
};

TEST_BEGIN(test_smoothstep_integral)
{
    uint64_t sum, min, max;
    unsigned i;

    /*
     * The integral of smoothstep in the [0..1] range equals 1/2. Verify
     * that the fixed point representation's integral is no more than
     * rounding error distant from 1/2. Regarding rounding, each table
     * element is rounded down to the nearest fixed point value, so the
     * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
     */
    sum = 0;
    for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
        sum += smoothstep_tab[i];

    max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
    min = max - SMOOTHSTEP_NSTEPS;

    assert_u64_ge(sum, min,
        "Integral too small, even accounting for truncation");
    assert_u64_le(sum, max, "Integral exceeds 1/2");
    if (false) {
        malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
            max - sum, SMOOTHSTEP_NSTEPS);
    }
}
TEST_END

TEST_BEGIN(test_smoothstep_monotonic)
{
    uint64_t prev_h;
    unsigned i;

    /*
     * The smoothstep function is monotonic in [0..1], i.e. its slope is
     * non-negative. In practice we want to parametrize table generation
     * such that piecewise slope is greater than zero, but do not require
     * that here.
     */
    prev_h = 0;
    for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
        uint64_t h = smoothstep_tab[i];
        assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
        prev_h = h;
    }
    assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
        (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
}
TEST_END

TEST_BEGIN(test_smoothstep_slope)
{
    uint64_t prev_h, prev_delta;
    unsigned i;

    /*
     * The smoothstep slope strictly increases until x=0.5, and then
     * strictly decreases until x=1.0. Verify the slightly weaker
     * requirement of monotonicity, so that inadequate table precision does
     * not cause false test failures.
     */
    prev_h = 0;
    prev_delta = 0;
    for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
        uint64_t h = smoothstep_tab[i];
        uint64_t delta = h - prev_h;
        assert_u64_ge(delta, prev_delta,
            "Slope must monotonically increase in 0.0 <= x <= 0.5, "
            "i=%u", i);
        prev_h = h;
        prev_delta = delta;
    }

    prev_h = KQU(1) << SMOOTHSTEP_BFP;
    prev_delta = 0;
    for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
        uint64_t h = smoothstep_tab[i];
        uint64_t delta = prev_h - h;
        assert_u64_ge(delta, prev_delta,
            "Slope must monotonically decrease in 0.5 <= x <= 1.0, "
            "i=%u", i);
        prev_h = h;
        prev_delta = delta;
    }
}
TEST_END

int
main(void)
{

    return (test(
        test_smoothstep_integral,
        test_smoothstep_monotonic,
        test_smoothstep_slope));
}
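The 1/2 integral asserted above is not incidental: a smoothstep curve is rotationally symmetric about (0.5, 0.5), so its area over [0, 1] is exactly half the unit square. The classic cubic form makes this concrete (the table here may be generated from a higher-order variant, so take the exact polynomial as illustrative):

h(x) = 3x^2 - 2x^3,  with  h(0) = 0,  h(1) = 1,  h(1 - x) = 1 - h(x)

static double
smoothstep3(double x) /* x in [0, 1] */
{
    return (x * x * (3.0 - 2.0 * x));
}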
253
deps/jemalloc/test/unit/stats.c
vendored
Executable file → Normal file
253
deps/jemalloc/test/unit/stats.c
vendored
Executable file → Normal file
@ -7,18 +7,18 @@ TEST_BEGIN(test_stats_summary)
|
||||
int expected = config_stats ? 0 : ENOENT;
|
||||
|
||||
sz = sizeof(cactive);
|
||||
assert_d_eq(mallctl("stats.cactive", (void *)&cactive, &sz, NULL, 0),
|
||||
expected, "Unexpected mallctl() result");
|
||||
assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected,
|
||||
"Unexpected mallctl() result");
|
||||
|
||||
sz = sizeof(size_t);
|
||||
assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
|
||||
0), expected, "Unexpected mallctl() result");
|
||||
assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
|
||||
assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0),
|
||||
expected, "Unexpected mallctl() result");
|
||||
assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
|
||||
expected, "Unexpected mallctl() result");
|
||||
assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
|
||||
assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected,
|
||||
"Unexpected mallctl() result");
|
||||
assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0),
|
||||
expected, "Unexpected mallctl() result");
|
||||
assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected,
|
||||
"Unexpected mallctl() result");
|
||||
|
||||
if (config_stats) {
|
||||
assert_zu_le(active, *cactive,
|
||||
@@ -45,19 +45,19 @@ TEST_BEGIN(test_stats_huge)
     p = mallocx(large_maxclass+1, 0);
     assert_ptr_not_null(p, "Unexpected mallocx() failure");

-    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-        0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+        "Unexpected mallctl() failure");

     sz = sizeof(size_t);
-    assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
     sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", (void *)&nrequests,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
+        0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
+        0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");

     if (config_stats) {
         assert_zu_gt(allocated, 0,
@@ -83,8 +83,8 @@ TEST_BEGIN(test_stats_arenas_summary)
     uint64_t npurge, nmadvise, purged;

     arena = 0;
-    assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-        sizeof(arena)), 0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+        0, "Unexpected mallctl() failure");

     little = mallocx(SMALL_MAXCLASS, 0);
     assert_ptr_not_null(little, "Unexpected mallocx() failure");
@@ -93,26 +93,22 @@ TEST_BEGIN(test_stats_arenas_summary)
     huge = mallocx(chunksize, 0);
     assert_ptr_not_null(huge, "Unexpected mallocx() failure");

-    dallocx(little, 0);
-    dallocx(large, 0);
-    dallocx(huge, 0);
-
     assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
         "Unexpected mallctl() failure");

-    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-        0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+        "Unexpected mallctl() failure");

     sz = sizeof(size_t);
-    assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
-        0), expected, "Unexepected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0),
+        expected, "Unexepected mallctl() result");
     sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge, &sz, NULL,
-        0), expected, "Unexepected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.nmadvise", (void *)&nmadvise, &sz,
-        NULL, 0), expected, "Unexepected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.purged", (void *)&purged, &sz, NULL,
-        0), expected, "Unexepected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0),
+        expected, "Unexepected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0),
+        expected, "Unexepected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0),
+        expected, "Unexepected mallctl() result");

     if (config_stats) {
         assert_u64_gt(npurge, 0,
@@ -120,6 +116,10 @@ TEST_BEGIN(test_stats_arenas_summary)
         assert_u64_le(nmadvise, purged,
             "nmadvise should be no greater than purged");
     }
+
+    dallocx(little, 0);
+    dallocx(large, 0);
+    dallocx(huge, 0);
 }
 TEST_END
@@ -150,8 +150,8 @@ TEST_BEGIN(test_stats_arenas_small)
     no_lazy_lock();  /* Lazy locking would dodge tcache testing. */

     arena = 0;
-    assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-        sizeof(arena)), 0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+        0, "Unexpected mallctl() failure");

     p = mallocx(SMALL_MAXCLASS, 0);
     assert_ptr_not_null(p, "Unexpected mallocx() failure");
@@ -159,21 +159,19 @@ TEST_BEGIN(test_stats_arenas_small)
     assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
         config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");

-    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-        0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+        "Unexpected mallctl() failure");

     sz = sizeof(size_t);
-    assert_d_eq(mallctl("stats.arenas.0.small.allocated",
-        (void *)&allocated, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
     sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.small.nrequests",
-        (void *)&nrequests, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");

     if (config_stats) {
         assert_zu_gt(allocated, 0,
@@ -199,36 +197,34 @@ TEST_BEGIN(test_stats_arenas_large)
     int expected = config_stats ? 0 : ENOENT;

     arena = 0;
-    assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-        sizeof(arena)), 0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+        0, "Unexpected mallctl() failure");

     p = mallocx(large_maxclass, 0);
     assert_ptr_not_null(p, "Unexpected mallocx() failure");

-    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-        0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+        "Unexpected mallctl() failure");

     sz = sizeof(size_t);
-    assert_d_eq(mallctl("stats.arenas.0.large.allocated",
-        (void *)&allocated, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
     sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.large.nrequests",
-        (void *)&nrequests, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");

     if (config_stats) {
         assert_zu_gt(allocated, 0,
             "allocated should be greater than zero");
-        assert_u64_gt(nmalloc, 0,
+        assert_zu_gt(nmalloc, 0,
             "nmalloc should be greater than zero");
-        assert_u64_ge(nmalloc, ndalloc,
+        assert_zu_ge(nmalloc, ndalloc,
             "nmalloc should be at least as large as ndalloc");
-        assert_u64_gt(nrequests, 0,
+        assert_zu_gt(nrequests, 0,
             "nrequests should be greater than zero");
     }
@@ -245,30 +241,30 @@ TEST_BEGIN(test_stats_arenas_huge)
     int expected = config_stats ? 0 : ENOENT;

     arena = 0;
-    assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-        sizeof(arena)), 0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+        0, "Unexpected mallctl() failure");

     p = mallocx(chunksize, 0);
     assert_ptr_not_null(p, "Unexpected mallocx() failure");

-    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-        0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+        "Unexpected mallctl() failure");

     sz = sizeof(size_t);
-    assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
     sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");

     if (config_stats) {
         assert_zu_gt(allocated, 0,
             "allocated should be greater than zero");
-        assert_u64_gt(nmalloc, 0,
+        assert_zu_gt(nmalloc, 0,
             "nmalloc should be greater than zero");
-        assert_u64_ge(nmalloc, ndalloc,
+        assert_zu_ge(nmalloc, ndalloc,
             "nmalloc should be at least as large as ndalloc");
     }
@@ -286,8 +282,8 @@ TEST_BEGIN(test_stats_arenas_bins)
     int expected = config_stats ? 0 : ENOENT;

     arena = 0;
-    assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-        sizeof(arena)), 0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+        0, "Unexpected mallctl() failure");

     p = mallocx(arena_bin_info[0].reg_size, 0);
     assert_ptr_not_null(p, "Unexpected mallocx() failure");
@@ -295,36 +291,35 @@ TEST_BEGIN(test_stats_arenas_bins)
     assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
         config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");

-    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-        0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+        "Unexpected mallctl() failure");

     sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", (void *)&nmalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", (void *)&ndalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests",
-        (void *)&nrequests, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
     sz = sizeof(size_t);
-    assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", (void *)&curregs,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");

     sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", (void *)&nfills,
-        &sz, NULL, 0), config_tcache ? expected : ENOENT,
+    assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz,
+        NULL, 0), config_tcache ? expected : ENOENT,
         "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", (void *)&nflushes,
-        &sz, NULL, 0), config_tcache ? expected : ENOENT,
+    assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz,
+        NULL, 0), config_tcache ? expected : ENOENT,
         "Unexpected mallctl() result");

-    assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", (void *)&nruns, &sz,
+    assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz,
         NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", (void *)&nreruns,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
     sz = sizeof(size_t);
-    assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", (void *)&curruns,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");

     if (config_stats) {
         assert_u64_gt(nmalloc, 0,
@@ -360,26 +355,25 @@ TEST_BEGIN(test_stats_arenas_lruns)
     int expected = config_stats ? 0 : ENOENT;

     arena = 0;
-    assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-        sizeof(arena)), 0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+        0, "Unexpected mallctl() failure");

     p = mallocx(LARGE_MINCLASS, 0);
     assert_ptr_not_null(p, "Unexpected mallocx() failure");

-    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-        0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+        "Unexpected mallctl() failure");

     sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", (void *)&nmalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", (void *)&ndalloc,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests",
-        (void *)&nrequests, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
     sz = sizeof(size_t);
-    assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", (void *)&curruns,
-        &sz, NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");

     if (config_stats) {
         assert_u64_gt(nmalloc, 0,
@@ -405,26 +399,23 @@ TEST_BEGIN(test_stats_arenas_hchunks)
     int expected = config_stats ? 0 : ENOENT;

     arena = 0;
-    assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-        sizeof(arena)), 0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+        0, "Unexpected mallctl() failure");

     p = mallocx(chunksize, 0);
     assert_ptr_not_null(p, "Unexpected mallocx() failure");

-    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-        0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+        "Unexpected mallctl() failure");

     sz = sizeof(uint64_t);
-    assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc",
-        (void *)&nmalloc, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc",
-        (void *)&ndalloc, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz,
+        NULL, 0), expected, "Unexpected mallctl() result");
     sz = sizeof(size_t);
-    assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks",
-        (void *)&curhchunks, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks,
+        &sz, NULL, 0), expected, "Unexpected mallctl() result");

     if (config_stats) {
         assert_u64_gt(nmalloc, 0,
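A pattern recurring throughout stats.c: write the "epoch" control to refresh jemalloc's cached statistics, then read a counter by name. A standalone sketch of that sequence (assumes linking against jemalloc with its standard public header):

    #include <stdio.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        uint64_t epoch = 1;
        size_t allocated, sz;

        /* Refresh jemalloc's stats snapshot. */
        mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));

        /* Read a refreshed counter by name. */
        sz = sizeof(allocated);
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
            printf("allocated: %zu\n", allocated);
        return (0);
    }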
76
deps/jemalloc/test/unit/ticker.c
vendored
@@ -1,76 +0,0 @@
#include "test/jemalloc_test.h"

TEST_BEGIN(test_ticker_tick)
{
#define NREPS 2
#define NTICKS 3
    ticker_t ticker;
    int32_t i, j;

    ticker_init(&ticker, NTICKS);
    for (i = 0; i < NREPS; i++) {
        for (j = 0; j < NTICKS; j++) {
            assert_u_eq(ticker_read(&ticker), NTICKS - j,
                "Unexpected ticker value (i=%d, j=%d)", i, j);
            assert_false(ticker_tick(&ticker),
                "Unexpected ticker fire (i=%d, j=%d)", i, j);
        }
        assert_u32_eq(ticker_read(&ticker), 0,
            "Expected ticker depletion");
        assert_true(ticker_tick(&ticker),
            "Expected ticker fire (i=%d)", i);
        assert_u32_eq(ticker_read(&ticker), NTICKS,
            "Expected ticker reset");
    }
#undef NTICKS
}
TEST_END

TEST_BEGIN(test_ticker_ticks)
{
#define NTICKS 3
    ticker_t ticker;

    ticker_init(&ticker, NTICKS);

    assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
    assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
    assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
    assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
    assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");

    assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
    assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
#undef NTICKS
}
TEST_END

TEST_BEGIN(test_ticker_copy)
{
#define NTICKS 3
    ticker_t ta, tb;

    ticker_init(&ta, NTICKS);
    ticker_copy(&tb, &ta);
    assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
    assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
    assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");

    ticker_tick(&ta);
    ticker_copy(&tb, &ta);
    assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
    assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
    assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
#undef NTICKS
}
TEST_END

int
main(void)
{

    return (test(
        test_ticker_tick,
        test_ticker_ticks,
        test_ticker_copy));
}
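The deleted test above pins down the ticker contract: ticker_read reports remaining ticks, ticker_tick consumes one, and a tick past zero fires and resets the counter. A behavior-compatible sketch, inferred from those assertions (an illustration, not jemalloc's actual ticker.h):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        int32_t tick;    /* remaining ticks before firing */
        int32_t nticks;  /* reset period */
    } ticker_t;

    static void
    ticker_init(ticker_t *t, int32_t nticks)
    {
        t->tick = nticks;
        t->nticks = nticks;
    }

    static int32_t
    ticker_read(const ticker_t *t)
    {
        return (t->tick);
    }

    /* Consume nticks; fire and reset once the count is exhausted. */
    static bool
    ticker_ticks(ticker_t *t, int32_t nticks)
    {
        if (t->tick < nticks) {
            t->tick = t->nticks;
            return (true);
        }
        t->tick -= nticks;
        return (false);
    }

    static bool
    ticker_tick(ticker_t *t)
    {
        return (ticker_ticks(t, 1));
    }

    static void
    ticker_copy(ticker_t *dst, const ticker_t *src)
    {
        *dst = *src;
    }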
13
deps/jemalloc/test/unit/tsd.c
vendored
@@ -58,18 +58,18 @@ thd_start(void *arg)
     data_t d = (data_t)(uintptr_t)arg;
     void *p;

-    assert_x_eq(*data_tsd_get(true), DATA_INIT,
+    assert_x_eq(*data_tsd_get(), DATA_INIT,
         "Initial tsd get should return initialization value");

     p = malloc(1);
     assert_ptr_not_null(p, "Unexpected malloc() failure");

     data_tsd_set(&d);
-    assert_x_eq(*data_tsd_get(true), d,
+    assert_x_eq(*data_tsd_get(), d,
         "After tsd set, tsd get should return value that was set");

     d = 0;
-    assert_x_eq(*data_tsd_get(true), (data_t)(uintptr_t)arg,
+    assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg,
         "Resetting local data should have no effect on tsd");

     free(p);
@@ -79,7 +79,7 @@ thd_start(void *arg)
 TEST_BEGIN(test_tsd_main_thread)
 {

-    thd_start((void *)(uintptr_t)0xa5f3e329);
+    thd_start((void *) 0xa5f3e329);
 }
 TEST_END

@@ -99,11 +99,6 @@ int
 main(void)
 {

-    /* Core tsd bootstrapping must happen prior to data_tsd_boot(). */
-    if (nallocx(1, 0) == 0) {
-        malloc_printf("Initialization error");
-        return (test_status_fail);
-    }
     data_tsd_boot();

     return (test(
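data_tsd_boot/data_tsd_set/data_tsd_get are generated by jemalloc's malloc_tsd macros. The same boot/set/get pattern expressed directly with POSIX thread-specific data, for context (all names below are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_key_t data_key;
    static int data_init = 42;  /* analogous to DATA_INIT */

    static void
    data_boot(void)
    {
        /* One-time key creation, as data_tsd_boot() does internally. */
        pthread_key_create(&data_key, NULL);
    }

    static int *
    data_get(void)
    {
        int *v = pthread_getspecific(data_key);
        return (v != NULL ? v : &data_init);  /* fall back to initializer */
    }

    static void
    data_set(int *v)
    {
        pthread_setspecific(data_key, v);
    }

    int
    main(void)
    {
        int d = 7;

        data_boot();
        printf("%d\n", *data_get());  /* 42: initialization value */
        data_set(&d);
        printf("%d\n", *data_get());  /* 7: per-thread value that was set */
        return (0);
    }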
89
deps/jemalloc/test/unit/util.c
vendored
@@ -1,54 +1,33 @@
 #include "test/jemalloc_test.h"

-#define TEST_POW2_CEIL(t, suf, pri) do {				\
-    unsigned i, pow2;							\
-    t x;								\
-									\
-    assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result");	\
-									\
-    for (i = 0; i < sizeof(t) * 8; i++) {				\
-        assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1)		\
-            << i, "Unexpected result");					\
-    }									\
-									\
-    for (i = 2; i < sizeof(t) * 8; i++) {				\
-        assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1),		\
-            ((t)1) << i, "Unexpected result");				\
-    }									\
-									\
-    for (i = 0; i < sizeof(t) * 8 - 1; i++) {				\
-        assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1),		\
-            ((t)1) << (i+1), "Unexpected result");			\
-    }									\
-									\
-    for (pow2 = 1; pow2 < 25; pow2++) {					\
-        for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2;		\
-            x++) {							\
-            assert_##suf##_eq(pow2_ceil_##suf(x),			\
-                ((t)1) << pow2,						\
-                "Unexpected result, x=%"pri, x);			\
-        }								\
-    }									\
-} while (0)
-
-TEST_BEGIN(test_pow2_ceil_u64)
-{
-
-    TEST_POW2_CEIL(uint64_t, u64, FMTu64);
-}
-TEST_END
-
-TEST_BEGIN(test_pow2_ceil_u32)
-{
-
-    TEST_POW2_CEIL(uint32_t, u32, FMTu32);
-}
-TEST_END
-
-TEST_BEGIN(test_pow2_ceil_zu)
-{
-
-    TEST_POW2_CEIL(size_t, zu, "zu");
+TEST_BEGIN(test_pow2_ceil)
+{
+    unsigned i, pow2;
+    size_t x;
+
+    assert_zu_eq(pow2_ceil(0), 0, "Unexpected result");
+
+    for (i = 0; i < sizeof(size_t) * 8; i++) {
+        assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i,
+            "Unexpected result");
+    }
+
+    for (i = 2; i < sizeof(size_t) * 8; i++) {
+        assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i,
+            "Unexpected result");
+    }
+
+    for (i = 0; i < sizeof(size_t) * 8 - 1; i++) {
+        assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1),
+            "Unexpected result");
+    }
+
+    for (pow2 = 1; pow2 < 25; pow2++) {
+        for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) {
+            assert_zu_eq(pow2_ceil(x), ZU(1) << pow2,
+                "Unexpected result, x=%zu", x);
+        }
+    }
 }
 TEST_END
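Both the removed per-width variants and the restored size_t test exercise round-up-to-the-next-power-of-two, including the edge case pow2_ceil(0) == 0. The standard bit-smearing formulation satisfying those assertions (a sketch; jemalloc's own definition lives in its util header):

    #include <stddef.h>
    #include <stdint.h>

    /* Round up to the nearest power of two; 0 maps to 0 (x-- wraps). */
    static size_t
    pow2_ceil(size_t x)
    {
        x--;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
    #if SIZE_MAX > 0xffffffffUL
        x |= x >> 32;  /* only needed for 64-bit size_t */
    #endif
        x++;
        return (x);
    }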
@@ -75,7 +54,6 @@ TEST_BEGIN(test_malloc_strtoumax)
     };
 #define ERR(e)	e, #e
 #define KUMAX(x)	((uintmax_t)x##ULL)
-#define KSMAX(x)	((uintmax_t)(intmax_t)x##LL)
     struct test_s tests[] = {
         {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
         {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
@@ -88,13 +66,13 @@ TEST_BEGIN(test_malloc_strtoumax)

         {"42", "", 0, ERR(0), KUMAX(42)},
         {"+42", "", 0, ERR(0), KUMAX(42)},
-        {"-42", "", 0, ERR(0), KSMAX(-42)},
+        {"-42", "", 0, ERR(0), KUMAX(-42)},
         {"042", "", 0, ERR(0), KUMAX(042)},
         {"+042", "", 0, ERR(0), KUMAX(042)},
-        {"-042", "", 0, ERR(0), KSMAX(-042)},
+        {"-042", "", 0, ERR(0), KUMAX(-042)},
         {"0x42", "", 0, ERR(0), KUMAX(0x42)},
         {"+0x42", "", 0, ERR(0), KUMAX(0x42)},
-        {"-0x42", "", 0, ERR(0), KSMAX(-0x42)},
+        {"-0x42", "", 0, ERR(0), KUMAX(-0x42)},

         {"0", "", 0, ERR(0), KUMAX(0)},
         {"1", "", 0, ERR(0), KUMAX(1)},
@@ -131,7 +109,6 @@ TEST_BEGIN(test_malloc_strtoumax)
     };
 #undef ERR
 #undef KUMAX
-#undef KSMAX
     unsigned i;

     for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
@@ -162,14 +139,14 @@ TEST_BEGIN(test_malloc_snprintf_truncated)
 {
 #define BUFLEN	15
     char buf[BUFLEN];
-    size_t result;
+    int result;
     size_t len;
 #define TEST(expected_str_untruncated, ...) do {			\
     result = malloc_snprintf(buf, len, __VA_ARGS__);			\
     assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0,	\
         "Unexpected string inequality (\"%s\" vs \"%s\")",		\
         buf, expected_str_untruncated);					\
-    assert_zu_eq(result, strlen(expected_str_untruncated),		\
+    assert_d_eq(result, strlen(expected_str_untruncated),		\
         "Unexpected result");						\
 } while (0)

@@ -195,11 +172,11 @@ TEST_BEGIN(test_malloc_snprintf)
 {
 #define BUFLEN	128
     char buf[BUFLEN];
-    size_t result;
+    int result;
 #define TEST(expected_str, ...) do {					\
     result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__);		\
     assert_str_eq(buf, expected_str, "Unexpected output");		\
-    assert_zu_eq(result, strlen(expected_str), "Unexpected result");	\
+    assert_d_eq(result, strlen(expected_str), "Unexpected result");	\
 } while (0)

     TEST("hello", "hello");
@@ -309,9 +286,7 @@ main(void)
 {

     return (test(
-        test_pow2_ceil_u64,
-        test_pow2_ceil_u32,
-        test_pow2_ceil_zu,
+        test_pow2_ceil,
         test_malloc_strtoumax_no_endptr,
         test_malloc_strtoumax,
         test_malloc_snprintf_truncated,
278
deps/jemalloc/test/unit/witness.c
vendored
@@ -1,278 +0,0 @@
#include "test/jemalloc_test.h"

static witness_lock_error_t *witness_lock_error_orig;
static witness_owner_error_t *witness_owner_error_orig;
static witness_not_owner_error_t *witness_not_owner_error_orig;
static witness_lockless_error_t *witness_lockless_error_orig;

static bool saw_lock_error;
static bool saw_owner_error;
static bool saw_not_owner_error;
static bool saw_lockless_error;

static void
witness_lock_error_intercept(const witness_list_t *witnesses,
    const witness_t *witness)
{

    saw_lock_error = true;
}

static void
witness_owner_error_intercept(const witness_t *witness)
{

    saw_owner_error = true;
}

static void
witness_not_owner_error_intercept(const witness_t *witness)
{

    saw_not_owner_error = true;
}

static void
witness_lockless_error_intercept(const witness_list_t *witnesses)
{

    saw_lockless_error = true;
}

static int
witness_comp(const witness_t *a, const witness_t *b)
{

    assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");

    return (strcmp(a->name, b->name));
}

static int
witness_comp_reverse(const witness_t *a, const witness_t *b)
{

    assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");

    return (-strcmp(a->name, b->name));
}

TEST_BEGIN(test_witness)
{
    witness_t a, b;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, NULL);
    witness_assert_not_owner(tsdn, &a);
    witness_lock(tsdn, &a);
    witness_assert_owner(tsdn, &a);

    witness_init(&b, "b", 2, NULL);
    witness_assert_not_owner(tsdn, &b);
    witness_lock(tsdn, &b);
    witness_assert_owner(tsdn, &b);

    witness_unlock(tsdn, &a);
    witness_unlock(tsdn, &b);

    witness_assert_lockless(tsdn);
}
TEST_END

TEST_BEGIN(test_witness_comp)
{
    witness_t a, b, c, d;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, witness_comp);
    witness_assert_not_owner(tsdn, &a);
    witness_lock(tsdn, &a);
    witness_assert_owner(tsdn, &a);

    witness_init(&b, "b", 1, witness_comp);
    witness_assert_not_owner(tsdn, &b);
    witness_lock(tsdn, &b);
    witness_assert_owner(tsdn, &b);
    witness_unlock(tsdn, &b);

    witness_lock_error_orig = witness_lock_error;
    witness_lock_error = witness_lock_error_intercept;
    saw_lock_error = false;

    witness_init(&c, "c", 1, witness_comp_reverse);
    witness_assert_not_owner(tsdn, &c);
    assert_false(saw_lock_error, "Unexpected witness lock error");
    witness_lock(tsdn, &c);
    assert_true(saw_lock_error, "Expected witness lock error");
    witness_unlock(tsdn, &c);

    saw_lock_error = false;

    witness_init(&d, "d", 1, NULL);
    witness_assert_not_owner(tsdn, &d);
    assert_false(saw_lock_error, "Unexpected witness lock error");
    witness_lock(tsdn, &d);
    assert_true(saw_lock_error, "Expected witness lock error");
    witness_unlock(tsdn, &d);

    witness_unlock(tsdn, &a);

    witness_assert_lockless(tsdn);

    witness_lock_error = witness_lock_error_orig;
}
TEST_END

TEST_BEGIN(test_witness_reversal)
{
    witness_t a, b;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    witness_lock_error_orig = witness_lock_error;
    witness_lock_error = witness_lock_error_intercept;
    saw_lock_error = false;

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, NULL);
    witness_init(&b, "b", 2, NULL);

    witness_lock(tsdn, &b);
    assert_false(saw_lock_error, "Unexpected witness lock error");
    witness_lock(tsdn, &a);
    assert_true(saw_lock_error, "Expected witness lock error");

    witness_unlock(tsdn, &a);
    witness_unlock(tsdn, &b);

    witness_assert_lockless(tsdn);

    witness_lock_error = witness_lock_error_orig;
}
TEST_END

TEST_BEGIN(test_witness_recursive)
{
    witness_t a;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    witness_not_owner_error_orig = witness_not_owner_error;
    witness_not_owner_error = witness_not_owner_error_intercept;
    saw_not_owner_error = false;

    witness_lock_error_orig = witness_lock_error;
    witness_lock_error = witness_lock_error_intercept;
    saw_lock_error = false;

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, NULL);

    witness_lock(tsdn, &a);
    assert_false(saw_lock_error, "Unexpected witness lock error");
    assert_false(saw_not_owner_error, "Unexpected witness not owner error");
    witness_lock(tsdn, &a);
    assert_true(saw_lock_error, "Expected witness lock error");
    assert_true(saw_not_owner_error, "Expected witness not owner error");

    witness_unlock(tsdn, &a);

    witness_assert_lockless(tsdn);

    witness_owner_error = witness_owner_error_orig;
    witness_lock_error = witness_lock_error_orig;

}
TEST_END

TEST_BEGIN(test_witness_unlock_not_owned)
{
    witness_t a;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    witness_owner_error_orig = witness_owner_error;
    witness_owner_error = witness_owner_error_intercept;
    saw_owner_error = false;

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, NULL);

    assert_false(saw_owner_error, "Unexpected owner error");
    witness_unlock(tsdn, &a);
    assert_true(saw_owner_error, "Expected owner error");

    witness_assert_lockless(tsdn);

    witness_owner_error = witness_owner_error_orig;
}
TEST_END

TEST_BEGIN(test_witness_lockful)
{
    witness_t a;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    witness_lockless_error_orig = witness_lockless_error;
    witness_lockless_error = witness_lockless_error_intercept;
    saw_lockless_error = false;

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, NULL);

    assert_false(saw_lockless_error, "Unexpected lockless error");
    witness_assert_lockless(tsdn);

    witness_lock(tsdn, &a);
    witness_assert_lockless(tsdn);
    assert_true(saw_lockless_error, "Expected lockless error");

    witness_unlock(tsdn, &a);

    witness_assert_lockless(tsdn);

    witness_lockless_error = witness_lockless_error_orig;
}
TEST_END

int
main(void)
{

    return (test(
        test_witness,
        test_witness_comp,
        test_witness_reversal,
        test_witness_recursive,
        test_witness_unlock_not_owned,
        test_witness_lockful));
}
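The witness machinery deleted above enforces lock-acquisition order: each lock carries a numeric rank, each thread tracks the witnesses it holds, and the error hooks fire on rank reversals, recursive acquisition, or mismatched equal-rank comparators. A stripped-down sketch of the core check, inferred from the assertions above (illustrative only, not jemalloc's witness.h):

    #include <stdbool.h>

    typedef struct witness_s {
        const char *name;
        unsigned rank;
        /* Optional comparator for witnesses of equal rank. */
        int (*comp)(const struct witness_s *, const struct witness_s *);
    } witness_t;

    /* Would acquiring w while held is owned violate lock order? */
    static bool
    witness_order_violation(const witness_t *held, const witness_t *w)
    {
        if (held == NULL)
            return (false);  /* nothing held: any lock is fine */
        if (w->rank > held->rank)
            return (false);  /* strictly increasing rank: fine */
        if (w->rank == held->rank && held->comp != NULL &&
            held->comp == w->comp && held->comp(held, w) < 0)
            return (false);  /* equal rank, comparator orders them */
        return (true);       /* reversal or recursion: error hooks fire */
    }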
16
deps/jemalloc/test/unit/zero.c
vendored
@@ -8,41 +8,39 @@ const char *malloc_conf =
 static void
 test_zero(size_t sz_min, size_t sz_max)
 {
-    uint8_t *s;
+    char *s;
     size_t sz_prev, sz, i;
-#define MAGIC	((uint8_t)0x61)

     sz_prev = 0;
-    s = (uint8_t *)mallocx(sz_min, 0);
+    s = (char *)mallocx(sz_min, 0);
     assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

     for (sz = sallocx(s, 0); sz <= sz_max;
         sz_prev = sz, sz = sallocx(s, 0)) {
         if (sz_prev > 0) {
-            assert_u_eq(s[0], MAGIC,
+            assert_c_eq(s[0], 'a',
                 "Previously allocated byte %zu/%zu is corrupted",
                 ZU(0), sz_prev);
-            assert_u_eq(s[sz_prev-1], MAGIC,
+            assert_c_eq(s[sz_prev-1], 'a',
                 "Previously allocated byte %zu/%zu is corrupted",
                 sz_prev-1, sz_prev);
         }

         for (i = sz_prev; i < sz; i++) {
-            assert_u_eq(s[i], 0x0,
+            assert_c_eq(s[i], 0x0,
                 "Newly allocated byte %zu/%zu isn't zero-filled",
                 i, sz);
-            s[i] = MAGIC;
+            s[i] = 'a';
         }

         if (xallocx(s, sz+1, 0, 0) == sz) {
-            s = (uint8_t *)rallocx(s, sz+1, 0);
+            s = (char *)rallocx(s, sz+1, 0);
             assert_ptr_not_null((void *)s,
                 "Unexpected rallocx() failure");
         }
     }

     dallocx(s, 0);
-#undef MAGIC
 }

 TEST_BEGIN(test_zero_small)
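The zero.c test depends on the file-scope malloc_conf string visible in the hunk context to force zero filling of new allocations. An application can opt into the same behavior the same way (requires a fill-enabled jemalloc build):

    /* Read by jemalloc at startup; equivalent to MALLOC_CONF=zero:true. */
    const char *malloc_conf = "zero:true";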