mirror of
https://github.com/fluencelabs/redis
synced 2025-06-12 16:51:22 +00:00
Jemalloc updated to 4.4.0.
The original jemalloc source tree was modified to: 1. Remove the configure error that prevents nested builds. 2. Insert the Redis private Jemalloc API in order to allow the Redis fragmentation function to work.
This commit is contained in:
2057
deps/jemalloc/src/arena.c
vendored
2057
deps/jemalloc/src/arena.c
vendored
File diff suppressed because it is too large
Load Diff
73
deps/jemalloc/src/base.c
vendored
73
deps/jemalloc/src/base.c
vendored
@ -5,7 +5,8 @@
|
||||
/* Data. */
|
||||
|
||||
static malloc_mutex_t base_mtx;
|
||||
static extent_tree_t base_avail_szad;
|
||||
static size_t base_extent_sn_next;
|
||||
static extent_tree_t base_avail_szsnad;
|
||||
static extent_node_t *base_nodes;
|
||||
static size_t base_allocated;
|
||||
static size_t base_resident;
|
||||
@ -13,12 +14,13 @@ static size_t base_mapped;
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
/* base_mtx must be held. */
|
||||
static extent_node_t *
|
||||
base_node_try_alloc(void)
|
||||
base_node_try_alloc(tsdn_t *tsdn)
|
||||
{
|
||||
extent_node_t *node;
|
||||
|
||||
malloc_mutex_assert_owner(tsdn, &base_mtx);
|
||||
|
||||
if (base_nodes == NULL)
|
||||
return (NULL);
|
||||
node = base_nodes;
|
||||
@ -27,33 +29,42 @@ base_node_try_alloc(void)
|
||||
return (node);
|
||||
}
|
||||
|
||||
/* base_mtx must be held. */
|
||||
static void
|
||||
base_node_dalloc(extent_node_t *node)
|
||||
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
|
||||
{
|
||||
|
||||
malloc_mutex_assert_owner(tsdn, &base_mtx);
|
||||
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
|
||||
*(extent_node_t **)node = base_nodes;
|
||||
base_nodes = node;
|
||||
}
|
||||
|
||||
/* base_mtx must be held. */
|
||||
static void
|
||||
base_extent_node_init(extent_node_t *node, void *addr, size_t size)
|
||||
{
|
||||
size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
|
||||
|
||||
extent_node_init(node, NULL, addr, size, sn, true, true);
|
||||
}
|
||||
|
||||
static extent_node_t *
|
||||
base_chunk_alloc(size_t minsize)
|
||||
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
|
||||
{
|
||||
extent_node_t *node;
|
||||
size_t csize, nsize;
|
||||
void *addr;
|
||||
|
||||
malloc_mutex_assert_owner(tsdn, &base_mtx);
|
||||
assert(minsize != 0);
|
||||
node = base_node_try_alloc();
|
||||
node = base_node_try_alloc(tsdn);
|
||||
/* Allocate enough space to also carve a node out if necessary. */
|
||||
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
|
||||
csize = CHUNK_CEILING(minsize + nsize);
|
||||
addr = chunk_alloc_base(csize);
|
||||
if (addr == NULL) {
|
||||
if (node != NULL)
|
||||
base_node_dalloc(node);
|
||||
base_node_dalloc(tsdn, node);
|
||||
return (NULL);
|
||||
}
|
||||
base_mapped += csize;
|
||||
@ -66,7 +77,7 @@ base_chunk_alloc(size_t minsize)
|
||||
base_resident += PAGE_CEILING(nsize);
|
||||
}
|
||||
}
|
||||
extent_node_init(node, NULL, addr, csize, true, true);
|
||||
base_extent_node_init(node, addr, csize);
|
||||
return (node);
|
||||
}
|
||||
|
||||
@ -76,7 +87,7 @@ base_chunk_alloc(size_t minsize)
|
||||
* physical memory usage.
|
||||
*/
|
||||
void *
|
||||
base_alloc(size_t size)
|
||||
base_alloc(tsdn_t *tsdn, size_t size)
|
||||
{
|
||||
void *ret;
|
||||
size_t csize, usize;
|
||||
@ -90,15 +101,15 @@ base_alloc(size_t size)
|
||||
csize = CACHELINE_CEILING(size);
|
||||
|
||||
usize = s2u(csize);
|
||||
extent_node_init(&key, NULL, NULL, usize, false, false);
|
||||
malloc_mutex_lock(&base_mtx);
|
||||
node = extent_tree_szad_nsearch(&base_avail_szad, &key);
|
||||
extent_node_init(&key, NULL, NULL, usize, 0, false, false);
|
||||
malloc_mutex_lock(tsdn, &base_mtx);
|
||||
node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
|
||||
if (node != NULL) {
|
||||
/* Use existing space. */
|
||||
extent_tree_szad_remove(&base_avail_szad, node);
|
||||
extent_tree_szsnad_remove(&base_avail_szsnad, node);
|
||||
} else {
|
||||
/* Try to allocate more space. */
|
||||
node = base_chunk_alloc(csize);
|
||||
node = base_chunk_alloc(tsdn, csize);
|
||||
}
|
||||
if (node == NULL) {
|
||||
ret = NULL;
|
||||
@ -109,9 +120,9 @@ base_alloc(size_t size)
|
||||
if (extent_node_size_get(node) > csize) {
|
||||
extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
|
||||
extent_node_size_set(node, extent_node_size_get(node) - csize);
|
||||
extent_tree_szad_insert(&base_avail_szad, node);
|
||||
extent_tree_szsnad_insert(&base_avail_szsnad, node);
|
||||
} else
|
||||
base_node_dalloc(node);
|
||||
base_node_dalloc(tsdn, node);
|
||||
if (config_stats) {
|
||||
base_allocated += csize;
|
||||
/*
|
||||
@ -123,52 +134,54 @@ base_alloc(size_t size)
|
||||
}
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
|
||||
label_return:
|
||||
malloc_mutex_unlock(&base_mtx);
|
||||
malloc_mutex_unlock(tsdn, &base_mtx);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
void
|
||||
base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
|
||||
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
|
||||
size_t *mapped)
|
||||
{
|
||||
|
||||
malloc_mutex_lock(&base_mtx);
|
||||
malloc_mutex_lock(tsdn, &base_mtx);
|
||||
assert(base_allocated <= base_resident);
|
||||
assert(base_resident <= base_mapped);
|
||||
*allocated = base_allocated;
|
||||
*resident = base_resident;
|
||||
*mapped = base_mapped;
|
||||
malloc_mutex_unlock(&base_mtx);
|
||||
malloc_mutex_unlock(tsdn, &base_mtx);
|
||||
}
|
||||
|
||||
bool
|
||||
base_boot(void)
|
||||
{
|
||||
|
||||
if (malloc_mutex_init(&base_mtx))
|
||||
if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
|
||||
return (true);
|
||||
extent_tree_szad_new(&base_avail_szad);
|
||||
base_extent_sn_next = 0;
|
||||
extent_tree_szsnad_new(&base_avail_szsnad);
|
||||
base_nodes = NULL;
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
void
|
||||
base_prefork(void)
|
||||
base_prefork(tsdn_t *tsdn)
|
||||
{
|
||||
|
||||
malloc_mutex_prefork(&base_mtx);
|
||||
malloc_mutex_prefork(tsdn, &base_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
base_postfork_parent(void)
|
||||
base_postfork_parent(tsdn_t *tsdn)
|
||||
{
|
||||
|
||||
malloc_mutex_postfork_parent(&base_mtx);
|
||||
malloc_mutex_postfork_parent(tsdn, &base_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
base_postfork_child(void)
|
||||
base_postfork_child(tsdn_t *tsdn)
|
||||
{
|
||||
|
||||
malloc_mutex_postfork_child(&base_mtx);
|
||||
malloc_mutex_postfork_child(tsdn, &base_mtx);
|
||||
}
|
||||
|
59
deps/jemalloc/src/bitmap.c
vendored
59
deps/jemalloc/src/bitmap.c
vendored
@ -3,6 +3,8 @@
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
#ifdef USE_TREE
|
||||
|
||||
void
|
||||
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
|
||||
{
|
||||
@ -32,20 +34,11 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
|
||||
binfo->nbits = nbits;
|
||||
}
|
||||
|
||||
size_t
|
||||
static size_t
|
||||
bitmap_info_ngroups(const bitmap_info_t *binfo)
|
||||
{
|
||||
|
||||
return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
|
||||
}
|
||||
|
||||
size_t
|
||||
bitmap_size(size_t nbits)
|
||||
{
|
||||
bitmap_info_t binfo;
|
||||
|
||||
bitmap_info_init(&binfo, nbits);
|
||||
return (bitmap_info_ngroups(&binfo));
|
||||
return (binfo->levels[binfo->nlevels].group_offset);
|
||||
}
|
||||
|
||||
void
|
||||
@ -61,8 +54,7 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
|
||||
* correspond to the first logical bit in the group, so extra bits
|
||||
* are the most significant bits of the last group.
|
||||
*/
|
||||
memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
|
||||
LG_SIZEOF_BITMAP);
|
||||
memset(bitmap, 0xffU, bitmap_size(binfo));
|
||||
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
|
||||
& BITMAP_GROUP_NBITS_MASK;
|
||||
if (extra != 0)
|
||||
@ -76,3 +68,44 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
|
||||
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
|
||||
}
|
||||
}
|
||||
|
||||
#else /* USE_TREE */
|
||||
|
||||
void
|
||||
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
|
||||
{
|
||||
|
||||
assert(nbits > 0);
|
||||
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
|
||||
|
||||
binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
|
||||
binfo->nbits = nbits;
|
||||
}
|
||||
|
||||
static size_t
|
||||
bitmap_info_ngroups(const bitmap_info_t *binfo)
|
||||
{
|
||||
|
||||
return (binfo->ngroups);
|
||||
}
|
||||
|
||||
void
|
||||
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
|
||||
{
|
||||
size_t extra;
|
||||
|
||||
memset(bitmap, 0xffU, bitmap_size(binfo));
|
||||
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
|
||||
& BITMAP_GROUP_NBITS_MASK;
|
||||
if (extra != 0)
|
||||
bitmap[binfo->ngroups - 1] >>= extra;
|
||||
}
|
||||
|
||||
#endif /* USE_TREE */
|
||||
|
||||
size_t
|
||||
bitmap_size(const bitmap_info_t *binfo)
|
||||
{
|
||||
|
||||
return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
|
||||
}
|
||||
|
428
deps/jemalloc/src/chunk.c
vendored
428
deps/jemalloc/src/chunk.c
vendored
@ -49,9 +49,10 @@ const chunk_hooks_t chunk_hooks_default = {
|
||||
* definition.
|
||||
*/
|
||||
|
||||
static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
|
||||
void *chunk, size_t size, bool zeroed, bool committed);
|
||||
static void chunk_record(tsdn_t *tsdn, arena_t *arena,
|
||||
chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
|
||||
extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
|
||||
bool zeroed, bool committed);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
@ -63,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena)
|
||||
}
|
||||
|
||||
chunk_hooks_t
|
||||
chunk_hooks_get(arena_t *arena)
|
||||
chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
|
||||
{
|
||||
chunk_hooks_t chunk_hooks;
|
||||
|
||||
malloc_mutex_lock(&arena->chunks_mtx);
|
||||
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
|
||||
chunk_hooks = chunk_hooks_get_locked(arena);
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
|
||||
|
||||
return (chunk_hooks);
|
||||
}
|
||||
|
||||
chunk_hooks_t
|
||||
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
|
||||
chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
|
||||
{
|
||||
chunk_hooks_t old_chunk_hooks;
|
||||
|
||||
malloc_mutex_lock(&arena->chunks_mtx);
|
||||
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
|
||||
old_chunk_hooks = arena->chunk_hooks;
|
||||
/*
|
||||
* Copy each field atomically so that it is impossible for readers to
|
||||
@ -104,14 +105,14 @@ chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
|
||||
ATOMIC_COPY_HOOK(split);
|
||||
ATOMIC_COPY_HOOK(merge);
|
||||
#undef ATOMIC_COPY_HOOK
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
|
||||
|
||||
return (old_chunk_hooks);
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
bool locked)
|
||||
chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
|
||||
chunk_hooks_t *chunk_hooks, bool locked)
|
||||
{
|
||||
static const chunk_hooks_t uninitialized_hooks =
|
||||
CHUNK_HOOKS_INITIALIZER;
|
||||
@ -119,27 +120,28 @@ chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
|
||||
0) {
|
||||
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
|
||||
chunk_hooks_get(arena);
|
||||
chunk_hooks_get(tsdn, arena);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_hooks_assure_initialized_locked(arena_t *arena,
|
||||
chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
|
||||
chunk_hooks_t *chunk_hooks)
|
||||
{
|
||||
|
||||
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
|
||||
chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
|
||||
chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
|
||||
chunk_hooks_t *chunk_hooks)
|
||||
{
|
||||
|
||||
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
|
||||
chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_register(const void *chunk, const extent_node_t *node)
|
||||
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
|
||||
{
|
||||
|
||||
assert(extent_node_addr_get(node) == chunk);
|
||||
@ -159,7 +161,7 @@ chunk_register(const void *chunk, const extent_node_t *node)
|
||||
high = atomic_read_z(&highchunks);
|
||||
}
|
||||
if (cur > high && prof_gdump_get_unlocked())
|
||||
prof_gdump();
|
||||
prof_gdump(tsdn);
|
||||
}
|
||||
|
||||
return (false);
|
||||
@ -181,33 +183,35 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
|
||||
}
|
||||
|
||||
/*
|
||||
* Do first-best-fit chunk selection, i.e. select the lowest chunk that best
|
||||
* fits.
|
||||
* Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
|
||||
* best fits.
|
||||
*/
|
||||
static extent_node_t *
|
||||
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
extent_tree_t *chunks_ad, size_t size)
|
||||
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
|
||||
{
|
||||
extent_node_t key;
|
||||
|
||||
assert(size == CHUNK_CEILING(size));
|
||||
|
||||
extent_node_init(&key, arena, NULL, size, false, false);
|
||||
return (extent_tree_szad_nsearch(chunks_szad, &key));
|
||||
extent_node_init(&key, arena, NULL, size, 0, false, false);
|
||||
return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
|
||||
}
|
||||
|
||||
static void *
|
||||
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
|
||||
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
|
||||
bool dalloc_node)
|
||||
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
|
||||
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
|
||||
bool *commit, bool dalloc_node)
|
||||
{
|
||||
void *ret;
|
||||
extent_node_t *node;
|
||||
size_t alloc_size, leadsize, trailsize;
|
||||
bool zeroed, committed;
|
||||
|
||||
assert(CHUNK_CEILING(size) == size);
|
||||
assert(alignment > 0);
|
||||
assert(new_addr == NULL || alignment == chunksize);
|
||||
assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
|
||||
/*
|
||||
* Cached chunks use the node linkage embedded in their headers, in
|
||||
* which case dalloc_node is true, and new_addr is non-NULL because
|
||||
@ -215,24 +219,23 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
*/
|
||||
assert(dalloc_node || new_addr != NULL);
|
||||
|
||||
alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
|
||||
alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
|
||||
/* Beware size_t wrap-around. */
|
||||
if (alloc_size < size)
|
||||
return (NULL);
|
||||
malloc_mutex_lock(&arena->chunks_mtx);
|
||||
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
|
||||
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
|
||||
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
|
||||
if (new_addr != NULL) {
|
||||
extent_node_t key;
|
||||
extent_node_init(&key, arena, new_addr, alloc_size, false,
|
||||
extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
|
||||
false);
|
||||
node = extent_tree_ad_search(chunks_ad, &key);
|
||||
} else {
|
||||
node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
|
||||
alloc_size);
|
||||
node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
|
||||
}
|
||||
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
|
||||
size)) {
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
|
||||
return (NULL);
|
||||
}
|
||||
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
|
||||
@ -241,6 +244,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
assert(extent_node_size_get(node) >= leadsize + size);
|
||||
trailsize = extent_node_size_get(node) - leadsize - size;
|
||||
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
|
||||
*sn = extent_node_sn_get(node);
|
||||
zeroed = extent_node_zeroed_get(node);
|
||||
if (zeroed)
|
||||
*zero = true;
|
||||
@ -251,17 +255,17 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
if (leadsize != 0 &&
|
||||
chunk_hooks->split(extent_node_addr_get(node),
|
||||
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
|
||||
return (NULL);
|
||||
}
|
||||
/* Remove node from the tree. */
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
extent_tree_szsnad_remove(chunks_szsnad, node);
|
||||
extent_tree_ad_remove(chunks_ad, node);
|
||||
arena_chunk_cache_maybe_remove(arena, node, cache);
|
||||
if (leadsize != 0) {
|
||||
/* Insert the leading space as a smaller chunk. */
|
||||
extent_node_size_set(node, leadsize);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_szsnad_insert(chunks_szsnad, node);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
arena_chunk_cache_maybe_insert(arena, node, cache);
|
||||
node = NULL;
|
||||
@ -271,41 +275,42 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
if (chunk_hooks->split(ret, size + trailsize, size,
|
||||
trailsize, false, arena->ind)) {
|
||||
if (dalloc_node && node != NULL)
|
||||
arena_node_dalloc(arena, node);
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
|
||||
cache, ret, size + trailsize, zeroed, committed);
|
||||
arena_node_dalloc(tsdn, arena, node);
|
||||
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
|
||||
chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
|
||||
chunks_ad, cache, ret, size + trailsize, *sn,
|
||||
zeroed, committed);
|
||||
return (NULL);
|
||||
}
|
||||
/* Insert the trailing space as a smaller chunk. */
|
||||
if (node == NULL) {
|
||||
node = arena_node_alloc(arena);
|
||||
node = arena_node_alloc(tsdn, arena);
|
||||
if (node == NULL) {
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
chunk_record(arena, chunk_hooks, chunks_szad,
|
||||
chunks_ad, cache, ret, size + trailsize,
|
||||
zeroed, committed);
|
||||
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
|
||||
chunk_record(tsdn, arena, chunk_hooks,
|
||||
chunks_szsnad, chunks_ad, cache, ret, size
|
||||
+ trailsize, *sn, zeroed, committed);
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
|
||||
trailsize, zeroed, committed);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
trailsize, *sn, zeroed, committed);
|
||||
extent_tree_szsnad_insert(chunks_szsnad, node);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
arena_chunk_cache_maybe_insert(arena, node, cache);
|
||||
node = NULL;
|
||||
}
|
||||
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
|
||||
ret, size, zeroed, committed);
|
||||
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
|
||||
chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
|
||||
cache, ret, size, *sn, zeroed, committed);
|
||||
return (NULL);
|
||||
}
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
|
||||
|
||||
assert(dalloc_node || node != NULL);
|
||||
if (dalloc_node && node != NULL)
|
||||
arena_node_dalloc(arena, node);
|
||||
arena_node_dalloc(tsdn, arena, node);
|
||||
if (*zero) {
|
||||
if (!zeroed)
|
||||
memset(ret, 0, size);
|
||||
@ -313,10 +318,11 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
size_t i;
|
||||
size_t *p = (size_t *)(uintptr_t)ret;
|
||||
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
|
||||
for (i = 0; i < size / sizeof(size_t); i++)
|
||||
assert(p[i] == 0);
|
||||
}
|
||||
if (config_valgrind)
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
|
||||
}
|
||||
return (ret);
|
||||
}
|
||||
@ -328,39 +334,29 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
* them if they are returned.
|
||||
*/
|
||||
static void *
|
||||
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
bool *zero, bool *commit, dss_prec_t dss_prec)
|
||||
chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
|
||||
size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
|
||||
{
|
||||
void *ret;
|
||||
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
assert(alignment != 0);
|
||||
assert((alignment & chunksize_mask) == 0);
|
||||
|
||||
/* Retained. */
|
||||
if ((ret = chunk_recycle(arena, &chunk_hooks,
|
||||
&arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
|
||||
new_addr, size, alignment, zero, commit, true)) != NULL)
|
||||
return (ret);
|
||||
|
||||
/* "primary" dss. */
|
||||
if (have_dss && dss_prec == dss_prec_primary && (ret =
|
||||
chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
|
||||
NULL)
|
||||
return (ret);
|
||||
/*
|
||||
* mmap. Requesting an address is not implemented for
|
||||
* chunk_alloc_mmap(), so only call it if (new_addr == NULL).
|
||||
*/
|
||||
if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
|
||||
chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
|
||||
commit)) != NULL)
|
||||
return (ret);
|
||||
/* mmap. */
|
||||
if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
|
||||
NULL)
|
||||
return (ret);
|
||||
/* "secondary" dss. */
|
||||
if (have_dss && dss_prec == dss_prec_secondary && (ret =
|
||||
chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
|
||||
NULL)
|
||||
chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
|
||||
commit)) != NULL)
|
||||
return (ret);
|
||||
|
||||
/* All strategies for allocation failed. */
|
||||
@ -380,7 +376,7 @@ chunk_alloc_base(size_t size)
|
||||
*/
|
||||
zero = true;
|
||||
commit = true;
|
||||
ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
|
||||
ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
if (config_valgrind)
|
||||
@ -390,37 +386,33 @@ chunk_alloc_base(size_t size)
|
||||
}
|
||||
|
||||
void *
|
||||
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
|
||||
size_t size, size_t alignment, bool *zero, bool dalloc_node)
|
||||
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
|
||||
bool *commit, bool dalloc_node)
|
||||
{
|
||||
void *ret;
|
||||
bool commit;
|
||||
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
assert(alignment != 0);
|
||||
assert((alignment & chunksize_mask) == 0);
|
||||
|
||||
commit = true;
|
||||
ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
|
||||
&arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
|
||||
&commit, dalloc_node);
|
||||
ret = chunk_recycle(tsdn, arena, chunk_hooks,
|
||||
&arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
|
||||
new_addr, size, alignment, sn, zero, commit, dalloc_node);
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
assert(commit);
|
||||
if (config_valgrind)
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static arena_t *
|
||||
chunk_arena_get(unsigned arena_ind)
|
||||
chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
|
||||
{
|
||||
arena_t *arena;
|
||||
|
||||
/* Dodge tsd for a0 in order to avoid bootstrapping issues. */
|
||||
arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
|
||||
false, true);
|
||||
arena = arena_get(tsdn, arena_ind, false);
|
||||
/*
|
||||
* The arena we're allocating on behalf of must have been initialized
|
||||
* already.
|
||||
@ -430,14 +422,12 @@ chunk_arena_get(unsigned arena_ind)
|
||||
}
|
||||
|
||||
static void *
|
||||
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
|
||||
bool *commit, unsigned arena_ind)
|
||||
chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
|
||||
size_t size, size_t alignment, bool *zero, bool *commit)
|
||||
{
|
||||
void *ret;
|
||||
arena_t *arena;
|
||||
|
||||
arena = chunk_arena_get(arena_ind);
|
||||
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
|
||||
ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
|
||||
commit, arena->dss_prec);
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
@ -447,26 +437,80 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
|
||||
return (ret);
|
||||
}
|
||||
|
||||
void *
|
||||
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
|
||||
size_t size, size_t alignment, bool *zero, bool *commit)
|
||||
static void *
|
||||
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
|
||||
bool *commit, unsigned arena_ind)
|
||||
{
|
||||
tsdn_t *tsdn;
|
||||
arena_t *arena;
|
||||
|
||||
tsdn = tsdn_fetch();
|
||||
arena = chunk_arena_get(tsdn, arena_ind);
|
||||
|
||||
return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
|
||||
zero, commit));
|
||||
}
|
||||
|
||||
static void *
|
||||
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
|
||||
bool *commit)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
chunk_hooks_assure_initialized(arena, chunk_hooks);
|
||||
ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
|
||||
arena->ind);
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
assert(alignment != 0);
|
||||
assert((alignment & chunksize_mask) == 0);
|
||||
|
||||
ret = chunk_recycle(tsdn, arena, chunk_hooks,
|
||||
&arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
|
||||
new_addr, size, alignment, sn, zero, commit, true);
|
||||
|
||||
if (config_stats && ret != NULL)
|
||||
arena->stats.retained -= size;
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
||||
void *
|
||||
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
|
||||
bool *commit)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
|
||||
|
||||
ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
|
||||
alignment, sn, zero, commit);
|
||||
if (ret == NULL) {
|
||||
if (chunk_hooks->alloc == chunk_alloc_default) {
|
||||
/* Call directly to propagate tsdn. */
|
||||
ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
|
||||
size, alignment, zero, commit);
|
||||
} else {
|
||||
ret = chunk_hooks->alloc(new_addr, size, alignment,
|
||||
zero, commit, arena->ind);
|
||||
}
|
||||
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
|
||||
*sn = arena_extent_sn_next(arena);
|
||||
|
||||
if (config_valgrind && chunk_hooks->alloc !=
|
||||
chunk_alloc_default)
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
|
||||
}
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
|
||||
void *chunk, size_t size, bool zeroed, bool committed)
|
||||
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
|
||||
void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
|
||||
{
|
||||
bool unzeroed;
|
||||
extent_node_t *node, *prev;
|
||||
@ -476,9 +520,9 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
unzeroed = cache || !zeroed;
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
|
||||
|
||||
malloc_mutex_lock(&arena->chunks_mtx);
|
||||
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
|
||||
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
|
||||
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
|
||||
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
|
||||
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
|
||||
false, false);
|
||||
node = extent_tree_ad_nsearch(chunks_ad, &key);
|
||||
/* Try to coalesce forward. */
|
||||
@ -490,19 +534,21 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
/*
|
||||
* Coalesce chunk with the following address range. This does
|
||||
* not change the position within chunks_ad, so only
|
||||
* remove/insert from/into chunks_szad.
|
||||
* remove/insert from/into chunks_szsnad.
|
||||
*/
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
extent_tree_szsnad_remove(chunks_szsnad, node);
|
||||
arena_chunk_cache_maybe_remove(arena, node, cache);
|
||||
extent_node_addr_set(node, chunk);
|
||||
extent_node_size_set(node, size + extent_node_size_get(node));
|
||||
if (sn < extent_node_sn_get(node))
|
||||
extent_node_sn_set(node, sn);
|
||||
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
|
||||
!unzeroed);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_szsnad_insert(chunks_szsnad, node);
|
||||
arena_chunk_cache_maybe_insert(arena, node, cache);
|
||||
} else {
|
||||
/* Coalescing forward failed, so insert a new node. */
|
||||
node = arena_node_alloc(arena);
|
||||
node = arena_node_alloc(tsdn, arena);
|
||||
if (node == NULL) {
|
||||
/*
|
||||
* Node allocation failed, which is an exceedingly
|
||||
@ -511,15 +557,15 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
* a virtual memory leak.
|
||||
*/
|
||||
if (cache) {
|
||||
chunk_purge_wrapper(arena, chunk_hooks, chunk,
|
||||
size, 0, size);
|
||||
chunk_purge_wrapper(tsdn, arena, chunk_hooks,
|
||||
chunk, size, 0, size);
|
||||
}
|
||||
goto label_return;
|
||||
}
|
||||
extent_node_init(node, arena, chunk, size, !unzeroed,
|
||||
extent_node_init(node, arena, chunk, size, sn, !unzeroed,
|
||||
committed);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_szsnad_insert(chunks_szsnad, node);
|
||||
arena_chunk_cache_maybe_insert(arena, node, cache);
|
||||
}
|
||||
|
||||
@ -533,31 +579,33 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
/*
|
||||
* Coalesce chunk with the previous address range. This does
|
||||
* not change the position within chunks_ad, so only
|
||||
* remove/insert node from/into chunks_szad.
|
||||
* remove/insert node from/into chunks_szsnad.
|
||||
*/
|
||||
extent_tree_szad_remove(chunks_szad, prev);
|
||||
extent_tree_szsnad_remove(chunks_szsnad, prev);
|
||||
extent_tree_ad_remove(chunks_ad, prev);
|
||||
arena_chunk_cache_maybe_remove(arena, prev, cache);
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
extent_tree_szsnad_remove(chunks_szsnad, node);
|
||||
arena_chunk_cache_maybe_remove(arena, node, cache);
|
||||
extent_node_addr_set(node, extent_node_addr_get(prev));
|
||||
extent_node_size_set(node, extent_node_size_get(prev) +
|
||||
extent_node_size_get(node));
|
||||
if (extent_node_sn_get(prev) < extent_node_sn_get(node))
|
||||
extent_node_sn_set(node, extent_node_sn_get(prev));
|
||||
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
|
||||
extent_node_zeroed_get(node));
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_szsnad_insert(chunks_szsnad, node);
|
||||
arena_chunk_cache_maybe_insert(arena, node, cache);
|
||||
|
||||
arena_node_dalloc(arena, prev);
|
||||
arena_node_dalloc(tsdn, arena, prev);
|
||||
}
|
||||
|
||||
label_return:
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
|
||||
size_t size, bool committed)
|
||||
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
void *chunk, size_t size, size_t sn, bool committed)
|
||||
{
|
||||
|
||||
assert(chunk != NULL);
|
||||
@ -565,24 +613,49 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
|
||||
chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
|
||||
&arena->chunks_ad_cached, true, chunk, size, false, committed);
|
||||
arena_maybe_purge(arena);
|
||||
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
|
||||
&arena->chunks_ad_cached, true, chunk, size, sn, false,
|
||||
committed);
|
||||
arena_maybe_purge(tsdn, arena);
|
||||
}
|
||||
|
||||
static bool
|
||||
chunk_dalloc_default_impl(void *chunk, size_t size)
|
||||
{
|
||||
|
||||
if (!have_dss || !chunk_in_dss(chunk))
|
||||
return (chunk_dalloc_mmap(chunk, size));
|
||||
return (true);
|
||||
}
|
||||
|
||||
static bool
|
||||
chunk_dalloc_default(void *chunk, size_t size, bool committed,
|
||||
unsigned arena_ind)
|
||||
{
|
||||
|
||||
return (chunk_dalloc_default_impl(chunk, size));
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
|
||||
size_t size, bool zeroed, bool committed)
|
||||
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
|
||||
{
|
||||
bool err;
|
||||
|
||||
assert(chunk != NULL);
|
||||
assert(CHUNK_ADDR2BASE(chunk) == chunk);
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
|
||||
chunk_hooks_assure_initialized(arena, chunk_hooks);
|
||||
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
|
||||
/* Try to deallocate. */
|
||||
if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
|
||||
if (chunk_hooks->dalloc == chunk_dalloc_default) {
|
||||
/* Call directly to propagate tsdn. */
|
||||
err = chunk_dalloc_default_impl(chunk, size);
|
||||
} else
|
||||
err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
|
||||
|
||||
if (!err)
|
||||
return;
|
||||
/* Try to decommit; purge if that fails. */
|
||||
if (committed) {
|
||||
@ -591,29 +664,12 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
|
||||
}
|
||||
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
|
||||
arena->ind);
|
||||
chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
|
||||
&arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
|
||||
}
|
||||
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
|
||||
&arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
|
||||
committed);
|
||||
|
||||
static bool
|
||||
chunk_dalloc_default(void *chunk, size_t size, bool committed,
|
||||
unsigned arena_ind)
|
||||
{
|
||||
|
||||
if (!have_dss || !chunk_in_dss(chunk))
|
||||
return (chunk_dalloc_mmap(chunk, size));
|
||||
return (true);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
|
||||
size_t size, bool committed)
|
||||
{
|
||||
|
||||
chunk_hooks_assure_initialized(arena, chunk_hooks);
|
||||
chunk_hooks->dalloc(chunk, size, committed, arena->ind);
|
||||
if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
|
||||
if (config_stats)
|
||||
arena->stats.retained += size;
|
||||
}
|
||||
|
||||
static bool
|
||||
@ -634,8 +690,9 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
|
||||
length));
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
|
||||
static bool
|
||||
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
|
||||
unsigned arena_ind)
|
||||
{
|
||||
|
||||
assert(chunk != NULL);
|
||||
@ -648,21 +705,12 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
|
||||
length));
|
||||
}
|
||||
|
||||
static bool
|
||||
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
|
||||
unsigned arena_ind)
|
||||
{
|
||||
|
||||
return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
|
||||
length));
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
|
||||
size_t size, size_t offset, size_t length)
|
||||
chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
void *chunk, size_t size, size_t offset, size_t length)
|
||||
{
|
||||
|
||||
chunk_hooks_assure_initialized(arena, chunk_hooks);
|
||||
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
|
||||
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
|
||||
}
|
||||
|
||||
@ -677,23 +725,30 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
|
||||
}
|
||||
|
||||
static bool
|
||||
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
|
||||
bool committed, unsigned arena_ind)
|
||||
chunk_merge_default_impl(void *chunk_a, void *chunk_b)
|
||||
{
|
||||
|
||||
if (!maps_coalesce)
|
||||
return (true);
|
||||
if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
|
||||
if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
|
||||
return (true);
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
static bool
|
||||
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
|
||||
bool committed, unsigned arena_ind)
|
||||
{
|
||||
|
||||
return (chunk_merge_default_impl(chunk_a, chunk_b));
|
||||
}
|
||||
|
||||
static rtree_node_elm_t *
|
||||
chunks_rtree_node_alloc(size_t nelms)
|
||||
{
|
||||
|
||||
return ((rtree_node_elm_t *)base_alloc(nelms *
|
||||
return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
|
||||
sizeof(rtree_node_elm_t)));
|
||||
}
|
||||
|
||||
@ -716,7 +771,7 @@ chunk_boot(void)
|
||||
* so pages_map will always take fast path.
|
||||
*/
|
||||
if (!opt_lg_chunk) {
|
||||
opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
|
||||
opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
|
||||
- 1;
|
||||
}
|
||||
#else
|
||||
@ -730,32 +785,11 @@ chunk_boot(void)
|
||||
chunksize_mask = chunksize - 1;
|
||||
chunk_npages = (chunksize >> LG_PAGE);
|
||||
|
||||
if (have_dss && chunk_dss_boot())
|
||||
return (true);
|
||||
if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
|
||||
opt_lg_chunk, chunks_rtree_node_alloc, NULL))
|
||||
if (have_dss)
|
||||
chunk_dss_boot();
|
||||
if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
|
||||
opt_lg_chunk), chunks_rtree_node_alloc, NULL))
|
||||
return (true);
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_prefork(void)
|
||||
{
|
||||
|
||||
chunk_dss_prefork();
|
||||
}
|
||||
|
||||
void
|
||||
chunk_postfork_parent(void)
|
||||
{
|
||||
|
||||
chunk_dss_postfork_parent();
|
||||
}
|
||||
|
||||
void
|
||||
chunk_postfork_child(void)
|
||||
{
|
||||
|
||||
chunk_dss_postfork_child();
|
||||
}
|
||||
|
188
deps/jemalloc/src/chunk_dss.c
vendored
188
deps/jemalloc/src/chunk_dss.c
vendored
@ -10,20 +10,19 @@ const char *dss_prec_names[] = {
|
||||
"N/A"
|
||||
};
|
||||
|
||||
/* Current dss precedence default, used when creating new arenas. */
|
||||
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
|
||||
|
||||
/*
|
||||
* Protects sbrk() calls. This avoids malloc races among threads, though it
|
||||
* does not protect against races with threads that call sbrk() directly.
|
||||
* Current dss precedence default, used when creating new arenas. NB: This is
|
||||
* stored as unsigned rather than dss_prec_t because in principle there's no
|
||||
* guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
|
||||
* atomic operations to synchronize the setting.
|
||||
*/
|
||||
static malloc_mutex_t dss_mtx;
|
||||
static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT;
|
||||
|
||||
/* Base address of the DSS. */
|
||||
static void *dss_base;
|
||||
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
|
||||
static void *dss_prev;
|
||||
/* Current upper limit on DSS addresses. */
|
||||
/* Atomic boolean indicating whether the DSS is exhausted. */
|
||||
static unsigned dss_exhausted;
|
||||
/* Atomic current upper limit on DSS addresses. */
|
||||
static void *dss_max;
|
||||
|
||||
/******************************************************************************/
|
||||
@ -47,9 +46,7 @@ chunk_dss_prec_get(void)
|
||||
|
||||
if (!have_dss)
|
||||
return (dss_prec_disabled);
|
||||
malloc_mutex_lock(&dss_mtx);
|
||||
ret = dss_prec_default;
|
||||
malloc_mutex_unlock(&dss_mtx);
|
||||
ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
@ -59,15 +56,46 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
|
||||
|
||||
if (!have_dss)
|
||||
return (dss_prec != dss_prec_disabled);
|
||||
malloc_mutex_lock(&dss_mtx);
|
||||
dss_prec_default = dss_prec;
|
||||
malloc_mutex_unlock(&dss_mtx);
|
||||
atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
|
||||
return (false);
|
||||
}
|
||||
|
||||
static void *
|
||||
chunk_dss_max_update(void *new_addr)
|
||||
{
|
||||
void *max_cur;
|
||||
spin_t spinner;
|
||||
|
||||
/*
|
||||
* Get the current end of the DSS as max_cur and assure that dss_max is
|
||||
* up to date.
|
||||
*/
|
||||
spin_init(&spinner);
|
||||
while (true) {
|
||||
void *max_prev = atomic_read_p(&dss_max);
|
||||
|
||||
max_cur = chunk_dss_sbrk(0);
|
||||
if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
|
||||
/*
|
||||
* Another thread optimistically updated dss_max. Wait
|
||||
* for it to finish.
|
||||
*/
|
||||
spin_adaptive(&spinner);
|
||||
continue;
|
||||
}
|
||||
if (!atomic_cas_p(&dss_max, max_prev, max_cur))
|
||||
break;
|
||||
}
|
||||
/* Fixed new_addr can only be supported if it is at the edge of DSS. */
|
||||
if (new_addr != NULL && max_cur != new_addr)
|
||||
return (NULL);
|
||||
|
||||
return (max_cur);
|
||||
}
|
||||
|
||||
void *
|
||||
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
bool *zero, bool *commit)
|
||||
chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
|
||||
size_t alignment, bool *zero, bool *commit)
|
||||
{
|
||||
cassert(have_dss);
|
||||
assert(size > 0 && (size & chunksize_mask) == 0);
|
||||
@ -80,28 +108,20 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
if ((intptr_t)size < 0)
|
||||
return (NULL);
|
||||
|
||||
malloc_mutex_lock(&dss_mtx);
|
||||
if (dss_prev != (void *)-1) {
|
||||
|
||||
if (!atomic_read_u(&dss_exhausted)) {
|
||||
/*
|
||||
* The loop is necessary to recover from races with other
|
||||
* threads that are using the DSS for something other than
|
||||
* malloc.
|
||||
*/
|
||||
do {
|
||||
void *ret, *cpad, *dss_next;
|
||||
while (true) {
|
||||
void *ret, *cpad, *max_cur, *dss_next, *dss_prev;
|
||||
size_t gap_size, cpad_size;
|
||||
intptr_t incr;
|
||||
/* Avoid an unnecessary system call. */
|
||||
if (new_addr != NULL && dss_max != new_addr)
|
||||
break;
|
||||
|
||||
/* Get the current end of the DSS. */
|
||||
dss_max = chunk_dss_sbrk(0);
|
||||
|
||||
/* Make sure the earlier condition still holds. */
|
||||
if (new_addr != NULL && dss_max != new_addr)
|
||||
break;
|
||||
max_cur = chunk_dss_max_update(new_addr);
|
||||
if (max_cur == NULL)
|
||||
goto label_oom;
|
||||
|
||||
/*
|
||||
* Calculate how much padding is necessary to
|
||||
@ -120,22 +140,29 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
|
||||
dss_next = (void *)((uintptr_t)ret + size);
|
||||
if ((uintptr_t)ret < (uintptr_t)dss_max ||
|
||||
(uintptr_t)dss_next < (uintptr_t)dss_max) {
|
||||
/* Wrap-around. */
|
||||
malloc_mutex_unlock(&dss_mtx);
|
||||
return (NULL);
|
||||
}
|
||||
(uintptr_t)dss_next < (uintptr_t)dss_max)
|
||||
goto label_oom; /* Wrap-around. */
|
||||
incr = gap_size + cpad_size + size;
|
||||
|
||||
/*
|
||||
* Optimistically update dss_max, and roll back below if
|
||||
* sbrk() fails. No other thread will try to extend the
|
||||
* DSS while dss_max is greater than the current DSS
|
||||
* max reported by sbrk(0).
|
||||
*/
|
||||
if (atomic_cas_p(&dss_max, max_cur, dss_next))
|
||||
continue;
|
||||
|
||||
/* Try to allocate. */
|
||||
dss_prev = chunk_dss_sbrk(incr);
|
||||
if (dss_prev == dss_max) {
|
||||
if (dss_prev == max_cur) {
|
||||
/* Success. */
|
||||
dss_max = dss_next;
|
||||
malloc_mutex_unlock(&dss_mtx);
|
||||
if (cpad_size != 0) {
|
||||
chunk_hooks_t chunk_hooks =
|
||||
CHUNK_HOOKS_INITIALIZER;
|
||||
chunk_dalloc_wrapper(arena,
|
||||
chunk_dalloc_wrapper(tsdn, arena,
|
||||
&chunk_hooks, cpad, cpad_size,
|
||||
arena_extent_sn_next(arena), false,
|
||||
true);
|
||||
}
|
||||
if (*zero) {
|
||||
@ -147,68 +174,65 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
*commit = pages_decommit(ret, size);
|
||||
return (ret);
|
||||
}
|
||||
} while (dss_prev != (void *)-1);
|
||||
}
|
||||
malloc_mutex_unlock(&dss_mtx);
|
||||
|
||||
/*
|
||||
* Failure, whether due to OOM or a race with a raw
|
||||
* sbrk() call from outside the allocator. Try to roll
|
||||
* back optimistic dss_max update; if rollback fails,
|
||||
* it's due to another caller of this function having
|
||||
* succeeded since this invocation started, in which
|
||||
* case rollback is not necessary.
|
||||
*/
|
||||
atomic_cas_p(&dss_max, dss_next, max_cur);
|
||||
if (dss_prev == (void *)-1) {
|
||||
/* OOM. */
|
||||
atomic_write_u(&dss_exhausted, (unsigned)true);
|
||||
goto label_oom;
|
||||
}
|
||||
}
|
||||
}
|
||||
label_oom:
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
static bool
|
||||
chunk_in_dss_helper(void *chunk, void *max)
|
||||
{
|
||||
|
||||
return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
|
||||
(uintptr_t)max);
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_in_dss(void *chunk)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
cassert(have_dss);
|
||||
|
||||
malloc_mutex_lock(&dss_mtx);
|
||||
if ((uintptr_t)chunk >= (uintptr_t)dss_base
|
||||
&& (uintptr_t)chunk < (uintptr_t)dss_max)
|
||||
ret = true;
|
||||
else
|
||||
ret = false;
|
||||
malloc_mutex_unlock(&dss_mtx);
|
||||
|
||||
return (ret);
|
||||
return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_dss_mergeable(void *chunk_a, void *chunk_b)
|
||||
{
|
||||
void *max;
|
||||
|
||||
cassert(have_dss);
|
||||
|
||||
max = atomic_read_p(&dss_max);
|
||||
return (chunk_in_dss_helper(chunk_a, max) ==
|
||||
chunk_in_dss_helper(chunk_b, max));
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dss_boot(void)
|
||||
{
|
||||
|
||||
cassert(have_dss);
|
||||
|
||||
if (malloc_mutex_init(&dss_mtx))
|
||||
return (true);
|
||||
dss_base = chunk_dss_sbrk(0);
|
||||
dss_prev = dss_base;
|
||||
dss_exhausted = (unsigned)(dss_base == (void *)-1);
|
||||
dss_max = dss_base;
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dss_prefork(void)
|
||||
{
|
||||
|
||||
if (have_dss)
|
||||
malloc_mutex_prefork(&dss_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dss_postfork_parent(void)
|
||||
{
|
||||
|
||||
if (have_dss)
|
||||
malloc_mutex_postfork_parent(&dss_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dss_postfork_child(void)
|
||||
{
|
||||
|
||||
if (have_dss)
|
||||
malloc_mutex_postfork_child(&dss_mtx);
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
18
deps/jemalloc/src/chunk_mmap.c
vendored
18
deps/jemalloc/src/chunk_mmap.c
vendored
@ -16,23 +16,22 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
|
||||
do {
|
||||
void *pages;
|
||||
size_t leadsize;
|
||||
pages = pages_map(NULL, alloc_size);
|
||||
pages = pages_map(NULL, alloc_size, commit);
|
||||
if (pages == NULL)
|
||||
return (NULL);
|
||||
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
|
||||
(uintptr_t)pages;
|
||||
ret = pages_trim(pages, alloc_size, leadsize, size);
|
||||
ret = pages_trim(pages, alloc_size, leadsize, size, commit);
|
||||
} while (ret == NULL);
|
||||
|
||||
assert(ret != NULL);
|
||||
*zero = true;
|
||||
if (!*commit)
|
||||
*commit = pages_decommit(ret, size);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
void *
|
||||
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
|
||||
chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
|
||||
bool *commit)
|
||||
{
|
||||
void *ret;
|
||||
size_t offset;
|
||||
@ -53,9 +52,10 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
|
||||
assert(alignment != 0);
|
||||
assert((alignment & chunksize_mask) == 0);
|
||||
|
||||
ret = pages_map(NULL, size);
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
ret = pages_map(new_addr, size, commit);
|
||||
if (ret == NULL || ret == new_addr)
|
||||
return (ret);
|
||||
assert(new_addr == NULL);
|
||||
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
|
||||
if (offset != 0) {
|
||||
pages_unmap(ret, size);
|
||||
@ -64,8 +64,6 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
|
||||
|
||||
assert(ret != NULL);
|
||||
*zero = true;
|
||||
if (!*commit)
|
||||
*commit = pages_decommit(ret, size);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
|
43
deps/jemalloc/src/ckh.c
vendored
43
deps/jemalloc/src/ckh.c
vendored
@ -99,7 +99,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
|
||||
* Cycle through the cells in the bucket, starting at a random position.
|
||||
* The randomness avoids worst-case search overhead as buckets fill up.
|
||||
*/
|
||||
prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
|
||||
offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
|
||||
LG_CKH_BUCKET_CELLS);
|
||||
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
|
||||
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
|
||||
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
|
||||
@ -141,7 +142,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
|
||||
* were an item for which both hashes indicated the same
|
||||
* bucket.
|
||||
*/
|
||||
prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
|
||||
i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
|
||||
LG_CKH_BUCKET_CELLS);
|
||||
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
|
||||
assert(cell->key != NULL);
|
||||
|
||||
@ -247,8 +249,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
|
||||
{
|
||||
bool ret;
|
||||
ckhc_t *tab, *ttab;
|
||||
size_t lg_curcells;
|
||||
unsigned lg_prevbuckets;
|
||||
unsigned lg_prevbuckets, lg_curcells;
|
||||
|
||||
#ifdef CKH_COUNT
|
||||
ckh->ngrows++;
|
||||
@ -266,12 +267,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
|
||||
|
||||
lg_curcells++;
|
||||
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
|
||||
if (usize == 0) {
|
||||
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
|
||||
true, NULL);
|
||||
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
|
||||
true, NULL, true, arena_ichoose(tsd, NULL));
|
||||
if (tab == NULL) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
@ -283,12 +284,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
|
||||
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
|
||||
|
||||
if (!ckh_rebuild(ckh, tab)) {
|
||||
idalloctm(tsd, tab, tcache_get(tsd, false), true);
|
||||
idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
|
||||
break;
|
||||
}
|
||||
|
||||
/* Rebuilding failed, so back out partially rebuilt table. */
|
||||
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
|
||||
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
|
||||
ckh->tab = tab;
|
||||
ckh->lg_curbuckets = lg_prevbuckets;
|
||||
}
|
||||
@ -302,8 +303,8 @@ static void
|
||||
ckh_shrink(tsd_t *tsd, ckh_t *ckh)
|
||||
{
|
||||
ckhc_t *tab, *ttab;
|
||||
size_t lg_curcells, usize;
|
||||
unsigned lg_prevbuckets;
|
||||
size_t usize;
|
||||
unsigned lg_prevbuckets, lg_curcells;
|
||||
|
||||
/*
|
||||
* It is possible (though unlikely, given well behaved hashes) that the
|
||||
@ -312,10 +313,10 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
|
||||
lg_prevbuckets = ckh->lg_curbuckets;
|
||||
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
|
||||
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
|
||||
if (usize == 0)
|
||||
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
|
||||
return;
|
||||
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
|
||||
NULL);
|
||||
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
|
||||
true, arena_ichoose(tsd, NULL));
|
||||
if (tab == NULL) {
|
||||
/*
|
||||
* An OOM error isn't worth propagating, since it doesn't
|
||||
@ -330,7 +331,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
|
||||
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
|
||||
|
||||
if (!ckh_rebuild(ckh, tab)) {
|
||||
idalloctm(tsd, tab, tcache_get(tsd, false), true);
|
||||
idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
|
||||
#ifdef CKH_COUNT
|
||||
ckh->nshrinks++;
|
||||
#endif
|
||||
@ -338,7 +339,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
|
||||
}
|
||||
|
||||
/* Rebuilding failed, so back out partially rebuilt table. */
|
||||
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
|
||||
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
|
||||
ckh->tab = tab;
|
||||
ckh->lg_curbuckets = lg_prevbuckets;
|
||||
#ifdef CKH_COUNT
|
||||
@ -387,12 +388,12 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
|
||||
ckh->keycomp = keycomp;
|
||||
|
||||
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
|
||||
if (usize == 0) {
|
||||
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
}
|
||||
ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
|
||||
NULL);
|
||||
ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
|
||||
NULL, true, arena_ichoose(tsd, NULL));
|
||||
if (ckh->tab == NULL) {
|
||||
ret = true;
|
||||
goto label_return;
|
||||
@ -421,9 +422,9 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
|
||||
(unsigned long long)ckh->nrelocs);
|
||||
#endif
|
||||
|
||||
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
|
||||
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
|
||||
if (config_debug)
|
||||
memset(ckh, 0x5a, sizeof(ckh_t));
|
||||
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
|
||||
}
|
||||
|
||||
size_t
|
||||
|
789
deps/jemalloc/src/ctl.c
vendored
789
deps/jemalloc/src/ctl.c
vendored
File diff suppressed because it is too large
Load Diff
80
deps/jemalloc/src/extent.c
vendored
80
deps/jemalloc/src/extent.c
vendored
@@ -3,45 +3,48 @@

 /******************************************************************************/

-/*
- * Round down to the nearest chunk size that can actually be requested during
- * normal huge allocation.
- */
 JEMALLOC_INLINE_C size_t
 extent_quantize(size_t size)
 {
+   size_t ret;
+   szind_t ind;

-   /*
-    * Round down to the nearest chunk size that can actually be requested
-    * during normal huge allocation.
-    */
-   return (index2size(size2index(size + 1) - 1));
-}
+   assert(size > 0);

-JEMALLOC_INLINE_C int
-extent_szad_comp(extent_node_t *a, extent_node_t *b)
-{
-   int ret;
-   size_t a_qsize = extent_quantize(extent_node_size_get(a));
-   size_t b_qsize = extent_quantize(extent_node_size_get(b));
-
-   /*
-    * Compare based on quantized size rather than size, in order to sort
-    * equally useful extents only by address.
-    */
-   ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
-   if (ret == 0) {
-       uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
-       uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
-
-       ret = (a_addr > b_addr) - (a_addr < b_addr);
+   ind = size2index(size + 1);
+   if (ind == 0) {
+       /* Avoid underflow. */
+       return (index2size(0));
    }
+   ret = index2size(ind - 1);
+   assert(ret <= size);
    return (ret);
 }

-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
-    extent_szad_comp)
+JEMALLOC_INLINE_C int
+extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
+{
+   size_t a_qsize = extent_quantize(extent_node_size_get(a));
+   size_t b_qsize = extent_quantize(extent_node_size_get(b));
+
+   return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
+}

 JEMALLOC_INLINE_C int
-extent_ad_comp(extent_node_t *a, extent_node_t *b)
+extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
+{
+   size_t a_sn = extent_node_sn_get(a);
+   size_t b_sn = extent_node_sn_get(b);
+
+   return ((a_sn > b_sn) - (a_sn < b_sn));
+}
+
+JEMALLOC_INLINE_C int
+extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
 {
    uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
    uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
@@ -49,5 +52,26 @@ extent_ad_comp(extent_node_t *a, extent_node_t *b)
    return ((a_addr > b_addr) - (a_addr < b_addr));
 }

+JEMALLOC_INLINE_C int
+extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
+{
+   int ret;
+
+   ret = extent_sz_comp(a, b);
+   if (ret != 0)
+       return (ret);
+
+   ret = extent_sn_comp(a, b);
+   if (ret != 0)
+       return (ret);
+
+   ret = extent_ad_comp(a, b);
+   return (ret);
+}
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
+    extent_szsnad_comp)
+
 /* Generate red-black tree functions. */
 rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
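The new extent_szsnad_comp() is a lexicographic composition of three three-way comparators: quantized size first, then the serial number stamped on the node when its backing chunk is created, then address as the final tie-break. A standalone sketch of the same composition pattern, using an illustrative struct rather than jemalloc's extent_node_t:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    size_t qsize;   /* quantized size */
    size_t sn;      /* serial number: lower = older mapping */
    uintptr_t addr;
} ext_t;

/* Three-way compare on one key; (a > b) - (a < b) avoids overflow. */
static int cmp_sz(const ext_t *a, const ext_t *b)
    { return (a->qsize > b->qsize) - (a->qsize < b->qsize); }
static int cmp_sn(const ext_t *a, const ext_t *b)
    { return (a->sn > b->sn) - (a->sn < b->sn); }
static int cmp_ad(const ext_t *a, const ext_t *b)
    { return (a->addr > b->addr) - (a->addr < b->addr); }

/* Lexicographic composition: size, then serial number, then address. */
static int
cmp_szsnad(const void *ap, const void *bp)
{
    const ext_t *a = ap, *b = bp;
    int ret = cmp_sz(a, b);
    if (ret == 0)
        ret = cmp_sn(a, b);
    if (ret == 0)
        ret = cmp_ad(a, b);
    return ret;
}

int
main(void)
{
    ext_t v[] = {
        {4096, 7, 0x7000}, {4096, 2, 0x9000}, {8192, 1, 0x1000},
    };
    qsort(v, 3, sizeof(v[0]), cmp_szsnad);
    for (int i = 0; i < 3; i++)
        printf("%zu sn=%zu @%#lx\n", v[i].qsize, v[i].sn,
            (unsigned long)v[i].addr);
    return 0;
}

The subtraction-free (a > b) - (a < b) idiom yields -1/0/1 directly and is the same trick the real comparators use.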
238
deps/jemalloc/src/huge.c
vendored
@@ -15,12 +15,21 @@ huge_node_get(const void *ptr)
 }

 static bool
-huge_node_set(const void *ptr, extent_node_t *node)
+huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
 {

    assert(extent_node_addr_get(node) == ptr);
    assert(!extent_node_achunk_get(node));
-   return (chunk_register(ptr, node));
+   return (chunk_register(tsdn, ptr, node));
+}
+
+static void
+huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+{
+   bool err;
+
+   err = huge_node_set(tsdn, ptr, node);
+   assert(!err);
 }

 static void
@@ -31,39 +40,39 @@ huge_node_unset(const void *ptr, const extent_node_t *node)
 }

 void *
-huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
-    tcache_t *tcache)
+huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
 {
-   size_t usize;

-   usize = s2u(size);
-   if (usize == 0) {
-       /* size_t overflow. */
-       return (NULL);
-   }
+   assert(usize == s2u(usize));

-   return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
+   return (huge_palloc(tsdn, arena, usize, chunksize, zero));
 }

 void *
-huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
-    bool zero, tcache_t *tcache)
+huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+    bool zero)
 {
    void *ret;
-   size_t usize;
+   size_t ausize;
+   arena_t *iarena;
    extent_node_t *node;
+   size_t sn;
    bool is_zeroed;

    /* Allocate one or more contiguous chunks for this request. */

-   usize = sa2u(size, alignment);
-   if (unlikely(usize == 0))
+   assert(!tsdn_null(tsdn) || arena != NULL);
+
+   ausize = sa2u(usize, alignment);
+   if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
        return (NULL);
-   assert(usize >= chunksize);
+   assert(ausize >= chunksize);

    /* Allocate an extent node with which to track the chunk. */
-   node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
-       CACHELINE, false, tcache, true, arena);
+   iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
+       a0get();
+   node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
+       CACHELINE, false, NULL, true, iarena);
    if (node == NULL)
        return (NULL);

@@ -72,33 +81,35 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
     * it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed = zero;
-   arena = arena_choose(tsd, arena);
-   if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
-       size, alignment, &is_zeroed)) == NULL) {
-       idalloctm(tsd, node, tcache, true);
+   if (likely(!tsdn_null(tsdn)))
+       arena = arena_choose(tsdn_tsd(tsdn), arena);
+   if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
+       arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
+       idalloctm(tsdn, node, NULL, true, true);
        return (NULL);
    }

-   extent_node_init(node, arena, ret, size, is_zeroed, true);
+   extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);

-   if (huge_node_set(ret, node)) {
-       arena_chunk_dalloc_huge(arena, ret, size);
-       idalloctm(tsd, node, tcache, true);
+   if (huge_node_set(tsdn, ret, node)) {
+       arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
+       idalloctm(tsdn, node, NULL, true, true);
        return (NULL);
    }

    /* Insert node into huge. */
-   malloc_mutex_lock(&arena->huge_mtx);
+   malloc_mutex_lock(tsdn, &arena->huge_mtx);
    ql_elm_new(node, ql_link);
    ql_tail_insert(&arena->huge, node, ql_link);
-   malloc_mutex_unlock(&arena->huge_mtx);
+   malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    if (zero || (config_fill && unlikely(opt_zero))) {
        if (!is_zeroed)
-           memset(ret, 0, size);
+           memset(ret, 0, usize);
    } else if (config_fill && unlikely(opt_junk_alloc))
-       memset(ret, 0xa5, size);
+       memset(ret, JEMALLOC_ALLOC_JUNK, usize);

+   arena_decay_tick(tsdn, arena);
    return (ret);
 }

@@ -116,7 +127,7 @@ huge_dalloc_junk(void *ptr, size_t usize)
         * unmapped.
         */
        if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
-           memset(ptr, 0x5a, usize);
+           memset(ptr, JEMALLOC_FREE_JUNK, usize);
    }
 }
 #ifdef JEMALLOC_JET
@@ -126,8 +137,8 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
 #endif

 static void
-huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
-    size_t usize_max, bool zero)
+huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
+    size_t usize_min, size_t usize_max, bool zero)
 {
    size_t usize, usize_next;
    extent_node_t *node;
@@ -151,24 +162,28 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
    if (oldsize > usize) {
        size_t sdiff = oldsize - usize;
        if (config_fill && unlikely(opt_junk_free)) {
-           memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
+           memset((void *)((uintptr_t)ptr + usize),
+               JEMALLOC_FREE_JUNK, sdiff);
            post_zeroed = false;
        } else {
-           post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
-               ptr, CHUNK_CEILING(oldsize), usize, sdiff);
+           post_zeroed = !chunk_purge_wrapper(tsdn, arena,
+               &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
+               sdiff);
        }
    } else
        post_zeroed = pre_zeroed;

-   malloc_mutex_lock(&arena->huge_mtx);
+   malloc_mutex_lock(tsdn, &arena->huge_mtx);
    /* Update the size of the huge allocation. */
+   huge_node_unset(ptr, node);
    assert(extent_node_size_get(node) != usize);
    extent_node_size_set(node, usize);
+   huge_node_reset(tsdn, ptr, node);
    /* Update zeroed. */
    extent_node_zeroed_set(node, post_zeroed);
-   malloc_mutex_unlock(&arena->huge_mtx);
+   malloc_mutex_unlock(tsdn, &arena->huge_mtx);

-   arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
+   arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);

    /* Fill if necessary (growing). */
    if (oldsize < usize) {
@@ -178,14 +193,15 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
               usize - oldsize);
        }
    } else if (config_fill && unlikely(opt_junk_alloc)) {
-       memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
-           oldsize);
+       memset((void *)((uintptr_t)ptr + oldsize),
+           JEMALLOC_ALLOC_JUNK, usize - oldsize);
    }
 }

 static bool
-huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
+huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
+    size_t usize)
 {
    extent_node_t *node;
    arena_t *arena;
@@ -196,7 +212,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
    pre_zeroed = extent_node_zeroed_get(node);
-   chunk_hooks = chunk_hooks_get(arena);
+   chunk_hooks = chunk_hooks_get(tsdn, arena);

    assert(oldsize > usize);

@@ -213,53 +229,59 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
               sdiff);
            post_zeroed = false;
        } else {
-           post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
-               CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
-               CHUNK_CEILING(oldsize),
+           post_zeroed = !chunk_purge_wrapper(tsdn, arena,
+               &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
+               usize), CHUNK_CEILING(oldsize),
                CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
        }
    } else
        post_zeroed = pre_zeroed;

-   malloc_mutex_lock(&arena->huge_mtx);
+   malloc_mutex_lock(tsdn, &arena->huge_mtx);
    /* Update the size of the huge allocation. */
+   huge_node_unset(ptr, node);
    extent_node_size_set(node, usize);
+   huge_node_reset(tsdn, ptr, node);
    /* Update zeroed. */
    extent_node_zeroed_set(node, post_zeroed);
-   malloc_mutex_unlock(&arena->huge_mtx);
+   malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    /* Zap the excess chunks. */
-   arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
+   arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
+       extent_node_sn_get(node));

    return (false);
 }

 static bool
-huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
+huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
+    size_t usize, bool zero) {
    extent_node_t *node;
    arena_t *arena;
    bool is_zeroed_subchunk, is_zeroed_chunk;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
-   malloc_mutex_lock(&arena->huge_mtx);
+   malloc_mutex_lock(tsdn, &arena->huge_mtx);
    is_zeroed_subchunk = extent_node_zeroed_get(node);
-   malloc_mutex_unlock(&arena->huge_mtx);
+   malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    /*
-    * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
-    * that it is possible to make correct junk/zero fill decisions below.
+    * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
+    * update extent's zeroed field, and zero as necessary.
     */
-   is_zeroed_chunk = zero;
-
-   if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
+   is_zeroed_chunk = false;
+   if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
        &is_zeroed_chunk))
        return (true);

-   malloc_mutex_lock(&arena->huge_mtx);
-   /* Update the size of the huge allocation. */
+   malloc_mutex_lock(tsdn, &arena->huge_mtx);
+   huge_node_unset(ptr, node);
    extent_node_size_set(node, usize);
-   malloc_mutex_unlock(&arena->huge_mtx);
+   extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+       is_zeroed_chunk);
+   huge_node_reset(tsdn, ptr, node);
+   malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    if (zero || (config_fill && unlikely(opt_zero))) {
        if (!is_zeroed_subchunk) {
@@ -272,19 +294,21 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
            CHUNK_CEILING(oldsize));
        }
    } else if (config_fill && unlikely(opt_junk_alloc)) {
-       memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
-           oldsize);
+       memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
+           usize - oldsize);
    }

    return (false);
 }

 bool
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
     size_t usize_max, bool zero)
 {

-   assert(s2u(oldsize) == oldsize);
+   /* The following should have been caught by callers. */
+   assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

    /* Both allocations must be huge to avoid a move. */
    if (oldsize < chunksize || usize_max < chunksize)
@@ -292,13 +316,18 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,

    if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
        /* Attempt to expand the allocation in-place. */
-       if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
+       if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
+           zero)) {
+           arena_decay_tick(tsdn, huge_aalloc(ptr));
            return (false);
+       }
        /* Try again, this time with usize_min. */
        if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
-           CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
-           oldsize, usize_min, zero))
+           CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
+           ptr, oldsize, usize_min, zero)) {
+           arena_decay_tick(tsdn, huge_aalloc(ptr));
            return (false);
+       }
    }

    /*
@@ -307,36 +336,46 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
     */
    if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
        && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
-       huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
-           zero);
+       huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
+           usize_max, zero);
+       arena_decay_tick(tsdn, huge_aalloc(ptr));
        return (false);
    }

    /* Attempt to shrink the allocation in-place. */
-   if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
-       return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
+   if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
+       if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
+           usize_max)) {
+           arena_decay_tick(tsdn, huge_aalloc(ptr));
+           return (false);
+       }
+   }
    return (true);
 }

 static void *
-huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
-    size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+    size_t alignment, bool zero)
 {

    if (alignment <= chunksize)
-       return (huge_malloc(tsd, arena, usize, zero, tcache));
-   return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
+       return (huge_malloc(tsdn, arena, usize, zero));
+   return (huge_palloc(tsdn, arena, usize, alignment, zero));
 }

 void *
-huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
-    size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache)
 {
    void *ret;
    size_t copysize;

+   /* The following should have been caught by callers. */
+   assert(usize > 0 && usize <= HUGE_MAXCLASS);
+
    /* Try to avoid moving the allocation. */
-   if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
+   if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
+       zero))
        return (ptr);

    /*
@@ -344,19 +383,19 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
     * different size class. In that case, fall back to allocating new
     * space and copying.
     */
-   ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
-       tcache);
+   ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
+       zero);
    if (ret == NULL)
        return (NULL);

    copysize = (usize < oldsize) ? usize : oldsize;
    memcpy(ret, ptr, copysize);
-   isqalloc(tsd, ptr, oldsize, tcache);
+   isqalloc(tsd, ptr, oldsize, tcache, true);
    return (ret);
 }

 void
-huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
+huge_dalloc(tsdn_t *tsdn, void *ptr)
 {
    extent_node_t *node;
    arena_t *arena;
@@ -364,15 +403,18 @@ huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
    huge_node_unset(ptr, node);
-   malloc_mutex_lock(&arena->huge_mtx);
+   malloc_mutex_lock(tsdn, &arena->huge_mtx);
    ql_remove(&arena->huge, node, ql_link);
-   malloc_mutex_unlock(&arena->huge_mtx);
+   malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    huge_dalloc_junk(extent_node_addr_get(node),
        extent_node_size_get(node));
-   arena_chunk_dalloc_huge(extent_node_arena_get(node),
-       extent_node_addr_get(node), extent_node_size_get(node));
-   idalloctm(tsd, node, tcache, true);
+   arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
+       extent_node_addr_get(node), extent_node_size_get(node),
+       extent_node_sn_get(node));
+   idalloctm(tsdn, node, NULL, true, true);
+
+   arena_decay_tick(tsdn, arena);
 }

 arena_t *
@@ -383,7 +425,7 @@ huge_aalloc(const void *ptr)
 }

 size_t
-huge_salloc(const void *ptr)
+huge_salloc(tsdn_t *tsdn, const void *ptr)
 {
    size_t size;
    extent_node_t *node;
@@ -391,15 +433,15 @@ huge_salloc(const void *ptr)

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
-   malloc_mutex_lock(&arena->huge_mtx);
+   malloc_mutex_lock(tsdn, &arena->huge_mtx);
    size = extent_node_size_get(node);
-   malloc_mutex_unlock(&arena->huge_mtx);
+   malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    return (size);
 }

 prof_tctx_t *
-huge_prof_tctx_get(const void *ptr)
+huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
 {
    prof_tctx_t *tctx;
    extent_node_t *node;
@@ -407,29 +449,29 @@ huge_prof_tctx_get(const void *ptr)

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
-   malloc_mutex_lock(&arena->huge_mtx);
+   malloc_mutex_lock(tsdn, &arena->huge_mtx);
    tctx = extent_node_prof_tctx_get(node);
-   malloc_mutex_unlock(&arena->huge_mtx);
+   malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    return (tctx);
 }

 void
-huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
+huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
 {
    extent_node_t *node;
    arena_t *arena;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
-   malloc_mutex_lock(&arena->huge_mtx);
+   malloc_mutex_lock(tsdn, &arena->huge_mtx);
    extent_node_prof_tctx_set(node, tctx);
-   malloc_mutex_unlock(&arena->huge_mtx);
+   malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 }

 void
-huge_prof_tctx_reset(const void *ptr)
+huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
 {

-   huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
+   huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
 }
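The control flow of huge_ralloc_no_move() reduces to a comparison of chunk footprints: an in-place resize is possible when the chunk footprint grows (expand), stays the same (similar), or shrinks (shrink); otherwise the caller falls back to allocate-copy-free. A compact standalone sketch of that case analysis, assuming an illustrative 2 MiB chunk size (jemalloc's is configurable):

#include <stdio.h>

/* Simplified stand-ins; jemalloc's real chunk size is configurable. */
#define CHUNKSIZE ((size_t)1 << 21)
#define CHUNK_CEILING(s) (((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))

static const char *
no_move_case(size_t oldsize, size_t usize)
{
    if (CHUNK_CEILING(usize) > CHUNK_CEILING(oldsize))
        return "expand in place (map new trailing chunks)";
    if (CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize))
        return "similar: same chunks, just update the node size";
    return "shrink in place (purge/unmap trailing chunks)";
}

int
main(void)
{
    printf("%s\n", no_move_case(3 << 20, 5 << 20));
    printf("%s\n", no_move_case(3 << 20, 4 << 20));
    printf("%s\n", no_move_case(5 << 20, 3 << 20));
    return 0;
}

The arena_decay_tick() calls added on every successful path feed the new time-based purging machinery, which is why they appear after each case rather than once at the top.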
1500
deps/jemalloc/src/jemalloc.c
vendored
File diff suppressed because it is too large
23
deps/jemalloc/src/mutex.c
vendored
@@ -69,7 +69,7 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
 #endif

 bool
-malloc_mutex_init(malloc_mutex_t *mutex)
+malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
 {

 #ifdef _WIN32
@@ -80,6 +80,8 @@ malloc_mutex_init(malloc_mutex_t *mutex)
        _CRT_SPINCOUNT))
        return (true);
 #  endif
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+   mutex->lock = OS_UNFAIR_LOCK_INIT;
 #elif (defined(JEMALLOC_OSSPIN))
    mutex->lock = 0;
 #elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -103,31 +105,34 @@ malloc_mutex_init(malloc_mutex_t *mutex)
    }
    pthread_mutexattr_destroy(&attr);
 #endif
+   if (config_debug)
+       witness_init(&mutex->witness, name, rank, NULL);
    return (false);
 }

 void
-malloc_mutex_prefork(malloc_mutex_t *mutex)
+malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
 {

-   malloc_mutex_lock(mutex);
+   malloc_mutex_lock(tsdn, mutex);
 }

 void
-malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
+malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
 {

-   malloc_mutex_unlock(mutex);
+   malloc_mutex_unlock(tsdn, mutex);
 }

 void
-malloc_mutex_postfork_child(malloc_mutex_t *mutex)
+malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
 {

 #ifdef JEMALLOC_MUTEX_INIT_CB
-   malloc_mutex_unlock(mutex);
+   malloc_mutex_unlock(tsdn, mutex);
 #else
-   if (malloc_mutex_init(mutex)) {
+   if (malloc_mutex_init(mutex, mutex->witness.name,
+       mutex->witness.rank)) {
        malloc_printf("<jemalloc>: Error re-initializing mutex in "
            "child\n");
        if (opt_abort)
@@ -137,7 +142,7 @@ malloc_mutex_postfork_child(malloc_mutex_t *mutex)
 }

 bool
-mutex_boot(void)
+malloc_mutex_boot(void)
 {

 #ifdef JEMALLOC_MUTEX_INIT_CB
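The name/rank parameters and the witness_init() call added to malloc_mutex_init() exist so debug builds can assert lock ordering and ownership (see the malloc_mutex_assert_owner() calls elsewhere in this diff). A minimal sketch of the ownership-tracking idea, using pthreads directly with illustrative names rather than jemalloc's witness machinery:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_t owner; /* valid only while held */
    int held;
} dbg_mutex_t;

static void
dbg_lock(dbg_mutex_t *m)
{
    pthread_mutex_lock(&m->lock);
    m->owner = pthread_self();
    m->held = 1;
}

/* The heart of an assert_owner(): the current thread must hold the lock. */
static void
dbg_assert_owner(dbg_mutex_t *m)
{
    assert(m->held && pthread_equal(m->owner, pthread_self()));
}

static void
dbg_unlock(dbg_mutex_t *m)
{
    m->held = 0;
    pthread_mutex_unlock(&m->lock);
}

int
main(void)
{
    dbg_mutex_t m = { .lock = PTHREAD_MUTEX_INITIALIZER };
    dbg_lock(&m);
    dbg_assert_owner(&m); /* would abort if called by a non-owner */
    dbg_unlock(&m);
    puts("ok");
    return 0;
}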
194
deps/jemalloc/src/nstime.c
vendored
Normal file
@@ -0,0 +1,194 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#define BILLION UINT64_C(1000000000)
+
+void
+nstime_init(nstime_t *time, uint64_t ns)
+{
+
+   time->ns = ns;
+}
+
+void
+nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
+{
+
+   time->ns = sec * BILLION + nsec;
+}
+
+uint64_t
+nstime_ns(const nstime_t *time)
+{
+
+   return (time->ns);
+}
+
+uint64_t
+nstime_sec(const nstime_t *time)
+{
+
+   return (time->ns / BILLION);
+}
+
+uint64_t
+nstime_nsec(const nstime_t *time)
+{
+
+   return (time->ns % BILLION);
+}
+
+void
+nstime_copy(nstime_t *time, const nstime_t *source)
+{
+
+   *time = *source;
+}
+
+int
+nstime_compare(const nstime_t *a, const nstime_t *b)
+{
+
+   return ((a->ns > b->ns) - (a->ns < b->ns));
+}
+
+void
+nstime_add(nstime_t *time, const nstime_t *addend)
+{
+
+   assert(UINT64_MAX - time->ns >= addend->ns);
+
+   time->ns += addend->ns;
+}
+
+void
+nstime_subtract(nstime_t *time, const nstime_t *subtrahend)
+{
+
+   assert(nstime_compare(time, subtrahend) >= 0);
+
+   time->ns -= subtrahend->ns;
+}
+
+void
+nstime_imultiply(nstime_t *time, uint64_t multiplier)
+{
+
+   assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
+       2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
+
+   time->ns *= multiplier;
+}
+
+void
+nstime_idivide(nstime_t *time, uint64_t divisor)
+{
+
+   assert(divisor != 0);
+
+   time->ns /= divisor;
+}
+
+uint64_t
+nstime_divide(const nstime_t *time, const nstime_t *divisor)
+{
+
+   assert(divisor->ns != 0);
+
+   return (time->ns / divisor->ns);
+}
+
+#ifdef _WIN32
+#  define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+   FILETIME ft;
+   uint64_t ticks_100ns;
+
+   GetSystemTimeAsFileTime(&ft);
+   ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+
+   nstime_init(time, ticks_100ns * 100);
+}
+#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+#  define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+   struct timespec ts;
+
+   clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
+   nstime_init2(time, ts.tv_sec, ts.tv_nsec);
+}
+#elif JEMALLOC_HAVE_CLOCK_MONOTONIC
+#  define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+   struct timespec ts;
+
+   clock_gettime(CLOCK_MONOTONIC, &ts);
+   nstime_init2(time, ts.tv_sec, ts.tv_nsec);
+}
+#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
+#  define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+
+   nstime_init(time, mach_absolute_time());
+}
+#else
+#  define NSTIME_MONOTONIC false
+static void
+nstime_get(nstime_t *time)
+{
+   struct timeval tv;
+
+   gettimeofday(&tv, NULL);
+   nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
+}
+#endif
+
+#ifdef JEMALLOC_JET
+#undef nstime_monotonic
+#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic)
+#endif
+bool
+nstime_monotonic(void)
+{
+
+   return (NSTIME_MONOTONIC);
+#undef NSTIME_MONOTONIC
+}
+#ifdef JEMALLOC_JET
+#undef nstime_monotonic
+#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
+nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef nstime_update
+#define nstime_update JEMALLOC_N(n_nstime_update)
+#endif
+bool
+nstime_update(nstime_t *time)
+{
+   nstime_t old_time;
+
+   nstime_copy(&old_time, time);
+   nstime_get(time);
+
+   /* Handle non-monotonic clocks. */
+   if (unlikely(nstime_compare(&old_time, time) > 0)) {
+       nstime_copy(time, &old_time);
+       return (true);
+   }
+
+   return (false);
+}
+#ifdef JEMALLOC_JET
+#undef nstime_update
+#define nstime_update JEMALLOC_N(nstime_update)
+nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
+#endif
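The whole nstime module is a thin wrapper around a single 64-bit nanosecond counter, with nstime_update() refusing to move backwards on non-monotonic clocks. A self-contained usage sketch of the same idea, re-implemented here rather than linking against jemalloc's internal symbols:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Self-contained re-implementation of the nstime shape, for illustration. */
typedef struct { uint64_t ns; } my_nstime_t;

static void
my_nstime_update(my_nstime_t *t)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    t->ns = (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int
main(void)
{
    my_nstime_t start, end;
    my_nstime_update(&start);
    /* ... work being timed ... */
    my_nstime_update(&end);
    printf("elapsed: %llu ns\n",
        (unsigned long long)(end.ns - start.ns));
    return 0;
}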
177
deps/jemalloc/src/pages.c
vendored
@@ -1,29 +1,49 @@
 #define JEMALLOC_PAGES_C_
 #include "jemalloc/internal/jemalloc_internal.h"

+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+#include <sys/sysctl.h>
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+#ifndef _WIN32
+#  define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
+#  define PAGES_PROT_DECOMMIT (PROT_NONE)
+static int mmap_flags;
+#endif
+static bool os_overcommits;
+
 /******************************************************************************/

 void *
-pages_map(void *addr, size_t size)
+pages_map(void *addr, size_t size, bool *commit)
 {
    void *ret;

    assert(size != 0);

+   if (os_overcommits)
+       *commit = true;
+
 #ifdef _WIN32
    /*
     * If VirtualAlloc can't allocate at the given address when one is
     * given, it fails and returns NULL.
     */
-   ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
+   ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
        PAGE_READWRITE);
 #else
    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     */
-   ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-       -1, 0);
+   {
+       int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+
+       ret = mmap(addr, size, prot, mmap_flags, -1, 0);
+   }
    assert(ret != NULL);

    if (ret == MAP_FAILED)
@@ -67,7 +87,8 @@ pages_unmap(void *addr, size_t size)
 }

 void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
+    bool *commit)
 {
    void *ret = (void *)((uintptr_t)addr + leadsize);

@@ -77,7 +98,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
        void *new_addr;

        pages_unmap(addr, alloc_size);
-       new_addr = pages_map(ret, size);
+       new_addr = pages_map(ret, size, commit);
        if (new_addr == ret)
            return (ret);
        if (new_addr)
@@ -101,17 +122,17 @@ static bool
 pages_commit_impl(void *addr, size_t size, bool commit)
 {

-#ifndef _WIN32
-   /*
-    * The following decommit/commit implementation is functional, but
-    * always disabled because it doesn't add value beyong improved
-    * debugging (at the cost of extra system calls) on systems that
-    * overcommit.
-    */
-   if (false) {
-       int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
-       void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
-           MAP_FIXED, -1, 0);
+   if (os_overcommits)
+       return (true);
+
+#ifdef _WIN32
+   return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
+       PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
+#else
+   {
+       int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+       void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
+           -1, 0);
        if (result == MAP_FAILED)
            return (true);
        if (result != addr) {
@@ -125,7 +146,6 @@ pages_commit_impl(void *addr, size_t size, bool commit)
        return (false);
    }
 #endif
-   return (true);
 }

 bool
@@ -150,15 +170,16 @@ pages_purge(void *addr, size_t size)
 #ifdef _WIN32
    VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
    unzeroed = true;
-#elif defined(JEMALLOC_HAVE_MADVISE)
-#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
-#    define JEMALLOC_MADV_ZEROS true
-#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
+    defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
+#  if defined(JEMALLOC_PURGE_MADVISE_FREE)
 #    define JEMALLOC_MADV_PURGE MADV_FREE
 #    define JEMALLOC_MADV_ZEROS false
+#  elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
+#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
+#    define JEMALLOC_MADV_ZEROS true
 #  else
-#    error "No madvise(2) flag defined for purging unused dirty pages."
+#    error No madvise(2) flag defined for purging unused dirty pages
 #  endif
    int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
    unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
@@ -171,3 +192,111 @@ pages_purge(void *addr, size_t size)
    return (unzeroed);
 }

+bool
+pages_huge(void *addr, size_t size)
+{
+
+   assert(PAGE_ADDR2BASE(addr) == addr);
+   assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+   return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#else
+   return (false);
+#endif
+}
+
+bool
+pages_nohuge(void *addr, size_t size)
+{
+
+   assert(PAGE_ADDR2BASE(addr) == addr);
+   assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+   return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
+#else
+   return (false);
+#endif
+}
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+static bool
+os_overcommits_sysctl(void)
+{
+   int vm_overcommit;
+   size_t sz;
+
+   sz = sizeof(vm_overcommit);
+   if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
+       return (false); /* Error. */
+
+   return ((vm_overcommit & 0x3) == 0);
+}
+#endif
+
+#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+/*
+ * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
+ * reentry during bootstrapping if another library has interposed system call
+ * wrappers.
+ */
+static bool
+os_overcommits_proc(void)
+{
+   int fd;
+   char buf[1];
+   ssize_t nread;
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
+   fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
+#else
+   fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
+#endif
+   if (fd == -1)
+       return (false); /* Error. */
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
+   nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
+#else
+   nread = read(fd, &buf, sizeof(buf));
+#endif
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
+   syscall(SYS_close, fd);
+#else
+   close(fd);
+#endif
+
+   if (nread < 1)
+       return (false); /* Error. */
+   /*
+    * /proc/sys/vm/overcommit_memory meanings:
+    * 0: Heuristic overcommit.
+    * 1: Always overcommit.
+    * 2: Never overcommit.
+    */
+   return (buf[0] == '0' || buf[0] == '1');
+}
+#endif
+
+void
+pages_boot(void)
+{
+
+#ifndef _WIN32
+   mmap_flags = MAP_PRIVATE | MAP_ANON;
+#endif
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+   os_overcommits = os_overcommits_sysctl();
+#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
+   os_overcommits = os_overcommits_proc();
+#  ifdef MAP_NORESERVE
+   if (os_overcommits)
+       mmap_flags |= MAP_NORESERVE;
+#  endif
+#else
+   os_overcommits = false;
+#endif
+}
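The thread running through these pages.c changes: when the OS overcommits, committing is a no-op; when it does not, jemalloc reserves address space with PROT_NONE and commits by remapping it read/write. A standalone Linux/BSD sketch of that reserve-then-commit pattern (not jemalloc's actual code path):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main(void)
{
    size_t size = 1 << 20;
    /* Reserve only: like pages_map() with *commit == false. */
    void *p = mmap(NULL, size, PROT_NONE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    /* Commit: remap the same range with read/write protection. */
    if (mmap(p, size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
        return 1;
    memset(p, 0xa5, size); /* now safe to touch */
    puts("committed and touched 1 MiB");
    munmap(p, size);
    return 0;
}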
2
deps/jemalloc/src/prng.c
vendored
Normal file
@@ -0,0 +1,2 @@
+#define JEMALLOC_PRNG_C_
+#include "jemalloc/internal/jemalloc_internal.h"
678
deps/jemalloc/src/prof.c
vendored
File diff suppressed because it is too large
50
deps/jemalloc/src/quarantine.c
vendored
@@ -13,22 +13,22 @@
 /* Function prototypes for non-inline static functions. */

 static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
-static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
-static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
+static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
+static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
     size_t upper_bound);

 /******************************************************************************/

 static quarantine_t *
-quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
+quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
 {
    quarantine_t *quarantine;
+   size_t size;

-   assert(tsd_nominal(tsd));
-
-   quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs)
-       + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false,
-       tcache_get(tsd, true), true, NULL);
+   size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
+       sizeof(quarantine_obj_t));
+   quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
+       false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
    if (quarantine == NULL)
        return (NULL);
    quarantine->curbytes = 0;
@@ -47,7 +47,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
    if (!tsd_nominal(tsd))
        return;

-   quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
+   quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
    /*
     * Check again whether quarantine has been initialized, because
     * quarantine_init() may have triggered recursive initialization.
@@ -55,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
    if (tsd_quarantine_get(tsd) == NULL)
        tsd_quarantine_set(tsd, quarantine);
    else
-       idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
+       idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
 }

 static quarantine_t *
@@ -63,9 +63,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
 {
    quarantine_t *ret;

-   ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
+   ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
    if (ret == NULL) {
-       quarantine_drain_one(tsd, quarantine);
+       quarantine_drain_one(tsd_tsdn(tsd), quarantine);
        return (quarantine);
    }

@@ -87,18 +87,18 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
        memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
            sizeof(quarantine_obj_t));
    }
-   idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
+   idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);

    tsd_quarantine_set(tsd, ret);
    return (ret);
 }

 static void
-quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
+quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
 {
    quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
-   assert(obj->usize == isalloc(obj->ptr, config_prof));
-   idalloctm(tsd, obj->ptr, NULL, false);
+   assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
+   idalloctm(tsdn, obj->ptr, NULL, false, true);
    quarantine->curbytes -= obj->usize;
    quarantine->curobjs--;
    quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
@@ -106,24 +106,24 @@ quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
 }

 static void
-quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
+quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
 {

    while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
-       quarantine_drain_one(tsd, quarantine);
+       quarantine_drain_one(tsdn, quarantine);
 }

 void
 quarantine(tsd_t *tsd, void *ptr)
 {
    quarantine_t *quarantine;
-   size_t usize = isalloc(ptr, config_prof);
+   size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);

    cassert(config_fill);
    assert(opt_quarantine);

    if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
-       idalloctm(tsd, ptr, NULL, false);
+       idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
        return;
    }
    /*
@@ -133,7 +133,7 @@ quarantine(tsd_t *tsd, void *ptr)
    if (quarantine->curbytes + usize > opt_quarantine) {
        size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
            - usize : 0;
-       quarantine_drain(tsd, quarantine, upper_bound);
+       quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
    }
    /* Grow the quarantine ring buffer if it's full. */
    if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
@@ -158,11 +158,11 @@ quarantine(tsd_t *tsd, void *ptr)
            && usize <= SMALL_MAXCLASS)
            arena_quarantine_junk_small(ptr, usize);
        else
-           memset(ptr, 0x5a, usize);
+           memset(ptr, JEMALLOC_FREE_JUNK, usize);
    }
    } else {
        assert(quarantine->curbytes == 0);
-       idalloctm(tsd, ptr, NULL, false);
+       idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
    }
 }

@@ -176,8 +176,8 @@ quarantine_cleanup(tsd_t *tsd)

    quarantine = tsd_quarantine_get(tsd);
    if (quarantine != NULL) {
-       quarantine_drain(tsd, quarantine, 0);
-       idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
+       quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
+       idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
        tsd_quarantine_set(tsd, NULL);
    }
 }
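For readers unfamiliar with the module: the quarantine is a FIFO ring of deferred frees bounded by opt_quarantine bytes, which is exactly what quarantine_drain() enforces above. A toy model of the mechanism, with arbitrary slot and budget constants chosen so the ring never overflows:

#include <stdio.h>
#include <stdlib.h>

#define QUAR_SLOTS 4    /* sized so the byte budget fills first */
#define QUAR_BUDGET 256

typedef struct { void *ptr; size_t usize; } qobj_t;

static qobj_t objs[QUAR_SLOTS];
static size_t first, curobjs, curbytes;

static void
quar_drain_one(void)
{
    qobj_t *obj = &objs[first];
    free(obj->ptr); /* the deferred "real" free */
    curbytes -= obj->usize;
    curobjs--;
    first = (first + 1) % QUAR_SLOTS;
}

static void
quar_put(void *ptr, size_t usize)
{
    /* Evict oldest entries until the new one fits the byte budget. */
    while (curbytes + usize > QUAR_BUDGET && curobjs > 0)
        quar_drain_one();
    objs[(first + curobjs) % QUAR_SLOTS] = (qobj_t){ptr, usize};
    curobjs++;
    curbytes += usize;
}

int
main(void)
{
    for (int i = 0; i < 8; i++)
        quar_put(malloc(64), 64);
    while (curobjs > 0)
        quar_drain_one();
    puts("drained");
    return 0;
}

Holding freed memory in the ring (junk-filled with JEMALLOC_FREE_JUNK) is what lets use-after-free bugs surface as recognizable 0x5a patterns instead of silent corruption.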
9
deps/jemalloc/src/rtree.c
vendored
@@ -15,6 +15,8 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
 {
    unsigned bits_in_leaf, height, i;

+   assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
+       RTREE_BITS_PER_LEVEL));
    assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));

    bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
@@ -94,12 +96,15 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
    rtree_node_elm_t *node;

    if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
+       spin_t spinner;
+
        /*
         * Another thread is already in the process of initializing.
         * Spin-wait until initialization is complete.
         */
+       spin_init(&spinner);
        do {
-           CPU_SPINWAIT;
+           spin_adaptive(&spinner);
            node = atomic_read_p((void **)elmp);
        } while (node == RTREE_NODE_INITIALIZING);
    } else {
@@ -123,5 +128,5 @@ rtree_node_elm_t *
 rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
 {

-   return (rtree_node_init(rtree, level, &elm->child));
+   return (rtree_node_init(rtree, level+1, &elm->child));
 }
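The one-character fix in rtree_child_read_hard() (passing level+1 rather than level) matters because each radix-tree level consumes a different slice of the key. A toy two-level example showing why a child initialized with its parent's level would decode the wrong bits:

#include <assert.h>
#include <stdio.h>

/* Toy 2-level radix tree over 8-bit keys, 4 bits per level. */
#define BITS_PER_LEVEL 4

static unsigned
subkey(unsigned key, unsigned level)
{
    /* level 0 reads the high nibble, level 1 the low nibble. */
    return (key >> ((1 - level) * BITS_PER_LEVEL)) & 0xF;
}

int
main(void)
{
    unsigned key = 0xA7;
    assert(subkey(key, 0) == 0xA);
    assert(subkey(key, 1) == 0x7);
    /* Using the parent's level for the child would re-read 0xA. */
    printf("level0=%x level1=%x\n", subkey(key, 0), subkey(key, 1));
    return 0;
}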
2
deps/jemalloc/src/spin.c
vendored
Normal file
@@ -0,0 +1,2 @@
+#define JEMALLOC_SPIN_C_
+#include "jemalloc/internal/jemalloc_internal.h"
1250
deps/jemalloc/src/stats.c
vendored
Normal file → Executable file
File diff suppressed because it is too large
170
deps/jemalloc/src/tcache.c
vendored
Normal file → Executable file
@@ -10,7 +10,7 @@ ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
 tcache_bin_info_t *tcache_bin_info;
 static unsigned stack_nelms; /* Total stack elms per tcache. */

-size_t nhbins;
+unsigned nhbins;
 size_t tcache_maxclass;

 tcaches_t *tcaches;
@@ -23,10 +23,11 @@ static tcaches_t *tcaches_avail;

 /******************************************************************************/

-size_t tcache_salloc(const void *ptr)
+size_t
+tcache_salloc(tsdn_t *tsdn, const void *ptr)
 {

-   return (arena_salloc(ptr, false));
+   return (arena_salloc(tsdn, ptr, false));
 }

 void
@@ -67,20 +68,19 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
    tcache->next_gc_bin++;
    if (tcache->next_gc_bin == nhbins)
        tcache->next_gc_bin = 0;
-   tcache->ev_cnt = 0;
 }

 void *
-tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, szind_t binind)
+tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+    tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
 {
    void *ret;

-   arena_tcache_fill_small(arena, tbin, binind, config_prof ?
+   arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
        tcache->prof_accumbytes : 0);
    if (config_prof)
        tcache->prof_accumbytes = 0;
-   ret = tcache_alloc_easy(tbin);
+   ret = tcache_alloc_easy(tbin, tcache_success);

    return (ret);
 }
@@ -102,17 +102,18 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
        /* Lock the arena bin associated with the first object. */
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
-           tbin->avail[0]);
+           *(tbin->avail - 1));
        arena_t *bin_arena = extent_node_arena_get(&chunk->node);
        arena_bin_t *bin = &bin_arena->bins[binind];

        if (config_prof && bin_arena == arena) {
-           if (arena_prof_accum(arena, tcache->prof_accumbytes))
-               prof_idump();
+           if (arena_prof_accum(tsd_tsdn(tsd), arena,
+               tcache->prof_accumbytes))
+               prof_idump(tsd_tsdn(tsd));
            tcache->prof_accumbytes = 0;
        }

-       malloc_mutex_lock(&bin->lock);
+       malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
        if (config_stats && bin_arena == arena) {
            assert(!merged_stats);
            merged_stats = true;
@@ -122,16 +123,16 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
        }
        ndeferred = 0;
        for (i = 0; i < nflush; i++) {
-           ptr = tbin->avail[i];
+           ptr = *(tbin->avail - 1 - i);
            assert(ptr != NULL);
            chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
            if (extent_node_arena_get(&chunk->node) == bin_arena) {
                size_t pageind = ((uintptr_t)ptr -
                    (uintptr_t)chunk) >> LG_PAGE;
                arena_chunk_map_bits_t *bitselm =
-                   arena_bitselm_get(chunk, pageind);
-               arena_dalloc_bin_junked_locked(bin_arena, chunk,
-                   ptr, bitselm);
+                   arena_bitselm_get_mutable(chunk, pageind);
+               arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
+                   bin_arena, chunk, ptr, bitselm);
            } else {
                /*
                 * This object was allocated via a different
@@ -139,11 +140,12 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
                 * locked. Stash the object, so that it can be
                 * handled in a future pass.
                 */
-               tbin->avail[ndeferred] = ptr;
+               *(tbin->avail - 1 - ndeferred) = ptr;
                ndeferred++;
            }
        }
-       malloc_mutex_unlock(&bin->lock);
+       malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+       arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
    }
    if (config_stats && !merged_stats) {
        /*
@@ -151,15 +153,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
         * arena, so the stats didn't get merged. Manually do so now.
         */
        arena_bin_t *bin = &arena->bins[binind];
-       malloc_mutex_lock(&bin->lock);
+       malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
        bin->stats.nflushes++;
        bin->stats.nrequests += tbin->tstats.nrequests;
        tbin->tstats.nrequests = 0;
-       malloc_mutex_unlock(&bin->lock);
+       malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
    }

-   memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
-       rem * sizeof(void *));
+   memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+       sizeof(void *));
    tbin->ncached = rem;
    if ((int)tbin->ncached < tbin->low_water)
        tbin->low_water = tbin->ncached;
@@ -182,13 +184,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
    for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
        /* Lock the arena associated with the first object. */
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
-           tbin->avail[0]);
+           *(tbin->avail - 1));
        arena_t *locked_arena = extent_node_arena_get(&chunk->node);
        UNUSED bool idump;

        if (config_prof)
            idump = false;
-       malloc_mutex_lock(&locked_arena->lock);
+       malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
        if ((config_prof || config_stats) && locked_arena == arena) {
            if (config_prof) {
                idump = arena_prof_accum_locked(arena,
@@ -206,13 +208,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
        }
        ndeferred = 0;
        for (i = 0; i < nflush; i++) {
-           ptr = tbin->avail[i];
+           ptr = *(tbin->avail - 1 - i);
            assert(ptr != NULL);
            chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
            if (extent_node_arena_get(&chunk->node) ==
                locked_arena) {
-               arena_dalloc_large_junked_locked(locked_arena,
-                   chunk, ptr);
+               arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
+                   locked_arena, chunk, ptr);
            } else {
                /*
                 * This object was allocated via a different
@@ -220,62 +222,56 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
                 * Stash the object, so that it can be handled
                 * in a future pass.
                 */
-               tbin->avail[ndeferred] = ptr;
+               *(tbin->avail - 1 - ndeferred) = ptr;
                ndeferred++;
            }
        }
-       malloc_mutex_unlock(&locked_arena->lock);
+       malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
        if (config_prof && idump)
-           prof_idump();
+           prof_idump(tsd_tsdn(tsd));
+       arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
+           ndeferred);
    }
    if (config_stats && !merged_stats) {
        /*
         * The flush loop didn't happen to flush to this thread's
         * arena, so the stats didn't get merged. Manually do so now.
         */
-       malloc_mutex_lock(&arena->lock);
+       malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
        arena->stats.nrequests_large += tbin->tstats.nrequests;
        arena->stats.lstats[binind - NBINS].nrequests +=
            tbin->tstats.nrequests;
        tbin->tstats.nrequests = 0;
-       malloc_mutex_unlock(&arena->lock);
+       malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
    }

-   memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
-       rem * sizeof(void *));
+   memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+       sizeof(void *));
    tbin->ncached = rem;
    if ((int)tbin->ncached < tbin->low_water)
        tbin->low_water = tbin->ncached;
 }

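The indexing change running through both flush functions (*(tbin->avail - 1 - i) instead of tbin->avail[i]) reflects the new stack layout set up in tcache_create() below: avail now points one past the cached slots. A small sketch of push/pop under that layout, with illustrative variable names:

#include <stdio.h>

int
main(void)
{
    void *slots[4] = {0};
    void **avail = &slots[4]; /* one past the end, like tbin->avail */
    int ncached = 0;

    /* Push (deallocate into the cache): fill toward lower addresses. */
    int x = 1, y = 2;
    *(avail - 1 - ncached++) = &x;
    *(avail - 1 - ncached++) = &y;

    /* Pop (allocate from the cache): most recently pushed comes first. */
    void *ptr = *(avail - ncached);
    ncached--;
    printf("popped %p (y), %d left\n", ptr, ncached);
    return 0;
}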
void
|
||||
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
|
||||
static void
|
||||
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
|
||||
{
|
||||
|
||||
if (config_stats) {
|
||||
/* Link into list of extant tcaches. */
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
malloc_mutex_lock(tsdn, &arena->lock);
|
||||
ql_elm_new(tcache, link);
|
||||
ql_tail_insert(&arena->tcache_ql, tcache, link);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
malloc_mutex_unlock(tsdn, &arena->lock);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
|
||||
{
|
||||
|
||||
tcache_arena_dissociate(tcache, oldarena);
|
||||
tcache_arena_associate(tcache, newarena);
|
||||
}
|
||||
|
||||
void
|
||||
tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
|
||||
static void
|
||||
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
|
||||
{
|
||||
|
||||
if (config_stats) {
|
||||
/* Unlink from list of extant tcaches. */
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
malloc_mutex_lock(tsdn, &arena->lock);
|
||||
if (config_debug) {
|
||||
bool in_ql = false;
|
||||
tcache_t *iter;
|
||||
@ -288,11 +284,20 @@ tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
|
||||
assert(in_ql);
|
||||
}
|
||||
ql_remove(&arena->tcache_ql, tcache, link);
|
||||
tcache_stats_merge(tcache, arena);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
tcache_stats_merge(tsdn, tcache, arena);
|
||||
malloc_mutex_unlock(tsdn, &arena->lock);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
|
||||
arena_t *newarena)
|
||||
{
|
||||
|
||||
tcache_arena_dissociate(tsdn, tcache, oldarena);
|
||||
tcache_arena_associate(tsdn, tcache, newarena);
|
||||
}
|
||||
|
||||
tcache_t *
|
||||
tcache_get_hard(tsd_t *tsd)
|
||||
{
|
||||
@ -306,11 +311,11 @@ tcache_get_hard(tsd_t *tsd)
|
||||
arena = arena_choose(tsd, NULL);
|
||||
if (unlikely(arena == NULL))
|
||||
return (NULL);
|
||||
return (tcache_create(tsd, arena));
|
||||
return (tcache_create(tsd_tsdn(tsd), arena));
|
||||
}
|
||||
|
||||
tcache_t *
|
||||
tcache_create(tsd_t *tsd, arena_t *arena)
|
||||
tcache_create(tsdn_t *tsdn, arena_t *arena)
|
||||
{
|
||||
tcache_t *tcache;
|
||||
size_t size, stack_offset;
|
||||
@ -324,18 +329,26 @@ tcache_create(tsd_t *tsd, arena_t *arena)
|
||||
/* Avoid false cacheline sharing. */
|
||||
size = sa2u(size, CACHELINE);
|
||||
|
||||
tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
|
||||
tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
|
||||
arena_get(TSDN_NULL, 0, true));
|
||||
if (tcache == NULL)
|
||||
return (NULL);
|
||||
|
||||
tcache_arena_associate(tcache, arena);
|
||||
tcache_arena_associate(tsdn, tcache, arena);
|
||||
|
||||
ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
|
||||
|
||||
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
|
||||
for (i = 0; i < nhbins; i++) {
|
||||
tcache->tbins[i].lg_fill_div = 1;
|
||||
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
|
||||
/*
|
||||
* avail points past the available space. Allocations will
|
||||
* access the slots toward higher addresses (for the benefit of
|
||||
* prefetch).
|
||||
*/
|
||||
tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
|
||||
(uintptr_t)stack_offset);
|
||||
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
|
||||
}
|
||||
|
||||
return (tcache);

@@ -348,7 +361,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
unsigned i;

arena = arena_choose(tsd, NULL);
tcache_arena_dissociate(tcache, arena);
tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);

for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
@@ -356,9 +369,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)

if (config_stats && tbin->tstats.nrequests != 0) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
}

@@ -367,19 +380,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

if (config_stats && tbin->tstats.nrequests != 0) {
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
}

if (config_prof && tcache->prof_accumbytes > 0 &&
arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
prof_idump(tsd_tsdn(tsd));

idalloctm(tsd, tcache, false, true);
idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
}

void
@@ -403,21 +416,22 @@ tcache_enabled_cleanup(tsd_t *tsd)
/* Do nothing. */
}

/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
unsigned i;

cassert(config_stats);

malloc_mutex_assert_owner(tsdn, &arena->lock);

/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsdn, &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsdn, &bin->lock);
tbin->tstats.nrequests = 0;
}
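In the hunk above, the old "Caller must own arena->lock." comment becomes a runtime check, malloc_mutex_assert_owner(tsdn, &arena->lock), which a debug build can actually enforce. A minimal sketch of that general pattern (turning a locking precondition into an assertion) is shown below; the owned_mutex_* helpers are illustrative, not jemalloc's.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Illustrative wrapper: remember which thread holds the mutex so that
 * preconditions like "caller must own the lock" can be asserted. */
typedef struct {
    pthread_mutex_t mtx;
    pthread_t owner;
    bool owned;
} owned_mutex_t;

static void
owned_mutex_lock(owned_mutex_t *m)
{
    pthread_mutex_lock(&m->mtx);
    m->owner = pthread_self();
    m->owned = true;
}

static void
owned_mutex_unlock(owned_mutex_t *m)
{
    m->owned = false;
    pthread_mutex_unlock(&m->mtx);
}

static void
owned_mutex_assert_owner(owned_mutex_t *m)
{
    assert(m->owned && pthread_equal(m->owner, pthread_self()));
}

static int shared_counter;

/* A function with a locking precondition, in the spirit of tcache_stats_merge(). */
static void
merge_locked(owned_mutex_t *m, int delta)
{
    owned_mutex_assert_owner(m);    /* caller must hold m */
    shared_counter += delta;
}

int
main(void)
{
    owned_mutex_t m = { .mtx = PTHREAD_MUTEX_INITIALIZER, .owned = false };

    owned_mutex_lock(&m);
    merge_locked(&m, 1);
    owned_mutex_unlock(&m);
    return (0);
}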

@@ -433,11 +447,12 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
arena_t *arena;
tcache_t *tcache;
tcaches_t *elm;

if (tcaches == NULL) {
tcaches = base_alloc(sizeof(tcache_t *) *
tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
(MALLOCX_TCACHE_MAX+1));
if (tcaches == NULL)
return (true);
@@ -445,7 +460,10 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)

if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
return (true);
tcache = tcache_create(tsd, a0get());
arena = arena_ichoose(tsd, NULL);
if (unlikely(arena == NULL))
return (true);
tcache = tcache_create(tsd_tsdn(tsd), arena);
if (tcache == NULL)
return (true);

@@ -453,7 +471,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
elm = tcaches_avail;
tcaches_avail = tcaches_avail->next;
elm->tcache = tcache;
*r_ind = elm - tcaches;
*r_ind = (unsigned)(elm - tcaches);
} else {
elm = &tcaches[tcaches_past];
elm->tcache = tcache;
@@ -491,7 +509,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
}

bool
tcache_boot(void)
tcache_boot(tsdn_t *tsdn)
{
unsigned i;

@@ -499,17 +517,17 @@ tcache_boot(void)
* If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
* known.
*/
if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
else if ((1U << opt_lg_tcache_max) > large_maxclass)
else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
tcache_maxclass = large_maxclass;
else
tcache_maxclass = (1U << opt_lg_tcache_max);
tcache_maxclass = (ZU(1) << opt_lg_tcache_max);

nhbins = size2index(tcache_maxclass) + 1;

/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
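The clamping hunk above switches from 1U << opt_lg_tcache_max to ZU(1) << opt_lg_tcache_max so the shift is performed at size_t width: with a 32-bit unsigned constant, a shift count of 32 or more is undefined and any result above 2^31 cannot be represented, so the comparison against large_maxclass could misbehave. A small sketch of the difference, using a local SZ() macro in place of jemalloc's ZU() and assuming a 64-bit size_t:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for jemalloc's ZU(): force the constant to size_t width. */
#define SZ(z)    ((size_t)z)

int
main(void)
{
    unsigned lg = 36;    /* plausible only on LP64; 1U << 36 would be undefined */

    size_t ok = SZ(1) << lg;    /* 64 GiB, well defined for a 64-bit size_t */

    printf("SZ(1) << %u = %zu\n", lg, ok);
    return (0);
}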

deps/jemalloc/src/ticker.c (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
#define JEMALLOC_TICKER_C_
#include "jemalloc/internal/jemalloc_internal.h"
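ticker.c is new in this update; elsewhere in the diff, tcache_create() now calls ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR), suggesting a simple countdown used to trigger periodic incremental GC of the thread cache. The sketch below shows a countdown ticker of that general shape; the my_ticker_* names and behavior are illustrative, not jemalloc's exact ticker API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative countdown ticker: fires once every nticks calls to tick(). */
typedef struct {
    int32_t tick;    /* remaining calls before firing */
    int32_t nticks;  /* period */
} my_ticker_t;

static void
my_ticker_init(my_ticker_t *t, int32_t nticks)
{
    t->tick = nticks;
    t->nticks = nticks;
}

static bool
my_ticker_tick(my_ticker_t *t)
{
    if (--t->tick == 0) {
        t->tick = t->nticks;    /* rearm for the next period */
        return (true);
    }
    return (false);
}

int
main(void)
{
    my_ticker_t t;
    int i;

    my_ticker_init(&t, 3);
    for (i = 1; i <= 7; i++) {
        if (my_ticker_tick(&t))
            printf("event fired on call %d\n", i);    /* calls 3 and 6 */
    }
    return (0);
}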

deps/jemalloc/src/tsd.c (vendored, 28 lines changed)
@@ -77,7 +77,7 @@ tsd_cleanup(void *arg)
/* Do nothing. */
break;
case tsd_state_nominal:
#define O(n, t) \
#define O(n, t) \
n##_cleanup(tsd);
MALLOC_TSD
#undef O
@@ -106,15 +106,17 @@ MALLOC_TSD
}
}

bool
tsd_t *
malloc_tsd_boot0(void)
{
tsd_t *tsd;

ncleanups = 0;
if (tsd_boot0())
return (true);
*tsd_arenas_cache_bypassp_get(tsd_fetch()) = true;
return (false);
return (NULL);
tsd = tsd_fetch();
*tsd_arenas_tdata_bypassp_get(tsd) = true;
return (tsd);
}

void
@@ -122,7 +124,7 @@ malloc_tsd_boot1(void)
{

tsd_boot1();
*tsd_arenas_cache_bypassp_get(tsd_fetch()) = false;
*tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
}

#ifdef _WIN32
@@ -148,13 +150,15 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
#ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
# pragma comment(linker, "/INCLUDE:_tls_callback")
# else
# pragma comment(linker, "/INCLUDE:_tls_used")
# pragma comment(linker, "/INCLUDE:tls_callback")
# endif
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif

@@ -167,10 +171,10 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
tsd_init_block_t *iter;

/* Check whether this thread has already inserted into the list. */
malloc_mutex_lock(&head->lock);
malloc_mutex_lock(TSDN_NULL, &head->lock);
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
malloc_mutex_unlock(&head->lock);
malloc_mutex_unlock(TSDN_NULL, &head->lock);
return (iter->data);
}
}
@@ -178,7 +182,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
ql_elm_new(block, link);
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
malloc_mutex_unlock(&head->lock);
malloc_mutex_unlock(TSDN_NULL, &head->lock);
return (NULL);
}
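tsd_init_check_recursion() above guards against the allocator re-entering its own TSD bootstrap on the same thread: an in-progress initialization registers a block keyed by the calling thread, and a recursive call finds that block and reuses its data instead of recursing. The stand-alone illustration below uses plain pthreads and a hand-rolled singly linked list instead of jemalloc's ql/mutex types; all names are hypothetical.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

typedef struct init_block_s {
    struct init_block_s *next;
    pthread_t thread;
    void *data;
} init_block_t;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static init_block_t *init_blocks;    /* in-progress initializations */

/* Returns NULL if the caller should proceed with initialization, or the
 * partially initialized data if this thread is already mid-initialization. */
static void *
init_check_recursion(init_block_t *block, void *data)
{
    init_block_t *iter;
    pthread_t self = pthread_self();

    pthread_mutex_lock(&init_lock);
    for (iter = init_blocks; iter != NULL; iter = iter->next) {
        if (pthread_equal(iter->thread, self)) {
            pthread_mutex_unlock(&init_lock);
            return (iter->data);    /* recursion detected */
        }
    }
    block->thread = self;
    block->data = data;
    block->next = init_blocks;
    init_blocks = block;
    pthread_mutex_unlock(&init_lock);
    return (NULL);
}

static void
init_finish(init_block_t *block)
{
    init_block_t **iterp;

    pthread_mutex_lock(&init_lock);
    for (iterp = &init_blocks; *iterp != NULL; iterp = &(*iterp)->next) {
        if (*iterp == block) {
            *iterp = block->next;
            break;
        }
    }
    pthread_mutex_unlock(&init_lock);
}

int
main(void)
{
    init_block_t block;
    int payload = 42;

    if (init_check_recursion(&block, &payload) == NULL) {
        /* A nested call on the same thread sees the in-progress data. */
        printf("recursive call got %d\n",
            *(int *)init_check_recursion(&block, &payload));
        init_finish(&block);
    }
    return (0);
}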

@@ -186,8 +190,8 @@ void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{

malloc_mutex_lock(&head->lock);
malloc_mutex_lock(TSDN_NULL, &head->lock);
ql_remove(&head->blocks, block, link);
malloc_mutex_unlock(&head->lock);
malloc_mutex_unlock(TSDN_NULL, &head->lock);
}
#endif

deps/jemalloc/src/util.c (vendored, 42 lines changed, mode: Normal file → Executable file)
@@ -1,3 +1,7 @@
/*
* Define simple versions of assertion macros that won't recurse in case
* of assertion failures in malloc_*printf().
*/
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
@@ -10,6 +14,7 @@
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
unreachable(); \
} while (0)

#define not_implemented() do { \
@@ -44,15 +49,19 @@ static void
wrtmessage(void *cbopaque, const char *s)
{

#ifdef SYS_write
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
* the possibility of memory allocation within libc. This is necessary
* on FreeBSD; most operating systems do not have this problem though.
*
* syscall() returns long or int, depending on platform, so capture the
* unused result in the widest plausible type to avoid compiler
* warnings.
*/
UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
UNUSED int result = write(STDERR_FILENO, s, strlen(s));
UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s));
#endif
}
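The wrtmessage() hunk above gates the raw syscall path behind JEMALLOC_USE_SYSCALL and widens the deliberately ignored result types (long for syscall(2), ssize_t for write(2)) to silence compiler warnings. The same fallback structure is sketched below as a stand-alone program; the USE_RAW_SYSCALL macro is local to the example, not a jemalloc flag.

#include <string.h>
#include <unistd.h>
#ifdef __linux__
#define USE_RAW_SYSCALL    /* assume syscall(2) and SYS_write are available here */
#include <sys/syscall.h>
#endif

/* Write a message to stderr without risking allocation inside stdio. */
static void
write_message(const char *s)
{
#if defined(USE_RAW_SYSCALL) && defined(SYS_write)
    /* syscall() returns long; the result is deliberately ignored. */
    long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
    (void)result;
#else
    ssize_t result = write(STDERR_FILENO, s, strlen(s));
    (void)result;
#endif
}

int
main(void)
{
    write_message("hello from write_message\n");
    return (0);
}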

@@ -82,7 +91,7 @@ buferror(int err, char *buf, size_t buflen)

#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
(LPSTR)buf, buflen, NULL);
(LPSTR)buf, (DWORD)buflen, NULL);
return (0);
#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
@@ -191,7 +200,7 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
p++;
}
if (neg)
ret = -ret;
ret = (uintmax_t)(-((intmax_t)ret));

if (p == ns) {
/* No conversion performed. */
@@ -306,10 +315,9 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
return (s);
}

int
size_t
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
int ret;
size_t i;
const char *f;

@@ -400,6 +408,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
int prec = -1;
int width = -1;
unsigned char len = '?';
char *s;
size_t slen;

f++;
/* Flags. */
@@ -490,8 +500,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
}
/* Conversion specifier. */
switch (*f) {
char *s;
size_t slen;
case '%':
/* %% */
APPEND_C(*f);
@@ -577,20 +585,19 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
str[i] = '\0';
else
str[size - 1] = '\0';
ret = i;

#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
return (ret);
return (i);
}

JEMALLOC_FORMAT_PRINTF(3, 4)
int
size_t
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
int ret;
size_t ret;
va_list ap;

va_start(ap, format);
@@ -648,3 +655,12 @@ malloc_printf(const char *format, ...)
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}

/*
* Restore normal assertion macros, in order to make it possible to compile all
* C files as a single concatenation.
*/
#undef assert
#undef not_reached
#undef not_implemented
#include "jemalloc/internal/assert.h"
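util.c shadows assert()/not_reached()/not_implemented() at the top of the file so that a failed assertion inside malloc_*printf() reports through malloc_write() and abort()s instead of recursing back into the formatting code, and the final hunk restores the normal macros so all C files can still be compiled as a single concatenation. A stripped-down sketch of that shadow-then-restore pattern follows; write_str() and demo_vsnprintf_like() are stand-ins invented for the example.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static const bool config_debug = true;

/* Stand-in for malloc_write(): must not call back into the formatting code. */
static void
write_str(const char *s)
{
    ssize_t r = write(STDERR_FILENO, s, strlen(s));
    (void)r;
}

/* Shadow assert() with a version that cannot recurse into formatting. */
#undef assert
#define assert(e) do {						\
    if (config_debug && !(e)) {					\
        write_str("<demo>: Failed assertion\n");		\
        abort();						\
    }								\
} while (0)

static size_t
demo_vsnprintf_like(char *str, size_t size)
{
    assert(str != NULL);    /* uses the non-recursive assert above */
    if (size > 0)
        str[0] = '\0';
    return (0);
}

/* Restore the standard assert definition for the rest of the file. */
#undef assert
#include <assert.h>

int
main(void)
{
    char buf[8];

    demo_vsnprintf_like(buf, sizeof(buf));
    assert(buf[0] == '\0');    /* the standard assert again */
    return (0);
}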

deps/jemalloc/src/witness.c (vendored, new file, 136 lines)
@@ -0,0 +1,136 @@
#define JEMALLOC_WITNESS_C_
#include "jemalloc/internal/jemalloc_internal.h"

void
witness_init(witness_t *witness, const char *name, witness_rank_t rank,
witness_comp_t *comp)
{

witness->name = name;
witness->rank = rank;
witness->comp = comp;
}

#ifdef JEMALLOC_JET
#undef witness_lock_error
#define witness_lock_error JEMALLOC_N(n_witness_lock_error)
#endif
void
witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
{
witness_t *w;

malloc_printf("<jemalloc>: Lock rank order reversal:");
ql_foreach(w, witnesses, link) {
malloc_printf(" %s(%u)", w->name, w->rank);
}
malloc_printf(" %s(%u)\n", witness->name, witness->rank);
abort();
}
#ifdef JEMALLOC_JET
#undef witness_lock_error
#define witness_lock_error JEMALLOC_N(witness_lock_error)
witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
#endif
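Each error hook in witness.c is wrapped in the same JEMALLOC_JET dance seen above: under the testing build the real implementation is compiled under an n_-prefixed name and the public symbol becomes a writable function pointer, so unit tests can interpose their own handler instead of aborting. A minimal sketch of that indirection pattern follows, with a local TESTING macro standing in for JEMALLOC_JET and without the JEMALLOC_N name mangling; all names are hypothetical.

#include <stdio.h>

#define TESTING 1    /* stand-in for a JEMALLOC_JET-style build flag */

typedef void (fail_hook_t)(const char *what);

/* The real implementation, compiled under an alternate name when testing. */
static void
n_fail_hook(const char *what)
{
    fprintf(stderr, "fatal: %s\n", what);
    /* a production build would abort() here */
}

#if TESTING
/* The public symbol becomes a replaceable function pointer for tests. */
fail_hook_t *fail_hook = n_fail_hook;
#else
#define fail_hook n_fail_hook
#endif

#if TESTING
/* A test can swap in its own handler to observe the failure path. */
static int failures_seen;

static void
counting_hook(const char *what)
{
    (void)what;
    failures_seen++;
}
#endif

int
main(void)
{
    fail_hook("lock order reversal");    /* default handler */
#if TESTING
    fail_hook = counting_hook;           /* interpose, as a unit test would */
    fail_hook("lock order reversal");
    printf("failures_seen = %d\n", failures_seen);
#endif
    return (0);
}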

#ifdef JEMALLOC_JET
#undef witness_owner_error
#define witness_owner_error JEMALLOC_N(n_witness_owner_error)
#endif
void
witness_owner_error(const witness_t *witness)
{

malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
witness->rank);
abort();
}
#ifdef JEMALLOC_JET
#undef witness_owner_error
#define witness_owner_error JEMALLOC_N(witness_owner_error)
witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
#endif

#ifdef JEMALLOC_JET
#undef witness_not_owner_error
#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
#endif
void
witness_not_owner_error(const witness_t *witness)
{

malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
witness->rank);
abort();
}
#ifdef JEMALLOC_JET
#undef witness_not_owner_error
#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
witness_not_owner_error_t *witness_not_owner_error =
JEMALLOC_N(n_witness_not_owner_error);
#endif

#ifdef JEMALLOC_JET
#undef witness_lockless_error
#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
#endif
void
witness_lockless_error(const witness_list_t *witnesses)
{
witness_t *w;

malloc_printf("<jemalloc>: Should not own any locks:");
ql_foreach(w, witnesses, link) {
malloc_printf(" %s(%u)", w->name, w->rank);
}
malloc_printf("\n");
abort();
}
#ifdef JEMALLOC_JET
#undef witness_lockless_error
#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
witness_lockless_error_t *witness_lockless_error =
JEMALLOC_N(n_witness_lockless_error);
#endif

void
witnesses_cleanup(tsd_t *tsd)
{

witness_assert_lockless(tsd_tsdn(tsd));

/* Do nothing. */
}

void
witness_fork_cleanup(tsd_t *tsd)
{

/* Do nothing. */
}

void
witness_prefork(tsd_t *tsd)
{

tsd_witness_fork_set(tsd, true);
}

void
witness_postfork_parent(tsd_t *tsd)
{

tsd_witness_fork_set(tsd, false);
}

void
witness_postfork_child(tsd_t *tsd)
{
#ifndef JEMALLOC_MUTEX_INIT_CB
witness_list_t *witnesses;

witnesses = tsd_witnessesp_get(tsd);
ql_new(witnesses);
#endif
tsd_witness_fork_set(tsd, false);
}
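The witness functions in this new file only report and abort; the rank checks themselves happen where locks are acquired and released, outside this file. The idea is that every lock carries a witness with a numeric rank, a per-thread list records which witnesses are currently held, and acquiring a lock whose rank is not higher than what is already held is reported as a rank order reversal (with a comp callback as an escape hatch for equal ranks). A compact stand-alone illustration of that discipline follows, using an array instead of a ql_ list and fprintf/abort instead of the jemalloc error hooks; all names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

#define MAX_HELD 16

typedef struct {
    const char *name;
    unsigned rank;    /* locks must be acquired in increasing rank order */
} witness_lite_t;

/* Per-thread in a real allocator; a single global list is enough here. */
static const witness_lite_t *held[MAX_HELD];
static int nheld;

static void
witness_lite_lock(const witness_lite_t *w)
{
    if (nheld > 0 && held[nheld - 1]->rank >= w->rank) {
        fprintf(stderr, "lock rank order reversal: %s(%u) after %s(%u)\n",
            w->name, w->rank, held[nheld - 1]->name, held[nheld - 1]->rank);
        abort();
    }
    held[nheld++] = w;
}

static void
witness_lite_unlock(const witness_lite_t *w)
{
    if (nheld > 0 && held[nheld - 1] == w)
        nheld--;
}

int
main(void)
{
    witness_lite_t arena_lock = { "arena", 3 };
    witness_lite_t bin_lock = { "bin", 5 };

    witness_lite_lock(&arena_lock);    /* rank 3 */
    witness_lite_lock(&bin_lock);      /* rank 5: ok, increasing */
    witness_lite_unlock(&bin_lock);
    witness_lite_unlock(&arena_lock);

    witness_lite_lock(&bin_lock);
    witness_lite_lock(&arena_lock);    /* rank 3 after 5: reported, aborts */
    return (0);
}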

deps/jemalloc/src/zone.c (vendored, 198 lines changed)
@@ -4,7 +4,7 @@
#endif

/*
* The malloc_default_purgeable_zone function is only available on >= 10.6.
* The malloc_default_purgeable_zone() function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
@@ -13,8 +13,9 @@ JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */

static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;
static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */
@@ -56,7 +57,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
return (ivsalloc(ptr, config_prof));
return (ivsalloc(tsdn_fetch(), ptr, config_prof));
}

static void *
@@ -87,7 +88,7 @@ static void
zone_free(malloc_zone_t *zone, void *ptr)
{

if (ivsalloc(ptr, config_prof) != 0) {
if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
je_free(ptr);
return;
}
@@ -99,7 +100,7 @@ static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{

if (ivsalloc(ptr, config_prof) != 0)
if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
return (je_realloc(ptr, size));

return (realloc(ptr, size));
@@ -121,9 +122,11 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
size_t alloc_size;

if (ivsalloc(ptr, config_prof) != 0) {
assert(ivsalloc(ptr, config_prof) == size);
alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
if (alloc_size != 0) {
assert(alloc_size == size);
je_free(ptr);
return;
}
@@ -162,89 +165,103 @@ static void
zone_force_unlock(malloc_zone_t *zone)
{

/*
* Call jemalloc_postfork_child() rather than
* jemalloc_postfork_parent(), because this function is executed by both
* parent and child. The parent can tolerate having state
* reinitialized, but the child cannot unlock mutexes that were locked
* by the parent.
*/
if (isthreaded)
jemalloc_postfork_parent();
jemalloc_postfork_child();
}
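zone_force_unlock() now calls jemalloc_postfork_child() because, per the comment above, OSX runs the zone's force_unlock callback in both the parent and the child after fork(): the parent can tolerate having its lock state reinitialized, while the child must not try to unlock mutexes that were locked by the parent. The same reasoning appears with pthread_atfork(); the sketch below shows the prefork/postfork split for a single generic mutex (not jemalloc's), where the child reinitializes rather than unlocks, which is the conventional approach even though reinitializing a locked mutex is not strictly portable.

#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{
    /* Take every lock so the child does not inherit one held mid-update. */
    pthread_mutex_lock(&big_lock);
}

static void
postfork_parent(void)
{
    /* The parent still owns the lock and may simply release it. */
    pthread_mutex_unlock(&big_lock);
}

static void
postfork_child(void)
{
    /*
     * The child is a different thread of control; unlocking the parent's
     * mutex is not portable, so reinitialize it instead.
     */
    pthread_mutex_init(&big_lock, NULL);
}

int
main(void)
{
    pthread_atfork(prefork, postfork_parent, postfork_child);

    pid_t pid = fork();
    if (pid == 0) {
        pthread_mutex_lock(&big_lock);    /* safe: child reinitialized it */
        puts("child: lock usable after fork");
        pthread_mutex_unlock(&big_lock);
        _exit(0);
    }
    wait(NULL);
    pthread_mutex_lock(&big_lock);
    puts("parent: lock usable after fork");
    pthread_mutex_unlock(&big_lock);
    return (0);
}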

JEMALLOC_ATTR(constructor)
void
register_zone(void)
static void
zone_init(void)
{

/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
malloc_zone_t *default_zone = malloc_default_zone();
malloc_zone_t *purgeable_zone = NULL;
if (!default_zone->zone_name ||
strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
return;
}

zone.size = (void *)zone_size;
zone.malloc = (void *)zone_malloc;
zone.calloc = (void *)zone_calloc;
zone.valloc = (void *)zone_valloc;
zone.free = (void *)zone_free;
zone.realloc = (void *)zone_realloc;
zone.destroy = (void *)zone_destroy;
zone.zone_name = "jemalloc_zone";
zone.batch_malloc = NULL;
zone.batch_free = NULL;
zone.introspect = &zone_introspect;
zone.version = JEMALLOC_ZONE_VERSION;
jemalloc_zone.size = (void *)zone_size;
jemalloc_zone.malloc = (void *)zone_malloc;
jemalloc_zone.calloc = (void *)zone_calloc;
jemalloc_zone.valloc = (void *)zone_valloc;
jemalloc_zone.free = (void *)zone_free;
jemalloc_zone.realloc = (void *)zone_realloc;
jemalloc_zone.destroy = (void *)zone_destroy;
jemalloc_zone.zone_name = "jemalloc_zone";
jemalloc_zone.batch_malloc = NULL;
jemalloc_zone.batch_free = NULL;
jemalloc_zone.introspect = &jemalloc_zone_introspect;
jemalloc_zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
zone.memalign = zone_memalign;
jemalloc_zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
zone.free_definite_size = zone_free_definite_size;
jemalloc_zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
zone.pressure_relief = NULL;
jemalloc_zone.pressure_relief = NULL;
#endif

zone_introspect.enumerator = NULL;
zone_introspect.good_size = (void *)zone_good_size;
zone_introspect.check = NULL;
zone_introspect.print = NULL;
zone_introspect.log = NULL;
zone_introspect.force_lock = (void *)zone_force_lock;
zone_introspect.force_unlock = (void *)zone_force_unlock;
zone_introspect.statistics = NULL;
jemalloc_zone_introspect.enumerator = NULL;
jemalloc_zone_introspect.good_size = (void *)zone_good_size;
jemalloc_zone_introspect.check = NULL;
jemalloc_zone_introspect.print = NULL;
jemalloc_zone_introspect.log = NULL;
jemalloc_zone_introspect.force_lock = (void *)zone_force_lock;
jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock;
jemalloc_zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
zone_introspect.zone_locked = NULL;
jemalloc_zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
zone_introspect.enable_discharge_checking = NULL;
zone_introspect.disable_discharge_checking = NULL;
zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
zone_introspect.enumerate_discharged_pointers = NULL;
#else
zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
jemalloc_zone_introspect.enable_discharge_checking = NULL;
jemalloc_zone_introspect.disable_discharge_checking = NULL;
jemalloc_zone_introspect.discharge = NULL;
# ifdef __BLOCKS__
jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
# else
jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
# endif
#endif
}

static malloc_zone_t *
zone_default_get(void)
{
malloc_zone_t **zones = NULL;
unsigned int num_zones = 0;

/*
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
* malloc_default_purgeable_zone is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
* On OSX 10.12, malloc_default_zone returns a special zone that is not
* present in the list of registered zones. That zone uses a "lite zone"
* if one is present (apparently enabled when malloc stack logging is
* enabled), or the first registered zone otherwise. In practice this
* means unless malloc stack logging is enabled, the first registered
* zone is the default. So get the list of zones to get the first one,
* instead of relying on malloc_default_zone.
*/
if (malloc_default_purgeable_zone != NULL)
purgeable_zone = malloc_default_purgeable_zone();
if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
(vm_address_t**)&zones, &num_zones)) {
/*
* Reset the value in case the failure happened after it was
* set.
*/
num_zones = 0;
}

/* Register the custom zone. At this point it won't be the default. */
malloc_zone_register(&zone);
if (num_zones)
return (zones[0]);

return (malloc_default_zone());
}

/* As written, this function can only promote jemalloc_zone. */
static void
zone_promote(void)
{
malloc_zone_t *zone;

do {
default_zone = malloc_default_zone();
/*
* Unregister and reregister the default zone. On OSX >= 10.6,
* unregistering takes the last registered zone and places it
@@ -255,6 +272,7 @@ register_zone(void)
*/
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);

/*
* On OSX 10.6, having the default purgeable zone appear before
* the default zone makes some things crash because it thinks it
@@ -266,9 +284,47 @@ register_zone(void)
* above, i.e. the default zone. Registering it again then puts
* it at the end, obviously after the default zone.
*/
if (purgeable_zone) {
if (purgeable_zone != NULL) {
malloc_zone_unregister(purgeable_zone);
malloc_zone_register(purgeable_zone);
}
} while (malloc_default_zone() != &zone);

zone = zone_default_get();
} while (zone != &jemalloc_zone);
}

JEMALLOC_ATTR(constructor)
void
zone_register(void)
{

/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
default_zone = zone_default_get();
if (!default_zone->zone_name || strcmp(default_zone->zone_name,
"DefaultMallocZone") != 0)
return;

/*
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
* malloc_default_purgeable_zone() is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
malloc_default_purgeable_zone();

/* Register the custom zone. At this point it won't be the default. */
zone_init();
malloc_zone_register(&jemalloc_zone);

/* Promote the custom zone to be default. */
zone_promote();
}