Mirror of https://github.com/fluencelabs/redis, synced 2025-06-23 22:11:33 +00:00.
Jemalloc updated to 4.4.0.
The original jemalloc source tree was modified to:

1. Remove the configure error that prevents nested builds.
2. Insert the Redis private Jemalloc API in order to allow the Redis fragmentation function to work.
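For context, the "Redis private Jemalloc API" is the allocation-utilization hint that Redis's active defragmentation code (defrag.c) queries for each candidate pointer. The sketch below is illustrative rather than the verbatim Redis code: it assumes the three-argument hint entry point of this era, je_get_defrag_hint(ptr, &bin_util, &run_util), and assumes linking against the patched jemalloc that exports it.

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    /* Assumed private API added to the patched jemalloc: returns nonzero
     * when a hint is available for ptr, filling in the average utilization
     * of ptr's size class (bin_util) and of the run containing ptr
     * (run_util). */
    int je_get_defrag_hint(void *ptr, int *bin_util, int *run_util);

    /* Move ptr to a fresh allocation only when its run is emptier than the
     * bin average, so copies drain sparse runs into dense ones. Returns the
     * new pointer, or NULL when moving would not reduce fragmentation. */
    void *defrag_if_worthwhile(void *ptr, size_t size) {
        int bin_util, run_util;
        void *newptr;

        if (!je_get_defrag_hint(ptr, &bin_util, &run_util))
            return NULL;    /* huge allocation or no hint: skip */
        if (run_util >= bin_util)
            return NULL;    /* run already well utilized: keep in place */
        newptr = malloc(size);
        if (newptr == NULL)
            return NULL;
        memcpy(newptr, ptr, size);
        free(ptr);
        return newptr;
    }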
Changed file: deps/jemalloc/src/chunk.c (vendored), 428 changed lines

--- a/deps/jemalloc/src/chunk.c
+++ b/deps/jemalloc/src/chunk.c
@@ -49,9 +49,10 @@ const chunk_hooks_t chunk_hooks_default = {
  * definition.
  */
 
-static void	chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
-    void *chunk, size_t size, bool zeroed, bool committed);
+static void	chunk_record(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
+    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
+    bool zeroed, bool committed);
 
 /******************************************************************************/
 
@@ -63,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena)
 }
 
 chunk_hooks_t
-chunk_hooks_get(arena_t *arena)
+chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
 {
 	chunk_hooks_t chunk_hooks;
 
-	malloc_mutex_lock(&arena->chunks_mtx);
+	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
 	chunk_hooks = chunk_hooks_get_locked(arena);
-	malloc_mutex_unlock(&arena->chunks_mtx);
+	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 
 	return (chunk_hooks);
 }
 
 chunk_hooks_t
-chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
+chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
 {
 	chunk_hooks_t old_chunk_hooks;
 
-	malloc_mutex_lock(&arena->chunks_mtx);
+	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
 	old_chunk_hooks = arena->chunk_hooks;
 	/*
 	 * Copy each field atomically so that it is impossible for readers to
@@ -104,14 +105,14 @@ chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
 	ATOMIC_COPY_HOOK(split);
 	ATOMIC_COPY_HOOK(merge);
 #undef ATOMIC_COPY_HOOK
-	malloc_mutex_unlock(&arena->chunks_mtx);
+	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 
 	return (old_chunk_hooks);
 }
 
 static void
-chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    bool locked)
+chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, bool locked)
 {
 	static const chunk_hooks_t uninitialized_hooks =
 	    CHUNK_HOOKS_INITIALIZER;
@@ -119,27 +120,28 @@ chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
 	if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
 	    0) {
 		*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
-		    chunk_hooks_get(arena);
+		    chunk_hooks_get(tsdn, arena);
 	}
 }
 
 static void
-chunk_hooks_assure_initialized_locked(arena_t *arena,
+chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks)
 {
 
-	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
+	chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
 }
 
 static void
-chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
+chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks)
 {
 
-	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
+	chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
 }
 
 bool
-chunk_register(const void *chunk, const extent_node_t *node)
+chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
 {
 
 	assert(extent_node_addr_get(node) == chunk);
@@ -159,7 +161,7 @@ chunk_register(const void *chunk, const extent_node_t *node)
 			high = atomic_read_z(&highchunks);
 		}
 		if (cur > high && prof_gdump_get_unlocked())
-			prof_gdump();
+			prof_gdump(tsdn);
 	}
 
 	return (false);
@@ -181,33 +183,35 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
 }
 
 /*
- * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
- * fits.
+ * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
+ * best fits.
  */
 static extent_node_t *
-chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, size_t size)
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
 {
 	extent_node_t key;
 
 	assert(size == CHUNK_CEILING(size));
 
-	extent_node_init(&key, arena, NULL, size, false, false);
-	return (extent_tree_szad_nsearch(chunks_szad, &key));
+	extent_node_init(&key, arena, NULL, size, 0, false, false);
+	return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
 }
 
 static void *
-chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
-    bool dalloc_node)
+chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit, bool dalloc_node)
 {
 	void *ret;
 	extent_node_t *node;
 	size_t alloc_size, leadsize, trailsize;
 	bool zeroed, committed;
 
+	assert(CHUNK_CEILING(size) == size);
+	assert(alignment > 0);
 	assert(new_addr == NULL || alignment == chunksize);
+	assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
 	/*
 	 * Cached chunks use the node linkage embedded in their headers, in
 	 * which case dalloc_node is true, and new_addr is non-NULL because
@@ -215,24 +219,23 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
 	 */
 	assert(dalloc_node || new_addr != NULL);
 
-	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
+	alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
-	malloc_mutex_lock(&arena->chunks_mtx);
-	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
 	if (new_addr != NULL) {
 		extent_node_t key;
-		extent_node_init(&key, arena, new_addr, alloc_size, false,
+		extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
 		    false);
 		node = extent_tree_ad_search(chunks_ad, &key);
 	} else {
-		node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
-		    alloc_size);
+		node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
 	}
 	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
 	    size)) {
-		malloc_mutex_unlock(&arena->chunks_mtx);
+		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 		return (NULL);
 	}
 	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
@@ -241,6 +244,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert(extent_node_size_get(node) >= leadsize + size);
 	trailsize = extent_node_size_get(node) - leadsize - size;
 	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
+	*sn = extent_node_sn_get(node);
 	zeroed = extent_node_zeroed_get(node);
 	if (zeroed)
 		*zero = true;
@@ -251,17 +255,17 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
 	if (leadsize != 0 &&
 	    chunk_hooks->split(extent_node_addr_get(node),
 	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
-		malloc_mutex_unlock(&arena->chunks_mtx);
+		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 		return (NULL);
 	}
 	/* Remove node from the tree. */
-	extent_tree_szad_remove(chunks_szad, node);
+	extent_tree_szsnad_remove(chunks_szsnad, node);
 	extent_tree_ad_remove(chunks_ad, node);
 	arena_chunk_cache_maybe_remove(arena, node, cache);
 	if (leadsize != 0) {
 		/* Insert the leading space as a smaller chunk. */
 		extent_node_size_set(node, leadsize);
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		extent_tree_ad_insert(chunks_ad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 		node = NULL;
@@ -271,41 +275,42 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
 		if (chunk_hooks->split(ret, size + trailsize, size,
 		    trailsize, false, arena->ind)) {
 			if (dalloc_node && node != NULL)
-				arena_node_dalloc(arena, node);
-			malloc_mutex_unlock(&arena->chunks_mtx);
-			chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
-			    cache, ret, size + trailsize, zeroed, committed);
+				arena_node_dalloc(tsdn, arena, node);
+			malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+			chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
+			    chunks_ad, cache, ret, size + trailsize, *sn,
+			    zeroed, committed);
 			return (NULL);
 		}
 		/* Insert the trailing space as a smaller chunk. */
 		if (node == NULL) {
-			node = arena_node_alloc(arena);
+			node = arena_node_alloc(tsdn, arena);
 			if (node == NULL) {
-				malloc_mutex_unlock(&arena->chunks_mtx);
-				chunk_record(arena, chunk_hooks, chunks_szad,
-				    chunks_ad, cache, ret, size + trailsize,
-				    zeroed, committed);
+				malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+				chunk_record(tsdn, arena, chunk_hooks,
+				    chunks_szsnad, chunks_ad, cache, ret, size
+				    + trailsize, *sn, zeroed, committed);
 				return (NULL);
 			}
 		}
 		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
-		    trailsize, zeroed, committed);
-		extent_tree_szad_insert(chunks_szad, node);
+		    trailsize, *sn, zeroed, committed);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		extent_tree_ad_insert(chunks_ad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 		node = NULL;
 	}
 	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
-		malloc_mutex_unlock(&arena->chunks_mtx);
-		chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
-		    ret, size, zeroed, committed);
+		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+		chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
+		    cache, ret, size, *sn, zeroed, committed);
 		return (NULL);
 	}
-	malloc_mutex_unlock(&arena->chunks_mtx);
+	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 
 	assert(dalloc_node || node != NULL);
 	if (dalloc_node && node != NULL)
-		arena_node_dalloc(arena, node);
+		arena_node_dalloc(tsdn, arena, node);
 	if (*zero) {
 		if (!zeroed)
 			memset(ret, 0, size);
@@ -313,10 +318,11 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
 			size_t i;
 			size_t *p = (size_t *)(uintptr_t)ret;
 
-			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
 			for (i = 0; i < size / sizeof(size_t); i++)
 				assert(p[i] == 0);
 		}
+		if (config_valgrind)
+			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
 	}
 	return (ret);
 }
@@ -328,39 +334,29 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
  * them if they are returned.
  */
 static void *
-chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit, dss_prec_t dss_prec)
+chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
 {
 	void *ret;
-	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 	assert(alignment != 0);
 	assert((alignment & chunksize_mask) == 0);
 
-	/* Retained. */
-	if ((ret = chunk_recycle(arena, &chunk_hooks,
-	    &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
-	    new_addr, size, alignment, zero, commit, true)) != NULL)
-		return (ret);
-
 	/* "primary" dss. */
 	if (have_dss && dss_prec == dss_prec_primary && (ret =
-	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
-	    NULL)
-		return (ret);
-	/*
-	 * mmap.  Requesting an address is not implemented for
-	 * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
-	 */
-	if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
+	    chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
 	    commit)) != NULL)
 		return (ret);
+	/* mmap. */
+	if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
+	    NULL)
+		return (ret);
 	/* "secondary" dss. */
 	if (have_dss && dss_prec == dss_prec_secondary && (ret =
-	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
-	    NULL)
+	    chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+	    commit)) != NULL)
 		return (ret);
 
 	/* All strategies for allocation failed. */
@@ -380,7 +376,7 @@ chunk_alloc_base(size_t size)
 	 */
 	zero = true;
 	commit = true;
-	ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
+	ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
 	if (ret == NULL)
 		return (NULL);
 	if (config_valgrind)
@@ -390,37 +386,33 @@ chunk_alloc_base(size_t size)
 }
 
 void *
-chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool dalloc_node)
+chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit, bool dalloc_node)
 {
 	void *ret;
-	bool commit;
 
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 	assert(alignment != 0);
 	assert((alignment & chunksize_mask) == 0);
 
-	commit = true;
-	ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
-	    &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
-	    &commit, dalloc_node);
+	ret = chunk_recycle(tsdn, arena, chunk_hooks,
+	    &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
+	    new_addr, size, alignment, sn, zero, commit, dalloc_node);
 	if (ret == NULL)
 		return (NULL);
-	assert(commit);
 	if (config_valgrind)
 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	return (ret);
 }
 
 static arena_t *
-chunk_arena_get(unsigned arena_ind)
+chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
 {
 	arena_t *arena;
 
-	/* Dodge tsd for a0 in order to avoid bootstrapping issues. */
-	arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
-	    false, true);
+	arena = arena_get(tsdn, arena_ind, false);
 	/*
 	 * The arena we're allocating on behalf of must have been initialized
 	 * already.
@@ -430,14 +422,12 @@ chunk_arena_get(unsigned arena_ind)
 }
 
 static void *
-chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
-    bool *commit, unsigned arena_ind)
+chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool *commit)
 {
 	void *ret;
-	arena_t *arena;
 
-	arena = chunk_arena_get(arena_ind);
-	ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
+	ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
 	    commit, arena->dss_prec);
 	if (ret == NULL)
 		return (NULL);
@@ -447,26 +437,80 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
 	return (ret);
 }
 
-void *
-chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool *commit)
+static void *
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool *commit, unsigned arena_ind)
 {
+	tsdn_t *tsdn;
+	arena_t *arena;
+
+	tsdn = tsdn_fetch();
+	arena = chunk_arena_get(tsdn, arena_ind);
+
+	return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
+	    zero, commit));
+}
+
+static void *
+chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit)
+{
 	void *ret;
 
-	chunk_hooks_assure_initialized(arena, chunk_hooks);
-	ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
-	    arena->ind);
-	if (ret == NULL)
-		return (NULL);
-	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
+	assert(size != 0);
+	assert((size & chunksize_mask) == 0);
+	assert(alignment != 0);
+	assert((alignment & chunksize_mask) == 0);
+
+	ret = chunk_recycle(tsdn, arena, chunk_hooks,
+	    &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
+	    new_addr, size, alignment, sn, zero, commit, true);
+
+	if (config_stats && ret != NULL)
+		arena->stats.retained -= size;
+
+	return (ret);
+}
+
+void *
+chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit)
+{
+	void *ret;
+
+	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+
+	ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
+	    alignment, sn, zero, commit);
+	if (ret == NULL) {
+		if (chunk_hooks->alloc == chunk_alloc_default) {
+			/* Call directly to propagate tsdn. */
+			ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
+			    size, alignment, zero, commit);
+		} else {
+			ret = chunk_hooks->alloc(new_addr, size, alignment,
+			    zero, commit, arena->ind);
+		}
+
+		if (ret == NULL)
+			return (NULL);
+
+		*sn = arena_extent_sn_next(arena);
+
+		if (config_valgrind && chunk_hooks->alloc !=
+		    chunk_alloc_default)
+			JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
+	}
+
 	return (ret);
 }
 
 static void
-chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
-    void *chunk, size_t size, bool zeroed, bool committed)
+chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
+    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
 {
 	bool unzeroed;
 	extent_node_t *node, *prev;
@@ -476,9 +520,9 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
 	unzeroed = cache || !zeroed;
 	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 
-	malloc_mutex_lock(&arena->chunks_mtx);
-	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
-	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
+	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
+	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
 	    false, false);
 	node = extent_tree_ad_nsearch(chunks_ad, &key);
 	/* Try to coalesce forward. */
@@ -490,19 +534,21 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
 		/*
 		 * Coalesce chunk with the following address range.  This does
 		 * not change the position within chunks_ad, so only
-		 * remove/insert from/into chunks_szad.
+		 * remove/insert from/into chunks_szsnad.
 		 */
-		extent_tree_szad_remove(chunks_szad, node);
+		extent_tree_szsnad_remove(chunks_szsnad, node);
 		arena_chunk_cache_maybe_remove(arena, node, cache);
 		extent_node_addr_set(node, chunk);
 		extent_node_size_set(node, size + extent_node_size_get(node));
+		if (sn < extent_node_sn_get(node))
+			extent_node_sn_set(node, sn);
 		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
 		    !unzeroed);
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 	} else {
 		/* Coalescing forward failed, so insert a new node. */
-		node = arena_node_alloc(arena);
+		node = arena_node_alloc(tsdn, arena);
 		if (node == NULL) {
 			/*
 			 * Node allocation failed, which is an exceedingly
@@ -511,15 +557,15 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
 			 * a virtual memory leak.
 			 */
 			if (cache) {
-				chunk_purge_wrapper(arena, chunk_hooks, chunk,
-				    size, 0, size);
+				chunk_purge_wrapper(tsdn, arena, chunk_hooks,
+				    chunk, size, 0, size);
 			}
 			goto label_return;
 		}
-		extent_node_init(node, arena, chunk, size, !unzeroed,
+		extent_node_init(node, arena, chunk, size, sn, !unzeroed,
 		    committed);
 		extent_tree_ad_insert(chunks_ad, node);
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 	}
 
@@ -533,31 +579,33 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
 		/*
 		 * Coalesce chunk with the previous address range.  This does
 		 * not change the position within chunks_ad, so only
-		 * remove/insert node from/into chunks_szad.
+		 * remove/insert node from/into chunks_szsnad.
 		 */
-		extent_tree_szad_remove(chunks_szad, prev);
+		extent_tree_szsnad_remove(chunks_szsnad, prev);
 		extent_tree_ad_remove(chunks_ad, prev);
 		arena_chunk_cache_maybe_remove(arena, prev, cache);
-		extent_tree_szad_remove(chunks_szad, node);
+		extent_tree_szsnad_remove(chunks_szsnad, node);
 		arena_chunk_cache_maybe_remove(arena, node, cache);
 		extent_node_addr_set(node, extent_node_addr_get(prev));
 		extent_node_size_set(node, extent_node_size_get(prev) +
 		    extent_node_size_get(node));
+		if (extent_node_sn_get(prev) < extent_node_sn_get(node))
+			extent_node_sn_set(node, extent_node_sn_get(prev));
 		extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
 		    extent_node_zeroed_get(node));
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 
-		arena_node_dalloc(arena, prev);
+		arena_node_dalloc(tsdn, arena, prev);
 	}
 
 label_return:
-	malloc_mutex_unlock(&arena->chunks_mtx);
+	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 }
 
 void
-chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
-    size_t size, bool committed)
+chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, size_t sn, bool committed)
 {
 
 	assert(chunk != NULL);
@@ -565,24 +613,49 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-	chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
-	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
-	arena_maybe_purge(arena);
+	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
+	    &arena->chunks_ad_cached, true, chunk, size, sn, false,
+	    committed);
+	arena_maybe_purge(tsdn, arena);
 }
 
+static bool
+chunk_dalloc_default_impl(void *chunk, size_t size)
+{
+
+	if (!have_dss || !chunk_in_dss(chunk))
+		return (chunk_dalloc_mmap(chunk, size));
+	return (true);
+}
+
+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+    unsigned arena_ind)
+{
+
+	return (chunk_dalloc_default_impl(chunk, size));
+}
+
 void
-chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
-    size_t size, bool zeroed, bool committed)
+chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
 {
+	bool err;
 
 	assert(chunk != NULL);
 	assert(CHUNK_ADDR2BASE(chunk) == chunk);
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-	chunk_hooks_assure_initialized(arena, chunk_hooks);
+	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
 	/* Try to deallocate. */
-	if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
+	if (chunk_hooks->dalloc == chunk_dalloc_default) {
+		/* Call directly to propagate tsdn. */
+		err = chunk_dalloc_default_impl(chunk, size);
+	} else
+		err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+
+	if (!err)
 		return;
 	/* Try to decommit; purge if that fails. */
 	if (committed) {
@@ -591,29 +664,12 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
 	}
 	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
 	    arena->ind);
-	chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
-	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
-}
+	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
+	    &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
+	    committed);
 
-static bool
-chunk_dalloc_default(void *chunk, size_t size, bool committed,
-    unsigned arena_ind)
-{
-
-	if (!have_dss || !chunk_in_dss(chunk))
-		return (chunk_dalloc_mmap(chunk, size));
-	return (true);
-}
-
-void
-chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
-    size_t size, bool committed)
-{
-
-	chunk_hooks_assure_initialized(arena, chunk_hooks);
-	chunk_hooks->dalloc(chunk, size, committed, arena->ind);
-	if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
-		JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+	if (config_stats)
+		arena->stats.retained += size;
 }
 
 static bool
@@ -634,8 +690,9 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
 	    length));
 }
 
-bool
-chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
 {
 
 	assert(chunk != NULL);
@@ -648,21 +705,12 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
 	    length));
 }
 
-static bool
-chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
-    unsigned arena_ind)
-{
-
-	return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
-	    length));
-}
-
 bool
-chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
-    size_t size, size_t offset, size_t length)
+chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, size_t offset, size_t length)
 {
 
-	chunk_hooks_assure_initialized(arena, chunk_hooks);
+	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
 	return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
 }
 
@@ -677,23 +725,30 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
 }
 
 static bool
-chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
-    bool committed, unsigned arena_ind)
+chunk_merge_default_impl(void *chunk_a, void *chunk_b)
 {
 
 	if (!maps_coalesce)
 		return (true);
-	if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
+	if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
 		return (true);
 
 	return (false);
 }
 
+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+
+	return (chunk_merge_default_impl(chunk_a, chunk_b));
+}
+
 static rtree_node_elm_t *
 chunks_rtree_node_alloc(size_t nelms)
 {
 
-	return ((rtree_node_elm_t *)base_alloc(nelms *
+	return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
 	    sizeof(rtree_node_elm_t)));
 }
 
@@ -716,7 +771,7 @@ chunk_boot(void)
 	 * so pages_map will always take fast path.
 	 */
 	if (!opt_lg_chunk) {
-		opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
+		opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
 		    - 1;
 	}
 #else
@@ -730,32 +785,11 @@ chunk_boot(void)
 	chunksize_mask = chunksize - 1;
 	chunk_npages = (chunksize >> LG_PAGE);
 
-	if (have_dss && chunk_dss_boot())
-		return (true);
-	if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
-	    opt_lg_chunk, chunks_rtree_node_alloc, NULL))
+	if (have_dss)
+		chunk_dss_boot();
+	if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
+	    opt_lg_chunk), chunks_rtree_node_alloc, NULL))
 		return (true);
 
 	return (false);
 }
-
-void
-chunk_prefork(void)
-{
-
-	chunk_dss_prefork();
-}
-
-void
-chunk_postfork_parent(void)
-{
-
-	chunk_dss_postfork_parent();
-}
-
-void
-chunk_postfork_child(void)
-{
-
-	chunk_dss_postfork_child();
-}
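A recurring change in the hunks above is the rename of the chunks_szad trees to chunks_szsnad: jemalloc 4.4 adds an extent serial number (sn) so that first-best-fit prefers the oldest chunk among equal-size candidates ("select the oldest/lowest chunk that best fits"), biasing reuse toward old memory so that younger chunks can drain and be released. A minimal self-contained sketch of that three-key ordering (illustrative only; the real comparators live in jemalloc's extent headers, not in this file):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative three-level key for the chunks_szsnad trees: order by
     * size, break ties by extent serial number (smaller sn = older extent
     * wins), then by address (lowest first). */
    struct extent_key {
        size_t    size;
        size_t    sn;     /* serial number assigned at allocation time */
        uintptr_t addr;
    };

    static int
    extent_szsnad_cmp(const struct extent_key *a, const struct extent_key *b)
    {
        if (a->size != b->size)
            return (a->size < b->size) ? -1 : 1;
        if (a->sn != b->sn)
            return (a->sn < b->sn) ? -1 : 1;     /* oldest first */
        if (a->addr != b->addr)
            return (a->addr < b->addr) ? -1 : 1; /* lowest address first */
        return 0;
    }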
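The other pattern threaded through the diff is tsdn_t propagation: every path that may take arena->chunks_mtx now receives the thread-specific-data handle explicitly, and the wrappers compare the installed hook against the built-in default so they can call the _impl variant directly instead of dropping the tsdn at the hook's public signature ("Call directly to propagate tsdn."). A condensed, compilable sketch of that dispatch, with simplified stub types and bodies (the real signatures are the chunk_alloc_t/tsdn_t ones in the hunks above):

    #include <stddef.h>

    typedef struct tsdn_s tsdn_t;
    typedef void *(chunk_alloc_t)(void *new_addr, size_t size,
        size_t alignment, int *zero, int *commit, unsigned arena_ind);

    static void *
    chunk_alloc_default_impl(tsdn_t *tsdn, void *new_addr, size_t size,
        size_t alignment, int *zero, int *commit)
    {
        (void)tsdn; (void)new_addr; (void)size; (void)alignment;
        (void)zero; (void)commit;
        return NULL;    /* stub: the real code allocates a chunk here */
    }

    static void *
    chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
        int *zero, int *commit, unsigned arena_ind)
    {
        (void)arena_ind;
        /* The real hook recovers the handle via tsdn_fetch(). */
        return chunk_alloc_default_impl(NULL, new_addr, size, alignment,
            zero, commit);
    }

    static void *
    alloc_via_hook(tsdn_t *tsdn, chunk_alloc_t *hook, unsigned arena_ind,
        void *new_addr, size_t size, size_t alignment, int *zero, int *commit)
    {
        if (hook == chunk_alloc_default) {
            /* Call directly to propagate tsdn. */
            return chunk_alloc_default_impl(tsdn, new_addr, size,
                alignment, zero, commit);
        }
        /* Custom hook: public signature, tsdn cannot be carried across. */
        return hook(new_addr, size, alignment, zero, commit, arena_ind);
    }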