Jemalloc updated to 4.4.0.

The original jemalloc source tree was modified to:

1. Remove the configure error that prevents nested builds.
2. Insert the Redis private Jemalloc API in order to allow the
Redis defragmentation function to work (a usage sketch follows below).
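
For context, the sketch below shows how a caller such as the Redis active
defragmentation cycle might consume that private API. The je_get_defrag_hint()
name, its signature, and its return convention are assumptions made for
illustration only; they are not part of the diff shown in this commit.

    /* Usage sketch only: the private API below is an assumed signature,
     * not copied from this commit. */

    /* Assumed entry point: reports the utilization of the size class (bin)
     * and of the specific run backing `ptr`; returns 0 if no hint is
     * available (e.g. for huge allocations). */
    int je_get_defrag_hint(void *ptr, int *bin_util, int *run_util);

    /* Relocate `ptr` only when its own run is less utilized than the size
     * class on average, i.e. when moving it could help empty a sparse run. */
    static int should_defrag(void *ptr)
    {
        int bin_util = 0, run_util = 0;

        if (!je_get_defrag_hint(ptr, &bin_util, &run_util))
            return 0;
        return run_util < bin_util;
    }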
Author: antirez
Date:   2017-01-30 09:58:34 +01:00
Parent: ca532c94ef
Commit: 153f2f00ea

150 changed files with 17254 additions and 6339 deletions

deps/jemalloc/src/tcache.c (vendored, 170 changed lines, file mode: Normal file → Executable file)

@@ -10,7 +10,7 @@ ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */
size_t nhbins;
unsigned nhbins;
size_t tcache_maxclass;
tcaches_t *tcaches;
@@ -23,10 +23,11 @@ static tcaches_t *tcaches_avail;
/******************************************************************************/
size_t tcache_salloc(const void *ptr)
size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr)
{
return (arena_salloc(ptr, false));
return (arena_salloc(tsdn, ptr, false));
}
void
@@ -67,20 +68,19 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
}
void *
tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind)
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
{
void *ret;
arena_tcache_fill_small(arena, tbin, binind, config_prof ?
arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin);
ret = tcache_alloc_easy(tbin, tcache_success);
return (ret);
}
@@ -102,17 +102,18 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
*(tbin->avail - 1));
arena_t *bin_arena = extent_node_arena_get(&chunk->node);
arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) {
if (arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
if (arena_prof_accum(tsd_tsdn(tsd), arena,
tcache->prof_accumbytes))
prof_idump(tsd_tsdn(tsd));
tcache->prof_accumbytes = 0;
}
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
if (config_stats && bin_arena == arena) {
assert(!merged_stats);
merged_stats = true;
@@ -122,16 +123,16 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
ptr = *(tbin->avail - 1 - i);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) == bin_arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =
arena_bitselm_get(chunk, pageind);
arena_dalloc_bin_junked_locked(bin_arena, chunk,
ptr, bitselm);
arena_bitselm_get_mutable(chunk, pageind);
arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
bin_arena, chunk, ptr, bitselm);
} else {
/*
* This object was allocated via a different
@@ -139,11 +140,12 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
tbin->avail[ndeferred] = ptr;
*(tbin->avail - 1 - ndeferred) = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
}
if (config_stats && !merged_stats) {
/*
@@ -151,15 +153,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
@@ -182,13 +184,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
*(tbin->avail - 1));
arena_t *locked_arena = extent_node_arena_get(&chunk->node);
UNUSED bool idump;
if (config_prof)
idump = false;
malloc_mutex_lock(&locked_arena->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
@@ -206,13 +208,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
ptr = *(tbin->avail - 1 - i);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) ==
locked_arena) {
arena_dalloc_large_junked_locked(locked_arena,
chunk, ptr);
arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
locked_arena, chunk, ptr);
} else {
/*
* This object was allocated via a different
@@ -220,62 +222,56 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
* Stash the object, so that it can be handled
* in a future pass.
*/
tbin->avail[ndeferred] = ptr;
*(tbin->avail - 1 - ndeferred) = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&locked_arena->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
if (config_prof && idump)
prof_idump();
prof_idump(tsd_tsdn(tsd));
arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
ndeferred);
}
if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
static void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
}
}
void
tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
{
tcache_arena_dissociate(tcache, oldarena);
tcache_arena_associate(tcache, newarena);
}
void
tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
if (config_debug) {
bool in_ql = false;
tcache_t *iter;
@@ -288,11 +284,20 @@ tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
assert(in_ql);
}
ql_remove(&arena->tcache_ql, tcache, link);
tcache_stats_merge(tcache, arena);
malloc_mutex_unlock(&arena->lock);
tcache_stats_merge(tsdn, tcache, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
}
}
void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
arena_t *newarena)
{
tcache_arena_dissociate(tsdn, tcache, oldarena);
tcache_arena_associate(tsdn, tcache, newarena);
}
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
@@ -306,11 +311,11 @@ tcache_get_hard(tsd_t *tsd)
arena = arena_choose(tsd, NULL);
if (unlikely(arena == NULL))
return (NULL);
return (tcache_create(tsd, arena));
return (tcache_create(tsd_tsdn(tsd), arena));
}
tcache_t *
tcache_create(tsd_t *tsd, arena_t *arena)
tcache_create(tsdn_t *tsdn, arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
@@ -324,18 +329,26 @@ tcache_create(tsd_t *tsd, arena_t *arena)
/* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE);
tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
arena_get(TSDN_NULL, 0, true));
if (tcache == NULL)
return (NULL);
tcache_arena_associate(tcache, arena);
tcache_arena_associate(tsdn, tcache, arena);
ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
for (i = 0; i < nhbins; i++) {
tcache->tbins[i].lg_fill_div = 1;
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
/*
* avail points past the available space. Allocations will
* access the slots toward higher addresses (for the benefit of
* prefetch).
*/
tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
(uintptr_t)stack_offset);
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
return (tcache);
@@ -348,7 +361,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
unsigned i;
arena = arena_choose(tsd, NULL);
tcache_arena_dissociate(tcache, arena);
tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
@@ -356,9 +369,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
if (config_stats && tbin->tstats.nrequests != 0) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
}
@@ -367,19 +380,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
prof_idump(tsd_tsdn(tsd));
idalloctm(tsd, tcache, false, true);
idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
}
void
@@ -403,21 +416,22 @@ tcache_enabled_cleanup(tsd_t *tsd)
/* Do nothing. */
}
/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
unsigned i;
cassert(config_stats);
malloc_mutex_assert_owner(tsdn, &arena->lock);
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsdn, &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsdn, &bin->lock);
tbin->tstats.nrequests = 0;
}
@@ -433,11 +447,12 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
arena_t *arena;
tcache_t *tcache;
tcaches_t *elm;
if (tcaches == NULL) {
tcaches = base_alloc(sizeof(tcache_t *) *
tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
(MALLOCX_TCACHE_MAX+1));
if (tcaches == NULL)
return (true);
@@ -445,7 +460,10 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
return (true);
tcache = tcache_create(tsd, a0get());
arena = arena_ichoose(tsd, NULL);
if (unlikely(arena == NULL))
return (true);
tcache = tcache_create(tsd_tsdn(tsd), arena);
if (tcache == NULL)
return (true);
@@ -453,7 +471,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
elm = tcaches_avail;
tcaches_avail = tcaches_avail->next;
elm->tcache = tcache;
*r_ind = elm - tcaches;
*r_ind = (unsigned)(elm - tcaches);
} else {
elm = &tcaches[tcaches_past];
elm->tcache = tcache;
@@ -491,7 +509,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
}
bool
tcache_boot(void)
tcache_boot(tsdn_t *tsdn)
{
unsigned i;
@@ -499,17 +517,17 @@ tcache_boot(void)
* If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
* known.
*/
if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
else if ((1U << opt_lg_tcache_max) > large_maxclass)
else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
tcache_maxclass = large_maxclass;
else
tcache_maxclass = (1U << opt_lg_tcache_max);
tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);