Mirror of https://github.com/fluencelabs/redis, synced 2025-06-21 13:01:32 +00:00.
Revert "Jemalloc updated to 4.4.0."
This reverts commit 153f2f00ea.
Jemalloc 4.4.0 is apparently causing deadlocks in certain
systems. See for example https://github.com/antirez/redis/issues/3799.
As a cautionary step we are reverting the commit and
releasing a new stable Redis version.
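For context, most of the diff below undoes 4.4.0's change of threading an explicit tsdn_t handle through the tcache API; the restored 4.0.x code takes a tsd_t or no handle at all. A condensed, declaration-only sketch of that difference, with the signatures copied from the hunks below (illustrative only, not a complete or buildable header):

/* jemalloc 4.4.0 (being reverted): an explicit tsdn_t is passed around. */
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
bool tcache_boot(tsdn_t *tsdn);

/* jemalloc 4.0.x (being restored): the handle is implicit or passed as tsd_t. */
size_t tcache_salloc(const void *ptr);
void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind);
bool tcache_boot(void);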
Changed file: deps/jemalloc/src/tcache.c (vendored): 170 changed lines, Executable file → Normal file
@@ -10,7 +10,7 @@ ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
 tcache_bin_info_t *tcache_bin_info;
 static unsigned stack_nelms; /* Total stack elms per tcache. */
 
-unsigned nhbins;
+size_t nhbins;
 size_t tcache_maxclass;
 
 tcaches_t *tcaches;
@@ -23,11 +23,10 @@ static tcaches_t *tcaches_avail;
 
 /******************************************************************************/
 
-size_t
-tcache_salloc(tsdn_t *tsdn, const void *ptr)
+size_t tcache_salloc(const void *ptr)
 {
 
-	return (arena_salloc(tsdn, ptr, false));
+	return (arena_salloc(ptr, false));
 }
 
 void
@@ -68,19 +67,20 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 	tcache->next_gc_bin++;
 	if (tcache->next_gc_bin == nhbins)
 		tcache->next_gc_bin = 0;
+	tcache->ev_cnt = 0;
 }
 
 void *
-tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
+tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    tcache_bin_t *tbin, szind_t binind)
 {
 	void *ret;
 
-	arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
+	arena_tcache_fill_small(arena, tbin, binind, config_prof ?
 	    tcache->prof_accumbytes : 0);
 	if (config_prof)
 		tcache->prof_accumbytes = 0;
-	ret = tcache_alloc_easy(tbin, tcache_success);
+	ret = tcache_alloc_easy(tbin);
 
 	return (ret);
 }
@@ -102,18 +102,17 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
 		/* Lock the arena bin associated with the first object. */
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
-		    *(tbin->avail - 1));
+		    tbin->avail[0]);
 		arena_t *bin_arena = extent_node_arena_get(&chunk->node);
 		arena_bin_t *bin = &bin_arena->bins[binind];
 
 		if (config_prof && bin_arena == arena) {
-			if (arena_prof_accum(tsd_tsdn(tsd), arena,
-			    tcache->prof_accumbytes))
-				prof_idump(tsd_tsdn(tsd));
+			if (arena_prof_accum(arena, tcache->prof_accumbytes))
+				prof_idump();
 			tcache->prof_accumbytes = 0;
 		}
 
-		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+		malloc_mutex_lock(&bin->lock);
 		if (config_stats && bin_arena == arena) {
 			assert(!merged_stats);
 			merged_stats = true;
@@ -123,16 +122,16 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		}
 		ndeferred = 0;
 		for (i = 0; i < nflush; i++) {
-			ptr = *(tbin->avail - 1 - i);
+			ptr = tbin->avail[i];
 			assert(ptr != NULL);
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 			if (extent_node_arena_get(&chunk->node) == bin_arena) {
 				size_t pageind = ((uintptr_t)ptr -
 				    (uintptr_t)chunk) >> LG_PAGE;
 				arena_chunk_map_bits_t *bitselm =
-				    arena_bitselm_get_mutable(chunk, pageind);
-				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
-				    bin_arena, chunk, ptr, bitselm);
+				    arena_bitselm_get(chunk, pageind);
+				arena_dalloc_bin_junked_locked(bin_arena, chunk,
+				    ptr, bitselm);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -140,12 +139,11 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 				 * locked. Stash the object, so that it can be
 				 * handled in a future pass.
 				 */
-				*(tbin->avail - 1 - ndeferred) = ptr;
+				tbin->avail[ndeferred] = ptr;
 				ndeferred++;
 			}
 		}
-		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-		arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
+		malloc_mutex_unlock(&bin->lock);
 	}
 	if (config_stats && !merged_stats) {
 		/*
@@ -153,15 +151,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		 * arena, so the stats didn't get merged. Manually do so now.
 		 */
 		arena_bin_t *bin = &arena->bins[binind];
-		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+		malloc_mutex_lock(&bin->lock);
 		bin->stats.nflushes++;
 		bin->stats.nrequests += tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
-		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+		malloc_mutex_unlock(&bin->lock);
 	}
 
-	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
-	    sizeof(void *));
+	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+	    rem * sizeof(void *));
 	tbin->ncached = rem;
 	if ((int)tbin->ncached < tbin->low_water)
 		tbin->low_water = tbin->ncached;
@@ -184,13 +182,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
 		/* Lock the arena associated with the first object. */
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
-		    *(tbin->avail - 1));
+		    tbin->avail[0]);
 		arena_t *locked_arena = extent_node_arena_get(&chunk->node);
 		UNUSED bool idump;
 
 		if (config_prof)
 			idump = false;
-		malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
+		malloc_mutex_lock(&locked_arena->lock);
 		if ((config_prof || config_stats) && locked_arena == arena) {
 			if (config_prof) {
 				idump = arena_prof_accum_locked(arena,
@@ -208,13 +206,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 		}
 		ndeferred = 0;
 		for (i = 0; i < nflush; i++) {
-			ptr = *(tbin->avail - 1 - i);
+			ptr = tbin->avail[i];
 			assert(ptr != NULL);
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 			if (extent_node_arena_get(&chunk->node) ==
 			    locked_arena) {
-				arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
-				    locked_arena, chunk, ptr);
+				arena_dalloc_large_junked_locked(locked_arena,
+				    chunk, ptr);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -222,56 +220,62 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 				 * Stash the object, so that it can be handled
 				 * in a future pass.
 				 */
-				*(tbin->avail - 1 - ndeferred) = ptr;
+				tbin->avail[ndeferred] = ptr;
 				ndeferred++;
 			}
 		}
-		malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
+		malloc_mutex_unlock(&locked_arena->lock);
 		if (config_prof && idump)
-			prof_idump(tsd_tsdn(tsd));
-		arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
-		    ndeferred);
+			prof_idump();
 	}
 	if (config_stats && !merged_stats) {
 		/*
 		 * The flush loop didn't happen to flush to this thread's
 		 * arena, so the stats didn't get merged. Manually do so now.
 		 */
-		malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+		malloc_mutex_lock(&arena->lock);
 		arena->stats.nrequests_large += tbin->tstats.nrequests;
 		arena->stats.lstats[binind - NBINS].nrequests +=
 		    tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
-		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+		malloc_mutex_unlock(&arena->lock);
 	}
 
-	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
-	    sizeof(void *));
+	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+	    rem * sizeof(void *));
 	tbin->ncached = rem;
 	if ((int)tbin->ncached < tbin->low_water)
 		tbin->low_water = tbin->ncached;
 }
 
-static void
-tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+void
+tcache_arena_associate(tcache_t *tcache, arena_t *arena)
 {
 
 	if (config_stats) {
 		/* Link into list of extant tcaches. */
-		malloc_mutex_lock(tsdn, &arena->lock);
+		malloc_mutex_lock(&arena->lock);
 		ql_elm_new(tcache, link);
 		ql_tail_insert(&arena->tcache_ql, tcache, link);
-		malloc_mutex_unlock(tsdn, &arena->lock);
+		malloc_mutex_unlock(&arena->lock);
 	}
 }
 
-static void
-tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+void
+tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
 {
 
+	tcache_arena_dissociate(tcache, oldarena);
+	tcache_arena_associate(tcache, newarena);
+}
+
+void
+tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
+{
+
 	if (config_stats) {
 		/* Unlink from list of extant tcaches. */
-		malloc_mutex_lock(tsdn, &arena->lock);
+		malloc_mutex_lock(&arena->lock);
 		if (config_debug) {
 			bool in_ql = false;
 			tcache_t *iter;
@@ -284,20 +288,11 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
 			assert(in_ql);
 		}
 		ql_remove(&arena->tcache_ql, tcache, link);
-		tcache_stats_merge(tsdn, tcache, arena);
-		malloc_mutex_unlock(tsdn, &arena->lock);
+		tcache_stats_merge(tcache, arena);
+		malloc_mutex_unlock(&arena->lock);
 	}
 }
 
-void
-tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
-    arena_t *newarena)
-{
-
-	tcache_arena_dissociate(tsdn, tcache, oldarena);
-	tcache_arena_associate(tsdn, tcache, newarena);
-}
-
 tcache_t *
 tcache_get_hard(tsd_t *tsd)
 {
@@ -311,11 +306,11 @@ tcache_get_hard(tsd_t *tsd)
 	arena = arena_choose(tsd, NULL);
 	if (unlikely(arena == NULL))
 		return (NULL);
-	return (tcache_create(tsd_tsdn(tsd), arena));
+	return (tcache_create(tsd, arena));
 }
 
 tcache_t *
-tcache_create(tsdn_t *tsdn, arena_t *arena)
+tcache_create(tsd_t *tsd, arena_t *arena)
 {
 	tcache_t *tcache;
 	size_t size, stack_offset;
@@ -329,26 +324,18 @@ tcache_create(tsdn_t *tsdn, arena_t *arena)
 	/* Avoid false cacheline sharing. */
 	size = sa2u(size, CACHELINE);
 
-	tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
-	    arena_get(TSDN_NULL, 0, true));
+	tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
 	if (tcache == NULL)
 		return (NULL);
 
-	tcache_arena_associate(tsdn, tcache, arena);
-
-	ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
+	tcache_arena_associate(tcache, arena);
 
 	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
 	for (i = 0; i < nhbins; i++) {
 		tcache->tbins[i].lg_fill_div = 1;
-		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
-		/*
-		 * avail points past the available space. Allocations will
-		 * access the slots toward higher addresses (for the benefit of
-		 * prefetch).
-		 */
 		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
 		    (uintptr_t)stack_offset);
+		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
 	}
 
 	return (tcache);
@@ -361,7 +348,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 	unsigned i;
 
 	arena = arena_choose(tsd, NULL);
-	tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
+	tcache_arena_dissociate(tcache, arena);
 
 	for (i = 0; i < NBINS; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
@@ -369,9 +356,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 
 		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_bin_t *bin = &arena->bins[i];
-			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+			malloc_mutex_lock(&bin->lock);
 			bin->stats.nrequests += tbin->tstats.nrequests;
-			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+			malloc_mutex_unlock(&bin->lock);
 		}
 	}
 
@@ -380,19 +367,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
 
 		if (config_stats && tbin->tstats.nrequests != 0) {
-			malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+			malloc_mutex_lock(&arena->lock);
 			arena->stats.nrequests_large += tbin->tstats.nrequests;
 			arena->stats.lstats[i - NBINS].nrequests +=
 			    tbin->tstats.nrequests;
-			malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+			malloc_mutex_unlock(&arena->lock);
 		}
 	}
 
 	if (config_prof && tcache->prof_accumbytes > 0 &&
-	    arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
-		prof_idump(tsd_tsdn(tsd));
+	    arena_prof_accum(arena, tcache->prof_accumbytes))
+		prof_idump();
 
-	idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
+	idalloctm(tsd, tcache, false, true);
 }
 
 void
@@ -416,22 +403,21 @@ tcache_enabled_cleanup(tsd_t *tsd)
 	/* Do nothing. */
 }
 
+/* Caller must own arena->lock. */
 void
-tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+tcache_stats_merge(tcache_t *tcache, arena_t *arena)
 {
 	unsigned i;
 
 	cassert(config_stats);
 
-	malloc_mutex_assert_owner(tsdn, &arena->lock);
-
 	/* Merge and reset tcache stats. */
 	for (i = 0; i < NBINS; i++) {
 		arena_bin_t *bin = &arena->bins[i];
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		malloc_mutex_lock(tsdn, &bin->lock);
+		malloc_mutex_lock(&bin->lock);
 		bin->stats.nrequests += tbin->tstats.nrequests;
-		malloc_mutex_unlock(tsdn, &bin->lock);
+		malloc_mutex_unlock(&bin->lock);
 		tbin->tstats.nrequests = 0;
 	}
 
@@ -447,12 +433,11 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
 bool
 tcaches_create(tsd_t *tsd, unsigned *r_ind)
 {
-	arena_t *arena;
 	tcache_t *tcache;
 	tcaches_t *elm;
 
 	if (tcaches == NULL) {
-		tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
+		tcaches = base_alloc(sizeof(tcache_t *) *
 		    (MALLOCX_TCACHE_MAX+1));
 		if (tcaches == NULL)
 			return (true);
@@ -460,10 +445,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
 
 	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
 		return (true);
-	arena = arena_ichoose(tsd, NULL);
-	if (unlikely(arena == NULL))
-		return (true);
-	tcache = tcache_create(tsd_tsdn(tsd), arena);
+	tcache = tcache_create(tsd, a0get());
 	if (tcache == NULL)
 		return (true);
 
@@ -471,7 +453,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
 		elm = tcaches_avail;
 		tcaches_avail = tcaches_avail->next;
 		elm->tcache = tcache;
-		*r_ind = (unsigned)(elm - tcaches);
+		*r_ind = elm - tcaches;
 	} else {
 		elm = &tcaches[tcaches_past];
 		elm->tcache = tcache;
@@ -509,7 +491,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
 }
 
 bool
-tcache_boot(tsdn_t *tsdn)
+tcache_boot(void)
 {
 	unsigned i;
 
@@ -517,17 +499,17 @@ tcache_boot(tsdn_t *tsdn)
 	 * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
 	 * known.
 	 */
-	if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
+	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
 		tcache_maxclass = SMALL_MAXCLASS;
-	else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
+	else if ((1U << opt_lg_tcache_max) > large_maxclass)
 		tcache_maxclass = large_maxclass;
 	else
-		tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
+		tcache_maxclass = (1U << opt_lg_tcache_max);
 
 	nhbins = size2index(tcache_maxclass) + 1;
 
 	/* Initialize tcache_bin_info. */
-	tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
+	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
 	    sizeof(tcache_bin_info_t));
 	if (tcache_bin_info == NULL)
 		return (true);