Mirror of https://github.com/fluencelabs/redis, synced 2025-06-18 03:31:21 +00:00.
Jemalloc updated to 3.6.0.
Not a single bug in about 3 months, and our previous version was too old (3.2.0).
Changed file: deps/jemalloc/src/chunk.c (vendored), 108 lines changed.
@@ -78,6 +78,9 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 	assert(node->size >= leadsize + size);
 	trailsize = node->size - leadsize - size;
 	ret = (void *)((uintptr_t)node->addr + leadsize);
+	zeroed = node->zeroed;
+	if (zeroed)
+		*zero = true;
 	/* Remove node from the tree. */
 	extent_tree_szad_remove(chunks_szad, node);
 	extent_tree_ad_remove(chunks_ad, node);
@@ -108,23 +111,26 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 		}
 		node->addr = (void *)((uintptr_t)(ret) + size);
 		node->size = trailsize;
+		node->zeroed = zeroed;
 		extent_tree_szad_insert(chunks_szad, node);
 		extent_tree_ad_insert(chunks_ad, node);
 		node = NULL;
 	}
 	malloc_mutex_unlock(&chunks_mtx);
 
-	zeroed = false;
-	if (node != NULL) {
-		if (node->zeroed) {
-			zeroed = true;
-			*zero = true;
-		}
+	if (node != NULL)
 		base_node_dealloc(node);
-	}
-	if (zeroed == false && *zero) {
-		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-		memset(ret, 0, size);
+	if (*zero) {
+		if (zeroed == false)
+			memset(ret, 0, size);
+		else if (config_debug) {
+			size_t i;
+			size_t *p = (size_t *)(uintptr_t)ret;
+
+			VALGRIND_MAKE_MEM_DEFINED(ret, size);
+			for (i = 0; i < size / sizeof(size_t); i++)
+				assert(p[i] == 0);
+		}
 	}
 	return (ret);
 }
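The two chunk_recycle() hunks above change how a recycled chunk satisfies a *zero request: the zeroed flag now travels with the extent node, memset() runs only when the caller wants zeroed memory and the chunk is not already known to be zeroed, and debug builds verify the claim instead of re-zeroing. Below is a minimal compilable sketch of that policy; finish_recycle() and the plain debug flag are hypothetical stand-ins for jemalloc's internals (config_debug, the Valgrind client requests), not the vendored source.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper mirroring the zeroing policy in the hunks above. */
static void
finish_recycle(void *ret, size_t size, bool zeroed, bool *zero, bool debug)
{

	if (*zero) {
		if (zeroed == false) {
			/* Chunk may hold stale bytes; zero it on demand. */
			memset(ret, 0, size);
		} else if (debug) {
			/* Already zeroed: verify rather than re-zero. */
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
}

int
main(void)
{
	size_t buf[8] = {0};
	bool zero = true;

	/* Already-zeroed chunk: the sketch verifies instead of re-zeroing. */
	finish_recycle(buf, sizeof(buf), true, &zero, true);
	return (0);
}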
@@ -172,35 +178,32 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
 	/* All strategies for allocation failed. */
 	ret = NULL;
 label_return:
-	if (config_ivsalloc && base == false && ret != NULL) {
-		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
-			chunk_dealloc(ret, size, true);
-			return (NULL);
+	if (ret != NULL) {
+		if (config_ivsalloc && base == false) {
+			if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
+				chunk_dealloc(ret, size, true);
+				return (NULL);
+			}
 		}
-	}
-	if ((config_stats || config_prof) && ret != NULL) {
-		bool gdump;
-		malloc_mutex_lock(&chunks_mtx);
-		if (config_stats)
-			stats_chunks.nchunks += (size / chunksize);
-		stats_chunks.curchunks += (size / chunksize);
-		if (stats_chunks.curchunks > stats_chunks.highchunks) {
-			stats_chunks.highchunks = stats_chunks.curchunks;
-			if (config_prof)
-				gdump = true;
-		} else if (config_prof)
-			gdump = false;
-		malloc_mutex_unlock(&chunks_mtx);
-		if (config_prof && opt_prof && opt_prof_gdump && gdump)
-			prof_gdump();
-	}
-	if (config_debug && *zero && ret != NULL) {
-		size_t i;
-		size_t *p = (size_t *)(uintptr_t)ret;
-
-		VALGRIND_MAKE_MEM_DEFINED(ret, size);
-		for (i = 0; i < size / sizeof(size_t); i++)
-			assert(p[i] == 0);
+		if (config_stats || config_prof) {
+			bool gdump;
+			malloc_mutex_lock(&chunks_mtx);
+			if (config_stats)
+				stats_chunks.nchunks += (size / chunksize);
+			stats_chunks.curchunks += (size / chunksize);
+			if (stats_chunks.curchunks > stats_chunks.highchunks) {
+				stats_chunks.highchunks =
+				    stats_chunks.curchunks;
+				if (config_prof)
+					gdump = true;
+			} else if (config_prof)
+				gdump = false;
+			malloc_mutex_unlock(&chunks_mtx);
+			if (config_prof && opt_prof && opt_prof_gdump && gdump)
+				prof_gdump();
+		}
+		if (config_valgrind)
+			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	}
 	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
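Two things to note in the chunk_alloc() hunk above. First, the rtree now records plain 0/1 membership for ivsalloc instead of storing the chunk pointer itself: compare rtree_set(..., ret) with rtree_set(..., 1), and the matching rtree_set(..., 0) in the chunk_dealloc() hunk further down. Second, the repeated "&& ret != NULL" guards collapse into one success branch. A toy sketch of that control-flow refactor follows, with hypothetical register_chunk()/update_stats() stand-ins for the rtree and stats bookkeeping:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the rtree and stats bookkeeping. */
static void register_chunk(void *p) { printf("track %p\n", p); }
static void update_stats(size_t n) { printf("curchunks += %zu\n", n); }

static void *
chunk_alloc_sketch(size_t size)
{
	void *ret = malloc(size);

	/* One NULL check instead of "&& ret != NULL" on every step. */
	if (ret != NULL) {
		register_chunk(ret);
		update_stats(size);
	}
	return (ret);
}

int
main(void)
{

	free(chunk_alloc_sketch(64));
	return (0);
}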
@@ -211,9 +214,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
     size_t size)
 {
 	bool unzeroed;
-	extent_node_t *xnode, *node, *prev, key;
+	extent_node_t *xnode, *node, *prev, *xprev, key;
 
 	unzeroed = pages_purge(chunk, size);
 	VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 
 	/*
 	 * Allocate a node before acquiring chunks_mtx even though it might not
@@ -222,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 	 * held.
 	 */
 	xnode = base_node_alloc();
+	/* Use xprev to implement conditional deferred deallocation of prev. */
+	xprev = NULL;
 
 	malloc_mutex_lock(&chunks_mtx);
 	key.addr = (void *)((uintptr_t)chunk + size);
@@ -238,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 		node->size += size;
 		node->zeroed = (node->zeroed && (unzeroed == false));
 		extent_tree_szad_insert(chunks_szad, node);
-		if (xnode != NULL)
-			base_node_dealloc(xnode);
 	} else {
 		/* Coalescing forward failed, so insert a new node. */
 		if (xnode == NULL) {
@@ -249,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 			 * already been purged, so this is only a virtual
 			 * memory leak.
 			 */
-			malloc_mutex_unlock(&chunks_mtx);
-			return;
+			goto label_return;
 		}
 		node = xnode;
+		xnode = NULL; /* Prevent deallocation below. */
 		node->addr = chunk;
 		node->size = size;
 		node->zeroed = (unzeroed == false);
@@ -278,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 		node->zeroed = (node->zeroed && prev->zeroed);
 		extent_tree_szad_insert(chunks_szad, node);
 
-		base_node_dealloc(prev);
+		xprev = prev;
 	}
 
+label_return:
 	malloc_mutex_unlock(&chunks_mtx);
+	/*
+	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
+	 * avoid potential deadlock.
+	 */
+	if (xnode != NULL)
+		base_node_dealloc(xnode);
+	if (xprev != NULL)
+		base_node_dealloc(xprev);
 }
 
 void
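The xprev/label_return changes in chunk_record() are all one fix: base_node_dealloc() must not run while chunks_mtx is held, so nodes are only unlinked under the lock and the actual deallocation is deferred until after malloc_mutex_unlock(). A simplified pthreads sketch of the same pattern (toy list type and names, not the jemalloc source):

#include <pthread.h>
#include <stdlib.h>

typedef struct node_s node_t;
struct node_s {
	node_t *next;
};

static pthread_mutex_t tree_mtx = PTHREAD_MUTEX_INITIALIZER;
static node_t *tree_head;

static void
coalesce_and_free(void)
{
	node_t *deferred = NULL;

	pthread_mutex_lock(&tree_mtx);
	if (tree_head != NULL) {
		/* Unlink under the lock, but do not free yet. */
		deferred = tree_head;
		tree_head = deferred->next;
	}
	pthread_mutex_unlock(&tree_mtx);

	/*
	 * Free after unlocking, mirroring xnode/xprev above: if free()
	 * (or base_node_dealloc()) ever needed tree_mtx, freeing while
	 * holding it could deadlock.
	 */
	free(deferred);
}

int
main(void)
{

	coalesce_and_free();	/* Empty tree: frees nothing. */
	return (0);
}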
@@ -307,7 +321,7 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 	assert((size & chunksize_mask) == 0);
 
 	if (config_ivsalloc)
-		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
+		rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
 	if (config_stats || config_prof) {
 		malloc_mutex_lock(&chunks_mtx);
 		assert(stats_chunks.curchunks >= (size / chunksize));
@@ -342,7 +356,7 @@ chunk_boot(void)
 	extent_tree_ad_new(&chunks_ad_dss);
 	if (config_ivsalloc) {
 		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
-		    opt_lg_chunk);
+		    opt_lg_chunk, base_alloc, NULL);
 		if (chunks_rtree == NULL)
 			return (true);
 	}
@@ -354,7 +368,7 @@ void
 chunk_prefork(void)
 {
 
-	malloc_mutex_lock(&chunks_mtx);
+	malloc_mutex_prefork(&chunks_mtx);
 	if (config_ivsalloc)
 		rtree_prefork(chunks_rtree);
 	chunk_dss_prefork();
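The last hunk switches chunk_prefork() from malloc_mutex_lock() to malloc_mutex_prefork(), jemalloc's fork-handler wrapper. The underlying pattern is pthread_atfork(): acquire every allocator mutex before fork() so no lock is mid-operation when the address space is cloned, then release (or, in the child, reinitialize) afterwards. A bare-bones sketch under those assumptions, with a hypothetical chunks_mtx_sketch in place of jemalloc's malloc_mutex_t:

#include <pthread.h>

static pthread_mutex_t chunks_mtx_sketch = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{

	/* Quiesce the allocator: no thread may hold this across fork(). */
	pthread_mutex_lock(&chunks_mtx_sketch);
}

static void
postfork_parent(void)
{

	pthread_mutex_unlock(&chunks_mtx_sketch);
}

static void
postfork_child(void)
{

	/*
	 * The child inherits the locked mutex; jemalloc reinitializes its
	 * mutexes at this point, which is safer than a plain unlock.
	 */
	pthread_mutex_unlock(&chunks_mtx_sketch);
}

int
main(void)
{

	pthread_atfork(prefork, postfork_parent, postfork_child);
	return (0);
}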