Mirror of https://github.com/fluencelabs/redis, synced 2025-06-18 19:51:22 +00:00.
Jemalloc updated to 3.6.0.
Not a single bug in about 3 months, and our previous version was too old (3.2.0).
deps/jemalloc/src/arena.c (vendored): 1167 changed lines
File diff suppressed because it is too large
deps/jemalloc/src/base.c (vendored): 3 changed lines
@@ -63,6 +63,7 @@ base_alloc(size_t size)
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
malloc_mutex_unlock(&base_mtx);
VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);

return (ret);
}
@@ -88,6 +89,7 @@ base_node_alloc(void)
ret = base_nodes;
base_nodes = *(extent_node_t **)ret;
malloc_mutex_unlock(&base_mtx);
VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
} else {
malloc_mutex_unlock(&base_mtx);
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
@@ -100,6 +102,7 @@ void
base_node_dealloc(extent_node_t *node)
{

VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
deps/jemalloc/src/bitmap.c (vendored): 2 changed lines
@@ -1,4 +1,4 @@
#define JEMALLOC_BITMAP_C_
#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
deps/jemalloc/src/chunk.c (vendored): 108 changed lines
@ -78,6 +78,9 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
|
||||
assert(node->size >= leadsize + size);
|
||||
trailsize = node->size - leadsize - size;
|
||||
ret = (void *)((uintptr_t)node->addr + leadsize);
|
||||
zeroed = node->zeroed;
|
||||
if (zeroed)
|
||||
*zero = true;
|
||||
/* Remove node from the tree. */
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
extent_tree_ad_remove(chunks_ad, node);
|
||||
@ -108,23 +111,26 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
|
||||
}
|
||||
node->addr = (void *)((uintptr_t)(ret) + size);
|
||||
node->size = trailsize;
|
||||
node->zeroed = zeroed;
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
node = NULL;
|
||||
}
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
|
||||
zeroed = false;
|
||||
if (node != NULL) {
|
||||
if (node->zeroed) {
|
||||
zeroed = true;
|
||||
*zero = true;
|
||||
}
|
||||
if (node != NULL)
|
||||
base_node_dealloc(node);
|
||||
}
|
||||
if (zeroed == false && *zero) {
|
||||
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
|
||||
memset(ret, 0, size);
|
||||
if (*zero) {
|
||||
if (zeroed == false)
|
||||
memset(ret, 0, size);
|
||||
else if (config_debug) {
|
||||
size_t i;
|
||||
size_t *p = (size_t *)(uintptr_t)ret;
|
||||
|
||||
VALGRIND_MAKE_MEM_DEFINED(ret, size);
|
||||
for (i = 0; i < size / sizeof(size_t); i++)
|
||||
assert(p[i] == 0);
|
||||
}
|
||||
}
|
||||
return (ret);
|
||||
}
|
||||
@ -172,35 +178,32 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
|
||||
/* All strategies for allocation failed. */
|
||||
ret = NULL;
|
||||
label_return:
|
||||
if (config_ivsalloc && base == false && ret != NULL) {
|
||||
if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
|
||||
chunk_dealloc(ret, size, true);
|
||||
return (NULL);
|
||||
if (ret != NULL) {
|
||||
if (config_ivsalloc && base == false) {
|
||||
if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
|
||||
chunk_dealloc(ret, size, true);
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
}
|
||||
if ((config_stats || config_prof) && ret != NULL) {
|
||||
bool gdump;
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
if (config_stats)
|
||||
stats_chunks.nchunks += (size / chunksize);
|
||||
stats_chunks.curchunks += (size / chunksize);
|
||||
if (stats_chunks.curchunks > stats_chunks.highchunks) {
|
||||
stats_chunks.highchunks = stats_chunks.curchunks;
|
||||
if (config_prof)
|
||||
gdump = true;
|
||||
} else if (config_prof)
|
||||
gdump = false;
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
if (config_prof && opt_prof && opt_prof_gdump && gdump)
|
||||
prof_gdump();
|
||||
}
|
||||
if (config_debug && *zero && ret != NULL) {
|
||||
size_t i;
|
||||
size_t *p = (size_t *)(uintptr_t)ret;
|
||||
|
||||
VALGRIND_MAKE_MEM_DEFINED(ret, size);
|
||||
for (i = 0; i < size / sizeof(size_t); i++)
|
||||
assert(p[i] == 0);
|
||||
if (config_stats || config_prof) {
|
||||
bool gdump;
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
if (config_stats)
|
||||
stats_chunks.nchunks += (size / chunksize);
|
||||
stats_chunks.curchunks += (size / chunksize);
|
||||
if (stats_chunks.curchunks > stats_chunks.highchunks) {
|
||||
stats_chunks.highchunks =
|
||||
stats_chunks.curchunks;
|
||||
if (config_prof)
|
||||
gdump = true;
|
||||
} else if (config_prof)
|
||||
gdump = false;
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
if (config_prof && opt_prof && opt_prof_gdump && gdump)
|
||||
prof_gdump();
|
||||
}
|
||||
if (config_valgrind)
|
||||
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
|
||||
}
|
||||
assert(CHUNK_ADDR2BASE(ret) == ret);
|
||||
return (ret);
|
||||
@ -211,9 +214,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
|
||||
size_t size)
|
||||
{
|
||||
bool unzeroed;
|
||||
extent_node_t *xnode, *node, *prev, key;
|
||||
extent_node_t *xnode, *node, *prev, *xprev, key;
|
||||
|
||||
unzeroed = pages_purge(chunk, size);
|
||||
VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
|
||||
|
||||
/*
|
||||
* Allocate a node before acquiring chunks_mtx even though it might not
|
||||
@ -222,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
|
||||
* held.
|
||||
*/
|
||||
xnode = base_node_alloc();
|
||||
/* Use xprev to implement conditional deferred deallocation of prev. */
|
||||
xprev = NULL;
|
||||
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
key.addr = (void *)((uintptr_t)chunk + size);
|
||||
@ -238,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
|
||||
node->size += size;
|
||||
node->zeroed = (node->zeroed && (unzeroed == false));
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
if (xnode != NULL)
|
||||
base_node_dealloc(xnode);
|
||||
} else {
|
||||
/* Coalescing forward failed, so insert a new node. */
|
||||
if (xnode == NULL) {
|
||||
@ -249,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
|
||||
* already been purged, so this is only a virtual
|
||||
* memory leak.
|
||||
*/
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
return;
|
||||
goto label_return;
|
||||
}
|
||||
node = xnode;
|
||||
xnode = NULL; /* Prevent deallocation below. */
|
||||
node->addr = chunk;
|
||||
node->size = size;
|
||||
node->zeroed = (unzeroed == false);
|
||||
@ -278,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
|
||||
node->zeroed = (node->zeroed && prev->zeroed);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
|
||||
base_node_dealloc(prev);
|
||||
xprev = prev;
|
||||
}
|
||||
|
||||
label_return:
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
/*
|
||||
* Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
|
||||
* avoid potential deadlock.
|
||||
*/
|
||||
if (xnode != NULL)
|
||||
base_node_dealloc(xnode);
|
||||
if (xprev != NULL)
|
||||
base_node_dealloc(xprev);
|
||||
}
|
||||
|
||||
void
|
||||
@ -307,7 +321,7 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
|
||||
assert((size & chunksize_mask) == 0);
|
||||
|
||||
if (config_ivsalloc)
|
||||
rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
|
||||
rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
|
||||
if (config_stats || config_prof) {
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
assert(stats_chunks.curchunks >= (size / chunksize));
|
||||
@ -342,7 +356,7 @@ chunk_boot(void)
|
||||
extent_tree_ad_new(&chunks_ad_dss);
|
||||
if (config_ivsalloc) {
|
||||
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
|
||||
opt_lg_chunk);
|
||||
opt_lg_chunk, base_alloc, NULL);
|
||||
if (chunks_rtree == NULL)
|
||||
return (true);
|
||||
}
|
||||
@ -354,7 +368,7 @@ void
|
||||
chunk_prefork(void)
|
||||
{
|
||||
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
malloc_mutex_prefork(&chunks_mtx);
|
||||
if (config_ivsalloc)
|
||||
rtree_prefork(chunks_rtree);
|
||||
chunk_dss_prefork();
|
||||
|
deps/jemalloc/src/chunk_dss.c (vendored): 15 changed lines
@@ -28,16 +28,17 @@ static void *dss_max;

/******************************************************************************/

#ifndef JEMALLOC_HAVE_SBRK
static void *
sbrk(intptr_t increment)
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_HAVE_SBRK
return (sbrk(increment));
#else
not_implemented();

return (NULL);
}
#endif
}

dss_prec_t
chunk_dss_prec_get(void)
@@ -93,7 +94,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
*/
do {
/* Get the current end of the DSS. */
dss_max = sbrk(0);
dss_max = chunk_dss_sbrk(0);
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
@@ -117,7 +118,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
return (NULL);
}
incr = gap_size + cpad_size + size;
dss_prev = sbrk(incr);
dss_prev = chunk_dss_sbrk(incr);
if (dss_prev == dss_max) {
/* Success. */
dss_max = dss_next;
@@ -163,7 +164,7 @@ chunk_dss_boot(void)

if (malloc_mutex_init(&dss_mtx))
return (true);
dss_base = sbrk(0);
dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
deps/jemalloc/src/chunk_mmap.c (vendored): 4 changed lines
@@ -43,7 +43,7 @@ pages_map(void *addr, size_t size)
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];

buferror(buf, sizeof(buf));
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc: Error in munmap(): %s\n",
buf);
if (opt_abort)
@@ -69,7 +69,7 @@ pages_unmap(void *addr, size_t size)
{
char buf[BUFERROR_BUF];

buferror(buf, sizeof(buf));
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
deps/jemalloc/src/ckh.c (vendored): 98 changed lines
@ -49,7 +49,7 @@ static void ckh_shrink(ckh_t *ckh);
|
||||
* Search bucket for key and return the cell number if found; SIZE_T_MAX
|
||||
* otherwise.
|
||||
*/
|
||||
JEMALLOC_INLINE size_t
|
||||
JEMALLOC_INLINE_C size_t
|
||||
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
|
||||
{
|
||||
ckhc_t *cell;
|
||||
@ -67,28 +67,28 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
|
||||
/*
|
||||
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
|
||||
*/
|
||||
JEMALLOC_INLINE size_t
|
||||
JEMALLOC_INLINE_C size_t
|
||||
ckh_isearch(ckh_t *ckh, const void *key)
|
||||
{
|
||||
size_t hash1, hash2, bucket, cell;
|
||||
size_t hashes[2], bucket, cell;
|
||||
|
||||
assert(ckh != NULL);
|
||||
|
||||
ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
|
||||
ckh->hash(key, hashes);
|
||||
|
||||
/* Search primary bucket. */
|
||||
bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
cell = ckh_bucket_search(ckh, bucket, key);
|
||||
if (cell != SIZE_T_MAX)
|
||||
return (cell);
|
||||
|
||||
/* Search secondary bucket. */
|
||||
bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
cell = ckh_bucket_search(ckh, bucket, key);
|
||||
return (cell);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE bool
|
||||
JEMALLOC_INLINE_C bool
|
||||
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
|
||||
const void *data)
|
||||
{
|
||||
@ -120,13 +120,13 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
|
||||
* eviction/relocation procedure until either success or detection of an
|
||||
* eviction/relocation bucket cycle.
|
||||
*/
|
||||
JEMALLOC_INLINE bool
|
||||
JEMALLOC_INLINE_C bool
|
||||
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
|
||||
void const **argdata)
|
||||
{
|
||||
const void *key, *data, *tkey, *tdata;
|
||||
ckhc_t *cell;
|
||||
size_t hash1, hash2, bucket, tbucket;
|
||||
size_t hashes[2], bucket, tbucket;
|
||||
unsigned i;
|
||||
|
||||
bucket = argbucket;
|
||||
@ -155,10 +155,11 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
|
||||
#endif
|
||||
|
||||
/* Find the alternate bucket for the evicted item. */
|
||||
ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
|
||||
tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
ckh->hash(key, hashes);
|
||||
tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
if (tbucket == bucket) {
|
||||
tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
|
||||
- 1);
|
||||
/*
|
||||
* It may be that (tbucket == bucket) still, if the
|
||||
* item's hashes both indicate this bucket. However,
|
||||
@ -189,22 +190,22 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE bool
|
||||
JEMALLOC_INLINE_C bool
|
||||
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
|
||||
{
|
||||
size_t hash1, hash2, bucket;
|
||||
size_t hashes[2], bucket;
|
||||
const void *key = *argkey;
|
||||
const void *data = *argdata;
|
||||
|
||||
ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
|
||||
ckh->hash(key, hashes);
|
||||
|
||||
/* Try to insert in primary bucket. */
|
||||
bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
|
||||
return (false);
|
||||
|
||||
/* Try to insert in secondary bucket. */
|
||||
bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
|
||||
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
|
||||
return (false);
|
||||
|
||||
@ -218,7 +219,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
|
||||
* Try to rebuild the hash table from scratch by inserting all items from the
|
||||
* old table into the new.
|
||||
*/
|
||||
JEMALLOC_INLINE bool
|
||||
JEMALLOC_INLINE_C bool
|
||||
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
|
||||
{
|
||||
size_t count, i, nins;
|
||||
@ -417,9 +418,8 @@ ckh_delete(ckh_t *ckh)
|
||||
#endif
|
||||
|
||||
idalloc(ckh->tab);
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
memset(ckh, 0x5a, sizeof(ckh_t));
|
||||
#endif
|
||||
if (config_debug)
|
||||
memset(ckh, 0x5a, sizeof(ckh_t));
|
||||
}
|
||||
|
||||
size_t
|
||||
@ -526,31 +526,10 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
|
||||
}
|
||||
|
||||
void
|
||||
ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
|
||||
ckh_string_hash(const void *key, size_t r_hash[2])
|
||||
{
|
||||
size_t ret1, ret2;
|
||||
uint64_t h;
|
||||
|
||||
assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
|
||||
assert(hash1 != NULL);
|
||||
assert(hash2 != NULL);
|
||||
|
||||
h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
|
||||
if (minbits <= 32) {
|
||||
/*
|
||||
* Avoid doing multiple hashes, since a single hash provides
|
||||
* enough bits.
|
||||
*/
|
||||
ret1 = h & ZU(0xffffffffU);
|
||||
ret2 = h >> 32;
|
||||
} else {
|
||||
ret1 = h;
|
||||
ret2 = hash(key, strlen((const char *)key),
|
||||
UINT64_C(0x8432a476666bbc13));
|
||||
}
|
||||
|
||||
*hash1 = ret1;
|
||||
*hash2 = ret2;
|
||||
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
|
||||
}
|
||||
|
||||
bool
|
||||
@ -564,41 +543,16 @@ ckh_string_keycomp(const void *k1, const void *k2)
|
||||
}
|
||||
|
||||
void
|
||||
ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
|
||||
size_t *hash2)
|
||||
ckh_pointer_hash(const void *key, size_t r_hash[2])
|
||||
{
|
||||
size_t ret1, ret2;
|
||||
uint64_t h;
|
||||
union {
|
||||
const void *v;
|
||||
uint64_t i;
|
||||
size_t i;
|
||||
} u;
|
||||
|
||||
assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
|
||||
assert(hash1 != NULL);
|
||||
assert(hash2 != NULL);
|
||||
|
||||
assert(sizeof(u.v) == sizeof(u.i));
|
||||
#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
|
||||
u.i = 0;
|
||||
#endif
|
||||
u.v = key;
|
||||
h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
|
||||
if (minbits <= 32) {
|
||||
/*
|
||||
* Avoid doing multiple hashes, since a single hash provides
|
||||
* enough bits.
|
||||
*/
|
||||
ret1 = h & ZU(0xffffffffU);
|
||||
ret2 = h >> 32;
|
||||
} else {
|
||||
assert(SIZEOF_PTR == 8);
|
||||
ret1 = h;
|
||||
ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
|
||||
}
|
||||
|
||||
*hash1 = ret1;
|
||||
*hash2 = ret2;
|
||||
hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
|
||||
}
|
||||
|
||||
bool
|
||||
|
deps/jemalloc/src/ctl.c (vendored): 353 changed lines
@ -546,43 +546,30 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
|
||||
static bool
|
||||
ctl_grow(void)
|
||||
{
|
||||
size_t astats_size;
|
||||
ctl_arena_stats_t *astats;
|
||||
arena_t **tarenas;
|
||||
|
||||
/* Extend arena stats and arenas arrays. */
|
||||
astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
|
||||
if (ctl_stats.narenas == narenas_auto) {
|
||||
/* ctl_stats.arenas and arenas came from base_alloc(). */
|
||||
astats = (ctl_arena_stats_t *)imalloc(astats_size);
|
||||
if (astats == NULL)
|
||||
return (true);
|
||||
memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
|
||||
sizeof(ctl_arena_stats_t));
|
||||
|
||||
tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
|
||||
sizeof(arena_t *));
|
||||
if (tarenas == NULL) {
|
||||
idalloc(astats);
|
||||
return (true);
|
||||
}
|
||||
memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
|
||||
} else {
|
||||
astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
|
||||
astats_size, 0, 0, false, false);
|
||||
if (astats == NULL)
|
||||
return (true);
|
||||
|
||||
tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
|
||||
sizeof(arena_t *), 0, 0, false, false);
|
||||
if (tarenas == NULL)
|
||||
return (true);
|
||||
}
|
||||
/* Initialize the new astats and arenas elements. */
|
||||
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
|
||||
if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
|
||||
/* Allocate extended arena stats and arenas arrays. */
|
||||
astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
|
||||
sizeof(ctl_arena_stats_t));
|
||||
if (astats == NULL)
|
||||
return (true);
|
||||
tarenas[ctl_stats.narenas] = NULL;
|
||||
tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
|
||||
sizeof(arena_t *));
|
||||
if (tarenas == NULL) {
|
||||
idalloc(astats);
|
||||
return (true);
|
||||
}
|
||||
|
||||
/* Initialize the new astats element. */
|
||||
memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
|
||||
sizeof(ctl_arena_stats_t));
|
||||
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
|
||||
if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
|
||||
idalloc(tarenas);
|
||||
idalloc(astats);
|
||||
return (true);
|
||||
}
|
||||
/* Swap merged stats to their new location. */
|
||||
{
|
||||
ctl_arena_stats_t tstats;
|
||||
@ -593,13 +580,34 @@ ctl_grow(void)
|
||||
memcpy(&astats[ctl_stats.narenas + 1], &tstats,
|
||||
sizeof(ctl_arena_stats_t));
|
||||
}
|
||||
/* Initialize the new arenas element. */
|
||||
tarenas[ctl_stats.narenas] = NULL;
|
||||
{
|
||||
arena_t **arenas_old = arenas;
|
||||
/*
|
||||
* Swap extended arenas array into place. Although ctl_mtx
|
||||
* protects this function from other threads extending the
|
||||
* array, it does not protect from other threads mutating it
|
||||
* (i.e. initializing arenas and setting array elements to
|
||||
* point to them). Therefore, array copying must happen under
|
||||
* the protection of arenas_lock.
|
||||
*/
|
||||
malloc_mutex_lock(&arenas_lock);
|
||||
arenas = tarenas;
|
||||
memcpy(arenas, arenas_old, ctl_stats.narenas *
|
||||
sizeof(arena_t *));
|
||||
narenas_total++;
|
||||
arenas_extend(narenas_total - 1);
|
||||
malloc_mutex_unlock(&arenas_lock);
|
||||
/*
|
||||
* Deallocate arenas_old only if it came from imalloc() (not
|
||||
* base_alloc()).
|
||||
*/
|
||||
if (ctl_stats.narenas != narenas_auto)
|
||||
idalloc(arenas_old);
|
||||
}
|
||||
ctl_stats.arenas = astats;
|
||||
ctl_stats.narenas++;
|
||||
malloc_mutex_lock(&arenas_lock);
|
||||
arenas = tarenas;
|
||||
narenas_total++;
|
||||
arenas_extend(narenas_total - 1);
|
||||
malloc_mutex_unlock(&arenas_lock);
|
||||
|
||||
return (false);
|
||||
}
|
||||
@ -921,7 +929,7 @@ void
|
||||
ctl_prefork(void)
|
||||
{
|
||||
|
||||
malloc_mutex_lock(&ctl_mtx);
|
||||
malloc_mutex_prefork(&ctl_mtx);
|
||||
}
|
||||
|
||||
void
|
||||
@ -960,11 +968,11 @@ ctl_postfork_child(void)
|
||||
if (*oldlenp != sizeof(t)) { \
|
||||
size_t copylen = (sizeof(t) <= *oldlenp) \
|
||||
? sizeof(t) : *oldlenp; \
|
||||
memcpy(oldp, (void *)&v, copylen); \
|
||||
memcpy(oldp, (void *)&(v), copylen); \
|
||||
ret = EINVAL; \
|
||||
goto label_return; \
|
||||
} else \
|
||||
*(t *)oldp = v; \
|
||||
*(t *)oldp = (v); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
@ -974,7 +982,7 @@ ctl_postfork_child(void)
|
||||
ret = EINVAL; \
|
||||
goto label_return; \
|
||||
} \
|
||||
v = *(t *)newp; \
|
||||
(v) = *(t *)newp; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
@ -995,7 +1003,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
|
||||
if (l) \
|
||||
malloc_mutex_lock(&ctl_mtx); \
|
||||
READONLY(); \
|
||||
oldval = v; \
|
||||
oldval = (v); \
|
||||
READ(oldval, t); \
|
||||
\
|
||||
ret = 0; \
|
||||
@ -1017,7 +1025,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
|
||||
return (ENOENT); \
|
||||
malloc_mutex_lock(&ctl_mtx); \
|
||||
READONLY(); \
|
||||
oldval = v; \
|
||||
oldval = (v); \
|
||||
READ(oldval, t); \
|
||||
\
|
||||
ret = 0; \
|
||||
@ -1036,7 +1044,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
|
||||
\
|
||||
malloc_mutex_lock(&ctl_mtx); \
|
||||
READONLY(); \
|
||||
oldval = v; \
|
||||
oldval = (v); \
|
||||
READ(oldval, t); \
|
||||
\
|
||||
ret = 0; \
|
||||
@ -1060,7 +1068,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
|
||||
if ((c) == false) \
|
||||
return (ENOENT); \
|
||||
READONLY(); \
|
||||
oldval = v; \
|
||||
oldval = (v); \
|
||||
READ(oldval, t); \
|
||||
\
|
||||
ret = 0; \
|
||||
@ -1077,7 +1085,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
|
||||
t oldval; \
|
||||
\
|
||||
READONLY(); \
|
||||
oldval = v; \
|
||||
oldval = (v); \
|
||||
READ(oldval, t); \
|
||||
\
|
||||
ret = 0; \
|
||||
@ -1102,6 +1110,8 @@ label_return: \
|
||||
return (ret); \
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
|
||||
|
||||
static int
|
||||
@ -1109,7 +1119,7 @@ epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
||||
void *newp, size_t newlen)
|
||||
{
|
||||
int ret;
|
||||
uint64_t newval;
|
||||
UNUSED uint64_t newval;
|
||||
|
||||
malloc_mutex_lock(&ctl_mtx);
|
||||
WRITE(newval, uint64_t);
|
||||
@ -1123,49 +1133,52 @@ label_return:
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static int
|
||||
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen)
|
||||
{
|
||||
int ret;
|
||||
bool oldval;
|
||||
/******************************************************************************/
|
||||
|
||||
if (config_tcache == false)
|
||||
return (ENOENT);
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_debug)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_dss)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_fill)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_mremap)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_prof)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_stats)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_tls)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
|
||||
|
||||
oldval = tcache_enabled_get();
|
||||
if (newp != NULL) {
|
||||
if (newlen != sizeof(bool)) {
|
||||
ret = EINVAL;
|
||||
goto label_return;
|
||||
}
|
||||
tcache_enabled_set(*(bool *)newp);
|
||||
}
|
||||
READ(oldval, bool);
|
||||
/******************************************************************************/
|
||||
|
||||
ret = 0;
|
||||
label_return:
|
||||
return (ret);
|
||||
}
|
||||
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
|
||||
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
|
||||
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
|
||||
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
|
||||
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
|
||||
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
|
||||
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
|
||||
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
|
||||
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
|
||||
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
|
||||
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
|
||||
CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
|
||||
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
|
||||
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
|
||||
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
|
||||
CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
|
||||
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
|
||||
|
||||
static int
|
||||
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (config_tcache == false)
|
||||
return (ENOENT);
|
||||
|
||||
READONLY();
|
||||
WRITEONLY();
|
||||
|
||||
tcache_flush();
|
||||
|
||||
ret = 0;
|
||||
label_return:
|
||||
return (ret);
|
||||
}
|
||||
/******************************************************************************/
|
||||
|
||||
static int
|
||||
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
||||
@ -1227,50 +1240,49 @@ CTL_RO_NL_CGEN(config_stats, thread_deallocated,
|
||||
CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
|
||||
&thread_allocated_tsd_get()->deallocated, uint64_t *)
|
||||
|
||||
/******************************************************************************/
|
||||
static int
|
||||
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen)
|
||||
{
|
||||
int ret;
|
||||
bool oldval;
|
||||
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_debug)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_dss)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_fill)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_mremap)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_prof)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_stats)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_tls)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
|
||||
if (config_tcache == false)
|
||||
return (ENOENT);
|
||||
|
||||
/******************************************************************************/
|
||||
oldval = tcache_enabled_get();
|
||||
if (newp != NULL) {
|
||||
if (newlen != sizeof(bool)) {
|
||||
ret = EINVAL;
|
||||
goto label_return;
|
||||
}
|
||||
tcache_enabled_set(*(bool *)newp);
|
||||
}
|
||||
READ(oldval, bool);
|
||||
|
||||
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
|
||||
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
|
||||
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
|
||||
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
|
||||
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
|
||||
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
|
||||
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
|
||||
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
|
||||
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
|
||||
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
|
||||
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
|
||||
CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
|
||||
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
|
||||
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
|
||||
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
|
||||
CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
|
||||
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
|
||||
ret = 0;
|
||||
label_return:
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static int
|
||||
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (config_tcache == false)
|
||||
return (ENOENT);
|
||||
|
||||
READONLY();
|
||||
WRITEONLY();
|
||||
|
||||
tcache_flush();
|
||||
|
||||
ret = 0;
|
||||
label_return:
|
||||
return (ret);
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
@ -1382,31 +1394,8 @@ label_return:
|
||||
return (ret);
|
||||
}
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
|
||||
static const ctl_named_node_t *
|
||||
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
|
||||
{
|
||||
|
||||
if (i > NBINS)
|
||||
return (NULL);
|
||||
return (super_arenas_bin_i_node);
|
||||
}
|
||||
|
||||
CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
|
||||
static const ctl_named_node_t *
|
||||
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
|
||||
{
|
||||
|
||||
if (i > nlclasses)
|
||||
return (NULL);
|
||||
return (super_arenas_lrun_i_node);
|
||||
}
|
||||
|
||||
static int
|
||||
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen)
|
||||
@ -1460,7 +1449,28 @@ CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
|
||||
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
|
||||
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
|
||||
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
|
||||
static const ctl_named_node_t *
|
||||
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
|
||||
{
|
||||
|
||||
if (i > NBINS)
|
||||
return (NULL);
|
||||
return (super_arenas_bin_i_node);
|
||||
}
|
||||
|
||||
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
|
||||
CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
|
||||
static const ctl_named_node_t *
|
||||
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
|
||||
{
|
||||
|
||||
if (i > nlclasses)
|
||||
return (NULL);
|
||||
return (super_arenas_lrun_i_node);
|
||||
}
|
||||
|
||||
static int
|
||||
arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
||||
@ -1492,6 +1502,7 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
||||
void *newp, size_t newlen)
|
||||
{
|
||||
int ret;
|
||||
unsigned narenas;
|
||||
|
||||
malloc_mutex_lock(&ctl_mtx);
|
||||
READONLY();
|
||||
@ -1499,7 +1510,8 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
||||
ret = EAGAIN;
|
||||
goto label_return;
|
||||
}
|
||||
READ(ctl_stats.narenas - 1, unsigned);
|
||||
narenas = ctl_stats.narenas - 1;
|
||||
READ(narenas, unsigned);
|
||||
|
||||
ret = 0;
|
||||
label_return:
|
||||
@ -1565,6 +1577,11 @@ CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
|
||||
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
|
||||
|
||||
CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
|
||||
size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
|
||||
@ -1572,6 +1589,20 @@ CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
|
||||
|
||||
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
|
||||
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
|
||||
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
|
||||
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
|
||||
ctl_stats.arenas[mib[2]].astats.mapped, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
|
||||
ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
|
||||
ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
|
||||
ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
|
||||
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
|
||||
ctl_stats.arenas[mib[2]].allocated_small, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
|
||||
@ -1635,19 +1666,6 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
|
||||
return (super_stats_arenas_i_lruns_j_node);
|
||||
}
|
||||
|
||||
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
|
||||
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
|
||||
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
|
||||
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
|
||||
ctl_stats.arenas[mib[2]].astats.mapped, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
|
||||
ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
|
||||
ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
|
||||
ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
|
||||
|
||||
static const ctl_named_node_t *
|
||||
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
|
||||
{
|
||||
@ -1664,8 +1682,3 @@ label_return:
|
||||
malloc_mutex_unlock(&ctl_mtx);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
|
||||
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
|
||||
|
deps/jemalloc/src/huge.c (vendored): 82 changed lines
@ -16,14 +16,14 @@ malloc_mutex_t huge_mtx;
|
||||
static extent_tree_t huge;
|
||||
|
||||
void *
|
||||
huge_malloc(size_t size, bool zero)
|
||||
huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
|
||||
{
|
||||
|
||||
return (huge_palloc(size, chunksize, zero));
|
||||
return (huge_palloc(size, chunksize, zero, dss_prec));
|
||||
}
|
||||
|
||||
void *
|
||||
huge_palloc(size_t size, size_t alignment, bool zero)
|
||||
huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
|
||||
{
|
||||
void *ret;
|
||||
size_t csize;
|
||||
@ -48,8 +48,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
|
||||
* it is possible to make correct junk/zero fill decisions below.
|
||||
*/
|
||||
is_zeroed = zero;
|
||||
ret = chunk_alloc(csize, alignment, false, &is_zeroed,
|
||||
chunk_dss_prec_get());
|
||||
ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
|
||||
if (ret == NULL) {
|
||||
base_node_dealloc(node);
|
||||
return (NULL);
|
||||
@ -78,7 +77,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
|
||||
return (ret);
|
||||
}
|
||||
|
||||
void *
|
||||
bool
|
||||
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
|
||||
{
|
||||
|
||||
@ -89,28 +88,23 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
|
||||
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
|
||||
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
|
||||
assert(CHUNK_CEILING(oldsize) == oldsize);
|
||||
if (config_fill && opt_junk && size < oldsize) {
|
||||
memset((void *)((uintptr_t)ptr + size), 0x5a,
|
||||
oldsize - size);
|
||||
}
|
||||
return (ptr);
|
||||
return (false);
|
||||
}
|
||||
|
||||
/* Reallocation would require a move. */
|
||||
return (NULL);
|
||||
return (true);
|
||||
}
|
||||
|
||||
void *
|
||||
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
size_t alignment, bool zero, bool try_tcache_dalloc)
|
||||
size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
|
||||
{
|
||||
void *ret;
|
||||
size_t copysize;
|
||||
|
||||
/* Try to avoid moving the allocation. */
|
||||
ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
|
||||
if (ret != NULL)
|
||||
return (ret);
|
||||
if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
|
||||
return (ptr);
|
||||
|
||||
/*
|
||||
* size and oldsize are different enough that we need to use a
|
||||
@ -118,18 +112,18 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
* space and copying.
|
||||
*/
|
||||
if (alignment > chunksize)
|
||||
ret = huge_palloc(size + extra, alignment, zero);
|
||||
ret = huge_palloc(size + extra, alignment, zero, dss_prec);
|
||||
else
|
||||
ret = huge_malloc(size + extra, zero);
|
||||
ret = huge_malloc(size + extra, zero, dss_prec);
|
||||
|
||||
if (ret == NULL) {
|
||||
if (extra == 0)
|
||||
return (NULL);
|
||||
/* Try again, this time without extra. */
|
||||
if (alignment > chunksize)
|
||||
ret = huge_palloc(size, alignment, zero);
|
||||
ret = huge_palloc(size, alignment, zero, dss_prec);
|
||||
else
|
||||
ret = huge_malloc(size, zero);
|
||||
ret = huge_malloc(size, zero, dss_prec);
|
||||
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
@ -169,23 +163,56 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
*/
|
||||
char buf[BUFERROR_BUF];
|
||||
|
||||
buferror(buf, sizeof(buf));
|
||||
buferror(get_errno(), buf, sizeof(buf));
|
||||
malloc_printf("<jemalloc>: Error in mremap(): %s\n",
|
||||
buf);
|
||||
if (opt_abort)
|
||||
abort();
|
||||
memcpy(ret, ptr, copysize);
|
||||
chunk_dealloc_mmap(ptr, oldsize);
|
||||
} else if (config_fill && zero == false && opt_junk && oldsize
|
||||
< newsize) {
|
||||
/*
|
||||
* mremap(2) clobbers the original mapping, so
|
||||
* junk/zero filling is not preserved. There is no
|
||||
* need to zero fill here, since any trailing
|
||||
* uninititialized memory is demand-zeroed by the
|
||||
* kernel, but junk filling must be redone.
|
||||
*/
|
||||
memset(ret + oldsize, 0xa5, newsize - oldsize);
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
memcpy(ret, ptr, copysize);
|
||||
iqallocx(ptr, try_tcache_dalloc);
|
||||
iqalloct(ptr, try_tcache_dalloc);
|
||||
}
|
||||
return (ret);
|
||||
}
|
||||
|
||||
#ifdef JEMALLOC_JET
|
||||
#undef huge_dalloc_junk
|
||||
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
|
||||
#endif
|
||||
static void
|
||||
huge_dalloc_junk(void *ptr, size_t usize)
|
||||
{
|
||||
|
||||
if (config_fill && config_dss && opt_junk) {
|
||||
/*
|
||||
* Only bother junk filling if the chunk isn't about to be
|
||||
* unmapped.
|
||||
*/
|
||||
if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
|
||||
memset(ptr, 0x5a, usize);
|
||||
}
|
||||
}
|
||||
#ifdef JEMALLOC_JET
|
||||
#undef huge_dalloc_junk
|
||||
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
|
||||
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
|
||||
#endif
|
||||
|
||||
void
|
||||
huge_dalloc(void *ptr, bool unmap)
|
||||
{
|
||||
@ -208,8 +235,8 @@ huge_dalloc(void *ptr, bool unmap)
|
||||
|
||||
malloc_mutex_unlock(&huge_mtx);
|
||||
|
||||
if (unmap && config_fill && config_dss && opt_junk)
|
||||
memset(node->addr, 0x5a, node->size);
|
||||
if (unmap)
|
||||
huge_dalloc_junk(node->addr, node->size);
|
||||
|
||||
chunk_dealloc(node->addr, node->size, unmap);
|
||||
|
||||
@ -236,6 +263,13 @@ huge_salloc(const void *ptr)
|
||||
return (ret);
|
||||
}
|
||||
|
||||
dss_prec_t
|
||||
huge_dss_prec_get(arena_t *arena)
|
||||
{
|
||||
|
||||
return (arena_dss_prec_get(choose_arena(arena)));
|
||||
}
|
||||
|
||||
prof_ctx_t *
|
||||
huge_prof_ctx_get(const void *ptr)
|
||||
{
|
||||
|
deps/jemalloc/src/jemalloc.c (vendored): 1377 changed lines
File diff suppressed because it is too large
deps/jemalloc/src/mutex.c (vendored): 2 changed lines
@@ -6,7 +6,7 @@
#endif

#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#define _CRT_SPINCOUNT 4000
#endif

/******************************************************************************/
deps/jemalloc/src/prof.c (vendored): 907 changed lines
File diff suppressed because it is too large
deps/jemalloc/src/quarantine.c (vendored): 97 changed lines
@ -1,3 +1,4 @@
|
||||
#define JEMALLOC_QUARANTINE_C_
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
|
||||
/*
|
||||
@ -11,39 +12,18 @@
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
typedef struct quarantine_obj_s quarantine_obj_t;
|
||||
typedef struct quarantine_s quarantine_t;
|
||||
|
||||
struct quarantine_obj_s {
|
||||
void *ptr;
|
||||
size_t usize;
|
||||
};
|
||||
|
||||
struct quarantine_s {
|
||||
size_t curbytes;
|
||||
size_t curobjs;
|
||||
size_t first;
|
||||
#define LG_MAXOBJS_INIT 10
|
||||
size_t lg_maxobjs;
|
||||
quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
|
||||
};
|
||||
|
||||
static void quarantine_cleanup(void *arg);
|
||||
|
||||
malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
|
||||
malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
|
||||
quarantine_cleanup)
|
||||
malloc_tsd_data(, quarantine, quarantine_t *, NULL)
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static quarantine_t *quarantine_init(size_t lg_maxobjs);
|
||||
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
|
||||
static void quarantine_drain_one(quarantine_t *quarantine);
|
||||
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static quarantine_t *
|
||||
quarantine_t *
|
||||
quarantine_init(size_t lg_maxobjs)
|
||||
{
|
||||
quarantine_t *quarantine;
|
||||
@ -68,8 +48,10 @@ quarantine_grow(quarantine_t *quarantine)
|
||||
quarantine_t *ret;
|
||||
|
||||
ret = quarantine_init(quarantine->lg_maxobjs + 1);
|
||||
if (ret == NULL)
|
||||
if (ret == NULL) {
|
||||
quarantine_drain_one(quarantine);
|
||||
return (quarantine);
|
||||
}
|
||||
|
||||
ret->curbytes = quarantine->curbytes;
|
||||
ret->curobjs = quarantine->curobjs;
|
||||
@ -89,23 +71,29 @@ quarantine_grow(quarantine_t *quarantine)
|
||||
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
|
||||
sizeof(quarantine_obj_t));
|
||||
}
|
||||
idalloc(quarantine);
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static void
|
||||
quarantine_drain_one(quarantine_t *quarantine)
|
||||
{
|
||||
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
|
||||
assert(obj->usize == isalloc(obj->ptr, config_prof));
|
||||
idalloc(obj->ptr);
|
||||
quarantine->curbytes -= obj->usize;
|
||||
quarantine->curobjs--;
|
||||
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
|
||||
quarantine->lg_maxobjs) - 1);
|
||||
}
|
||||
|
||||
static void
|
||||
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
|
||||
{
|
||||
|
||||
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) {
|
||||
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
|
||||
assert(obj->usize == isalloc(obj->ptr, config_prof));
|
||||
idalloc(obj->ptr);
|
||||
quarantine->curbytes -= obj->usize;
|
||||
quarantine->curobjs--;
|
||||
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
|
||||
quarantine->lg_maxobjs) - 1);
|
||||
}
|
||||
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
|
||||
quarantine_drain_one(quarantine);
|
||||
}
|
||||
|
||||
void
|
||||
@ -119,24 +107,16 @@ quarantine(void *ptr)
|
||||
|
||||
quarantine = *quarantine_tsd_get();
|
||||
if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
|
||||
if (quarantine == NULL) {
|
||||
if ((quarantine = quarantine_init(LG_MAXOBJS_INIT)) ==
|
||||
NULL) {
|
||||
idalloc(ptr);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
if (quarantine == QUARANTINE_STATE_PURGATORY) {
|
||||
/*
|
||||
* Make a note that quarantine() was called
|
||||
* after quarantine_cleanup() was called.
|
||||
*/
|
||||
quarantine = QUARANTINE_STATE_REINCARNATED;
|
||||
quarantine_tsd_set(&quarantine);
|
||||
}
|
||||
idalloc(ptr);
|
||||
return;
|
||||
if (quarantine == QUARANTINE_STATE_PURGATORY) {
|
||||
/*
|
||||
* Make a note that quarantine() was called after
|
||||
* quarantine_cleanup() was called.
|
||||
*/
|
||||
quarantine = QUARANTINE_STATE_REINCARNATED;
|
||||
quarantine_tsd_set(&quarantine);
|
||||
}
|
||||
idalloc(ptr);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* Drain one or more objects if the quarantine size limit would be
|
||||
@ -161,15 +141,24 @@ quarantine(void *ptr)
|
||||
obj->usize = usize;
|
||||
quarantine->curbytes += usize;
|
||||
quarantine->curobjs++;
|
||||
if (opt_junk)
|
||||
memset(ptr, 0x5a, usize);
|
||||
if (config_fill && opt_junk) {
|
||||
/*
|
||||
* Only do redzone validation if Valgrind isn't in
|
||||
* operation.
|
||||
*/
|
||||
if ((config_valgrind == false || opt_valgrind == false)
|
||||
&& usize <= SMALL_MAXCLASS)
|
||||
arena_quarantine_junk_small(ptr, usize);
|
||||
else
|
||||
memset(ptr, 0x5a, usize);
|
||||
}
|
||||
} else {
|
||||
assert(quarantine->curbytes == 0);
|
||||
idalloc(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
void
|
||||
quarantine_cleanup(void *arg)
|
||||
{
|
||||
quarantine_t *quarantine = *(quarantine_t **)arg;
|
||||
|
deps/jemalloc/src/rtree.c (vendored): 76 changed lines
@ -2,42 +2,55 @@
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
|
||||
rtree_t *
|
||||
rtree_new(unsigned bits)
|
||||
rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc)
|
||||
{
|
||||
rtree_t *ret;
|
||||
unsigned bits_per_level, height, i;
|
||||
unsigned bits_per_level, bits_in_leaf, height, i;
|
||||
|
||||
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
|
||||
|
||||
bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
|
||||
height = bits / bits_per_level;
|
||||
if (height * bits_per_level != bits)
|
||||
height++;
|
||||
assert(height * bits_per_level >= bits);
|
||||
bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
|
||||
if (bits > bits_in_leaf) {
|
||||
height = 1 + (bits - bits_in_leaf) / bits_per_level;
|
||||
if ((height-1) * bits_per_level + bits_in_leaf != bits)
|
||||
height++;
|
||||
} else {
|
||||
height = 1;
|
||||
}
|
||||
assert((height-1) * bits_per_level + bits_in_leaf >= bits);
|
||||
|
||||
ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) +
|
||||
ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) +
|
||||
(sizeof(unsigned) * height));
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
|
||||
height));
|
||||
|
||||
ret->alloc = alloc;
|
||||
ret->dalloc = dalloc;
|
||||
if (malloc_mutex_init(&ret->mutex)) {
|
||||
/* Leak the rtree. */
|
||||
if (dalloc != NULL)
|
||||
dalloc(ret);
|
||||
return (NULL);
|
||||
}
|
||||
ret->height = height;
|
||||
if (bits_per_level * height > bits)
|
||||
ret->level2bits[0] = bits % bits_per_level;
|
||||
else
|
||||
ret->level2bits[0] = bits_per_level;
|
||||
for (i = 1; i < height; i++)
|
||||
ret->level2bits[i] = bits_per_level;
|
||||
if (height > 1) {
|
||||
if ((height-1) * bits_per_level + bits_in_leaf > bits) {
|
||||
ret->level2bits[0] = (bits - bits_in_leaf) %
|
||||
bits_per_level;
|
||||
} else
|
||||
ret->level2bits[0] = bits_per_level;
|
||||
for (i = 1; i < height-1; i++)
|
||||
ret->level2bits[i] = bits_per_level;
|
||||
ret->level2bits[height-1] = bits_in_leaf;
|
||||
} else
|
||||
ret->level2bits[0] = bits;
|
||||
|
||||
ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]);
|
||||
ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]);
|
||||
if (ret->root == NULL) {
|
||||
/*
|
||||
* We leak the rtree here, since there's no generic base
|
||||
* deallocation.
|
||||
*/
|
||||
if (dalloc != NULL)
|
||||
dalloc(ret);
|
||||
return (NULL);
|
||||
}
|
||||
memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
|
||||
@ -45,6 +58,31 @@ rtree_new(unsigned bits)
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static void
|
||||
rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
|
||||
{
|
||||
|
||||
if (level < rtree->height - 1) {
|
||||
size_t nchildren, i;
|
||||
|
||||
nchildren = ZU(1) << rtree->level2bits[level];
|
||||
for (i = 0; i < nchildren; i++) {
|
||||
void **child = (void **)node[i];
|
||||
if (child != NULL)
|
||||
rtree_delete_subtree(rtree, child, level + 1);
|
||||
}
|
||||
}
|
||||
rtree->dalloc(node);
|
||||
}
|
||||
|
||||
void
|
||||
rtree_delete(rtree_t *rtree)
|
||||
{
|
||||
|
||||
rtree_delete_subtree(rtree, rtree->root, 0);
|
||||
rtree->dalloc(rtree);
|
||||
}
|
||||
|
||||
void
|
||||
rtree_prefork(rtree_t *rtree)
|
||||
{
|
||||
|
deps/jemalloc/src/stats.c (vendored): 8 changed lines
@@ -345,25 +345,25 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled");

#define OPT_WRITE_BOOL(n) \
#define OPT_WRITE_BOOL(n) \
if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
}
#define OPT_WRITE_SIZE_T(n) \
#define OPT_WRITE_SIZE_T(n) \
if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
}
#define OPT_WRITE_SSIZE_T(n) \
#define OPT_WRITE_SSIZE_T(n) \
if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
}
#define OPT_WRITE_CHAR_P(n) \
#define OPT_WRITE_CHAR_P(n) \
if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
deps/jemalloc/src/tcache.c (vendored): 29 changed lines
@ -97,9 +97,8 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
||||
arena_bin_t *bin = &arena->bins[binind];
|
||||
|
||||
if (config_prof && arena == tcache->arena) {
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
arena_prof_accum(arena, tcache->prof_accumbytes);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
if (arena_prof_accum(arena, tcache->prof_accumbytes))
|
||||
prof_idump();
|
||||
tcache->prof_accumbytes = 0;
|
||||
}
|
||||
|
||||
@ -176,11 +175,14 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
||||
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
|
||||
tbin->avail[0]);
|
||||
arena_t *arena = chunk->arena;
|
||||
UNUSED bool idump;
|
||||
|
||||
if (config_prof)
|
||||
idump = false;
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
if ((config_prof || config_stats) && arena == tcache->arena) {
|
||||
if (config_prof) {
|
||||
arena_prof_accum(arena,
|
||||
idump = arena_prof_accum_locked(arena,
|
||||
tcache->prof_accumbytes);
|
||||
tcache->prof_accumbytes = 0;
|
||||
}
|
||||
@ -212,6 +214,8 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
||||
}
|
||||
}
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
if (config_prof && idump)
|
||||
prof_idump();
|
||||
}
|
||||
if (config_stats && merged_stats == false) {
|
||||
/*
|
||||
@ -256,8 +260,8 @@ tcache_arena_dissociate(tcache_t *tcache)
|
||||
/* Unlink from list of extant tcaches. */
|
||||
malloc_mutex_lock(&tcache->arena->lock);
|
||||
ql_remove(&tcache->arena->tcache_ql, tcache, link);
|
||||
malloc_mutex_unlock(&tcache->arena->lock);
|
||||
tcache_stats_merge(tcache, tcache->arena);
|
||||
malloc_mutex_unlock(&tcache->arena->lock);
|
||||
}
|
||||
}
|
||||
|
||||
@ -288,7 +292,7 @@ tcache_create(arena_t *arena)
|
||||
else if (size <= tcache_maxclass)
|
||||
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
|
||||
else
|
||||
tcache = (tcache_t *)icallocx(size, false, arena);
|
||||
tcache = (tcache_t *)icalloct(size, false, arena);
|
||||
|
||||
if (tcache == NULL)
|
||||
return (NULL);
|
||||
@ -343,11 +347,9 @@ tcache_destroy(tcache_t *tcache)
}
}

if (config_prof && tcache->prof_accumbytes > 0) {
malloc_mutex_lock(&tcache->arena->lock);
arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
malloc_mutex_unlock(&tcache->arena->lock);
}
if (config_prof && tcache->prof_accumbytes > 0 &&
arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
prof_idump();

tcache_size = arena_salloc(tcache, false);
if (tcache_size <= SMALL_MAXCLASS) {
@ -364,7 +366,7 @@ tcache_destroy(tcache_t *tcache)

arena_dalloc_large(arena, chunk, tcache);
} else
idallocx(tcache, false);
idalloct(tcache, false);
}

void
@ -397,11 +399,14 @@ tcache_thread_cleanup(void *arg)
}
}

/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;

cassert(config_stats);

/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
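The recurring change in tcache.c above is how profiling bytes are flushed: callers no longer wrap arena_prof_accum() in arena->lock themselves, its return value now decides whether prof_idump() runs, and inside sections that already hold the lock the new arena_prof_accum_locked() is used with the dump deferred until after unlocking. A standalone sketch of that pattern, not jemalloc's actual code; accum_t and accum_add() are made-up names used only for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

/* Hypothetical accumulator mirroring the arena_prof_accum() pattern:
 * add bytes under the lock, report whether the interval was crossed,
 * and let the caller run the expensive dump after unlocking. */
typedef struct {
	pthread_mutex_t lock;
	uint64_t accumbytes;
	uint64_t interval;
} accum_t;

static bool
accum_add(accum_t *a, uint64_t bytes)
{
	bool dump;

	pthread_mutex_lock(&a->lock);
	a->accumbytes += bytes;
	dump = (a->accumbytes >= a->interval);
	if (dump)
		a->accumbytes -= a->interval;
	pthread_mutex_unlock(&a->lock);
	return (dump);
}

int
main(void)
{
	accum_t a = {PTHREAD_MUTEX_INITIALIZER, 0, 1 << 20};

	/* Caller-side pattern from tcache_bin_flush_small()/tcache_destroy():
	 * the dump runs with no lock held. */
	if (accum_add(&a, 2 << 20))
		printf("interval crossed: a prof_idump()-style dump would run here\n");
	return (0);
}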
36
deps/jemalloc/src/tsd.c
vendored
36
deps/jemalloc/src/tsd.c
vendored
@ -21,7 +21,7 @@ void
malloc_tsd_dalloc(void *wrapper)
{

idalloc(wrapper);
idalloct(wrapper, false);
}

void
@ -105,3 +105,37 @@ JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif

#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *
tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
{
pthread_t self = pthread_self();
tsd_init_block_t *iter;

/* Check whether this thread has already inserted into the list. */
malloc_mutex_lock(&head->lock);
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
malloc_mutex_unlock(&head->lock);
return (iter->data);
}
}
/* Insert block into list. */
ql_elm_new(block, link);
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
malloc_mutex_unlock(&head->lock);
return (NULL);
}

void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{

malloc_mutex_lock(&head->lock);
ql_remove(&head->blocks, block, link);
malloc_mutex_unlock(&head->lock);
}
#endif
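The block added here, tsd_init_check_recursion() and tsd_init_finish(), lets a thread detect that it has re-entered TSD initialization (for example when setting up thread-specific data calls back into the allocator): each thread registers an in-progress block keyed by its pthread_t, and a nested call finds that block and returns the partially initialized data instead of recursing. A freestanding sketch of the same idea using pthreads and a plain singly linked list; every name below is illustrative, not jemalloc's:

#include <pthread.h>
#include <stddef.h>

/* One in-progress initialization record per thread. */
typedef struct init_block_s {
	struct init_block_s *next;
	pthread_t thread;
	void *data;
} init_block_t;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static init_block_t *init_blocks = NULL;

/* Returns the in-progress data if this thread is already initializing
 * (recursion), otherwise registers `block` and returns NULL. */
static void *
init_check_recursion(init_block_t *block)
{
	pthread_t self = pthread_self();
	init_block_t *iter;

	pthread_mutex_lock(&init_lock);
	for (iter = init_blocks; iter != NULL; iter = iter->next) {
		if (pthread_equal(iter->thread, self)) {
			pthread_mutex_unlock(&init_lock);
			return (iter->data);
		}
	}
	block->thread = self;
	block->next = init_blocks;
	init_blocks = block;
	pthread_mutex_unlock(&init_lock);
	return (NULL);
}

/* Unregisters the block once initialization is complete. */
static void
init_finish(init_block_t *block)
{
	init_block_t **cur;

	pthread_mutex_lock(&init_lock);
	for (cur = &init_blocks; *cur != NULL; cur = &(*cur)->next) {
		if (*cur == block) {
			*cur = block->next;
			break;
		}
	}
	pthread_mutex_unlock(&init_lock);
}

int
main(void)
{
	init_block_t block = {0};

	if (init_check_recursion(&block) == NULL) {
		block.data = &block;	/* ... perform the real initialization ... */
		init_finish(&block);
	}
	return (0);
}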
85
deps/jemalloc/src/util.c
vendored
85
deps/jemalloc/src/util.c
vendored
@ -77,7 +77,7 @@ malloc_write(const char *s)
* provide a wrapper.
*/
int
buferror(char *buf, size_t buflen)
buferror(int err, char *buf, size_t buflen)
{

#ifdef _WIN32
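buferror() now takes the error number as an explicit argument instead of reading errno internally, so callers snapshot errno at the failure site before any later call can overwrite it. A small standalone illustration of that calling convention, using the standard strerror() rather than jemalloc's buferror():

#include <errno.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	if (fopen("/nonexistent/path", "r") == NULL) {
		int err = errno;	/* capture immediately, as buferror(err, ...) now expects */

		/* ... intervening calls here may clobber errno ... */
		printf("fopen failed (errno %d): %s\n", err, strerror(err));
	}
	return (0);
}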
@ -85,34 +85,36 @@ buferror(char *buf, size_t buflen)
(LPSTR)buf, buflen, NULL);
return (0);
#elif defined(_GNU_SOURCE)
char *b = strerror_r(errno, buf, buflen);
char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
}
return (0);
#else
return (strerror_r(errno, buf, buflen));
return (strerror_r(err, buf, buflen));
#endif
}

uintmax_t
malloc_strtoumax(const char *nptr, char **endptr, int base)
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
{
uintmax_t ret, digit;
int b;
bool neg;
const char *p, *ns;

p = nptr;
if (base < 0 || base == 1 || base > 36) {
ns = p;
set_errno(EINVAL);
return (UINTMAX_MAX);
ret = UINTMAX_MAX;
goto label_return;
}
b = base;

/* Swallow leading whitespace and get sign, if any. */
neg = false;
p = nptr;
while (true) {
switch (*p) {
case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
@ -146,7 +148,7 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
if (b == 8)
p++;
break;
case 'x':
case 'X': case 'x':
switch (p[2]) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
@ -164,7 +166,9 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
}
break;
default:
break;
p++;
ret = 0;
goto label_return;
}
}
if (b == 0)
@ -181,13 +185,22 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
if (ret < pret) {
/* Overflow. */
set_errno(ERANGE);
return (UINTMAX_MAX);
ret = UINTMAX_MAX;
goto label_return;
}
p++;
}
if (neg)
ret = -ret;

if (p == ns) {
/* No conversion performed. */
set_errno(EINVAL);
ret = UINTMAX_MAX;
goto label_return;
}

label_return:
if (endptr != NULL) {
if (p == ns) {
/* No characters were converted. */
@ -195,7 +208,6 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
} else
*endptr = (char *)p;
}

return (ret);
}

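The reworked error paths above make malloc_strtoumax() fail the way the standard strtoumax() does: EINVAL and UINTMAX_MAX for an unsupported base or when no digits are consumed, ERANGE and UINTMAX_MAX on overflow, and endptr pointing at the first unconverted character. A short check written against the standard strtoumax() from <inttypes.h>; the bundled malloc_strtoumax() is internal to jemalloc, but the contract exercised here is the one the hunks above implement:

#include <inttypes.h>
#include <errno.h>
#include <stdio.h>

static void
parse(const char *s, int base)
{
	char *end;
	uintmax_t v;

	errno = 0;
	v = strtoumax(s, &end, base);
	if (end == s)
		printf("%-30s -> no conversion (reported as EINVAL above)\n", s);
	else if (v == UINTMAX_MAX && errno == ERANGE)
		printf("%-30s -> overflow (ERANGE)\n", s);
	else
		printf("%-30s -> %ju, stopped at \"%s\"\n", s, v, end);
}

int
main(void)
{
	parse("0x1f", 0);				/* base auto-detection, like the 'X'/'x' case above */
	parse("xyz", 10);				/* no digits: endptr == nptr */
	parse("99999999999999999999999999", 10);	/* overflow */
	return (0);
}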
@ -331,7 +343,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
APPEND_C(' '); \
} \
} while (0)
#define GET_ARG_NUMERIC(val, len) do { \
#define GET_ARG_NUMERIC(val, len) do { \
switch (len) { \
case '?': \
val = va_arg(ap, int); \
@ -354,6 +366,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case 'j': \
val = va_arg(ap, intmax_t); \
break; \
case 'j' | 0x80: \
val = va_arg(ap, uintmax_t); \
break; \
case 't': \
val = va_arg(ap, ptrdiff_t); \
break; \
@ -385,11 +400,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
unsigned char len = '?';

f++;
if (*f == '%') {
/* %% */
APPEND_C(*f);
break;
}
/* Flags. */
while (true) {
switch (*f) {
@ -419,6 +429,10 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case '*':
width = va_arg(ap, int);
f++;
if (width < 0) {
left_justify = true;
width = -width;
}
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
@ -428,19 +442,16 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
assert(uwidth != UINTMAX_MAX || get_errno() !=
ERANGE);
width = (int)uwidth;
if (*f == '.') {
f++;
goto label_precision;
} else
goto label_length;
break;
} case '.':
f++;
goto label_precision;
default: goto label_length;
} default:
break;
}
/* Width/precision separator. */
if (*f == '.')
f++;
else
goto label_length;
/* Precision. */
label_precision:
switch (*f) {
case '*':
prec = va_arg(ap, int);
@ -469,16 +480,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
} else
len = 'l';
break;
case 'j':
len = 'j';
f++;
break;
case 't':
len = 't';
f++;
break;
case 'z':
len = 'z';
case 'q': case 'j': case 't': case 'z':
len = *f;
f++;
break;
default: break;
@ -487,6 +490,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
switch (*f) {
char *s;
size_t slen;
case '%':
/* %% */
APPEND_C(*f);
f++;
break;
case 'd': case 'i': {
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[D2S_BUFSIZE];
@ -540,7 +548,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
s = va_arg(ap, char *);
slen = (prec == -1) ? strlen(s) : prec;
slen = (prec < 0) ? strlen(s) : prec;
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
@ -553,8 +561,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
}
default: not_implemented();
} default: not_reached();
}
break;
} default: {
2
deps/jemalloc/src/zone.c
vendored
2
deps/jemalloc/src/zone.c
vendored
@ -137,7 +137,7 @@ zone_destroy(malloc_zone_t *zone)
{

/* This function should never be called. */
assert(false);
not_reached();
return (NULL);
}