Jemalloc updated to 4.0.3.

This commit is contained in:
antirez
2015-10-06 16:18:30 +02:00
parent 589c41e457
commit 5268379e5e
137 changed files with 20120 additions and 10261 deletions

View File

@@ -32,7 +32,7 @@ static void *
chunk_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_HAVE_SBRK
#ifdef JEMALLOC_DSS
return (sbrk(increment));
#else
not_implemented();
@@ -45,7 +45,7 @@ chunk_dss_prec_get(void)
{
dss_prec_t ret;
if (config_dss == false)
if (!have_dss)
return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
ret = dss_prec_default;
@@ -57,8 +57,8 @@ bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
if (config_dss == false)
return (true);
if (!have_dss)
return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
@@ -66,11 +66,10 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
}
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit)
{
void *ret;
cassert(config_dss);
cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
assert(alignment > 0 && (alignment & chunksize_mask) == 0);
@@ -83,9 +82,6 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
malloc_mutex_lock(&dss_mtx);
if (dss_prev != (void *)-1) {
size_t gap_size, cpad_size;
void *cpad, *dss_next;
intptr_t incr;
/*
* The loop is necessary to recover from races with other
@@ -93,8 +89,20 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
* malloc.
*/
do {
void *ret, *cpad, *dss_next;
size_t gap_size, cpad_size;
intptr_t incr;
/* Avoid an unnecessary system call. */
if (new_addr != NULL && dss_max != new_addr)
break;
/* Get the current end of the DSS. */
dss_max = chunk_dss_sbrk(0);
/* Make sure the earlier condition still holds. */
if (new_addr != NULL && dss_max != new_addr)
break;
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
@@ -123,12 +131,20 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
/* Success. */
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0)
chunk_unmap(cpad, cpad_size);
if (cpad_size != 0) {
chunk_hooks_t chunk_hooks =
CHUNK_HOOKS_INITIALIZER;
chunk_dalloc_wrapper(arena,
&chunk_hooks, cpad, cpad_size,
true);
}
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
ret, size);
memset(ret, 0, size);
}
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret);
}
} while (dss_prev != (void *)-1);
@@ -143,7 +159,7 @@ chunk_in_dss(void *chunk)
{
bool ret;
cassert(config_dss);
cassert(have_dss);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
@@ -160,7 +176,7 @@ bool
chunk_dss_boot(void)
{
cassert(config_dss);
cassert(have_dss);
if (malloc_mutex_init(&dss_mtx))
return (true);
@@ -175,7 +191,7 @@ void
chunk_dss_prefork(void)
{
if (config_dss)
if (have_dss)
malloc_mutex_prefork(&dss_mtx);
}
@@ -183,7 +199,7 @@ void
chunk_dss_postfork_parent(void)
{
if (config_dss)
if (have_dss)
malloc_mutex_postfork_parent(&dss_mtx);
}
@@ -191,7 +207,7 @@ void
chunk_dss_postfork_child(void)
{
if (config_dss)
if (have_dss)
malloc_mutex_postfork_child(&dss_mtx);
}