Jemalloc updated to 3.0.0.

Full changelog here:

http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git;a=blob_plain;f=ChangeLog;hb=master

Notable improvements from the point of view of Redis:

1) Bug fixes.
2) Support for Valgrind (see the sketch just below).
3) Support for OS X Lion and FreeBSD.
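
The Valgrind support shows up throughout the diffs below as VALGRIND_MAKE_MEM_UNDEFINED()/VALGRIND_MAKE_MEM_DEFINED() calls, which are standard memcheck client requests telling Valgrind how recycled memory should be treated. A minimal standalone sketch of the idea, assuming the Valgrind development headers are installed (recycle_block() is a hypothetical helper, not a jemalloc function):

/* Illustrative sketch, not part of the diff: informing Valgrind's memcheck
 * about memory an allocator hands out again without a fresh mapping. */
#include <stddef.h>
#include <string.h>
#include <valgrind/memcheck.h>

static void *
recycle_block(void *blk, size_t size, int want_zero)
{
	/* The old contents must be treated as undefined, even though the
	 * pages are still mapped and accessible. */
	VALGRIND_MAKE_MEM_UNDEFINED(blk, size);
	if (want_zero) {
		/* Writing the range also marks it as defined for memcheck. */
		memset(blk, 0, size);
	}
	return (blk);
}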
Committed by antirez on 2012-05-15 15:27:12 +02:00
parent 3c72c94aae, commit ad4c0b4117
157 changed files with 42924 additions and 9103 deletions

File diff suppressed because it is too large.

deps/jemalloc/src/base.c vendored

@ -4,7 +4,7 @@
/******************************************************************************/
/* Data. */
malloc_mutex_t base_mtx;
static malloc_mutex_t base_mtx;
/*
* Current pages that are being used for internal memory allocations. These
@ -32,7 +32,7 @@ base_pages_alloc(size_t minsize)
assert(minsize != 0);
csize = CHUNK_CEILING(minsize);
zero = false;
base_pages = chunk_alloc(csize, true, &zero);
base_pages = chunk_alloc(csize, chunksize, true, &zero);
if (base_pages == NULL)
return (true);
base_next_addr = base_pages;
@ -66,6 +66,17 @@ base_alloc(size_t size)
return (ret);
}
void *
base_calloc(size_t number, size_t size)
{
void *ret = base_alloc(number * size);
if (ret != NULL)
memset(ret, 0, number * size);
return (ret);
}
extent_node_t *
base_node_alloc(void)
{
@ -104,3 +115,24 @@ base_boot(void)
return (false);
}
void
base_prefork(void)
{
malloc_mutex_prefork(&base_mtx);
}
void
base_postfork_parent(void)
{
malloc_mutex_postfork_parent(&base_mtx);
}
void
base_postfork_child(void)
{
malloc_mutex_postfork_child(&base_mtx);
}
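
base.c gains base_calloc(), which mutex.c below uses as the calloc callback for _pthread_mutex_init_calloc_cb() on FreeBSD, plus prefork/postfork hooks so base_mtx is handled safely across fork(). A minimal sketch of the general prefork/postfork pattern using pthread_atfork(), with hypothetical names (jemalloc wires its hooks through its own fork machinery rather than literally like this):

/* Illustrative sketch, not part of the diff: registering fork hooks of this
 * shape, so the child never inherits a mutex locked by a thread that no
 * longer exists in the child. */
#include <pthread.h>

static pthread_mutex_t alloc_mtx = PTHREAD_MUTEX_INITIALIZER;

static void alloc_prefork(void)         { pthread_mutex_lock(&alloc_mtx); }
static void alloc_postfork_parent(void) { pthread_mutex_unlock(&alloc_mtx); }
static void alloc_postfork_child(void)  { pthread_mutex_unlock(&alloc_mtx); }

static void
alloc_fork_init(void)
{
	pthread_atfork(alloc_prefork, alloc_postfork_parent,
	    alloc_postfork_child);
}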

deps/jemalloc/src/chunk.c vendored

@ -5,18 +5,20 @@
/* Data. */
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
#ifdef JEMALLOC_SWAP
bool opt_overcommit = true;
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;
#endif
#ifdef JEMALLOC_IVSALLOC
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
static extent_tree_t chunks_szad;
static extent_tree_t chunks_ad;
rtree_t *chunks_rtree;
#endif
/* Various chunk-related settings. */
size_t chunksize;
@ -26,6 +28,98 @@ size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *chunk_recycle(size_t size, size_t alignment, bool base,
bool *zero);
static void chunk_record(void *chunk, size_t size);
/******************************************************************************/
static void *
chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
{
void *ret;
extent_node_t *node;
extent_node_t key;
size_t alloc_size, leadsize, trailsize;
if (base) {
/*
* This function may need to call base_node_{,de}alloc(), but
* the current chunk allocation request is on behalf of the
* base allocator. Avoid deadlock (and if that weren't an
* issue, potential for infinite recursion) by returning NULL.
*/
return (NULL);
}
alloc_size = size + alignment - chunksize;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
key.addr = NULL;
key.size = alloc_size;
malloc_mutex_lock(&chunks_mtx);
node = extent_tree_szad_nsearch(&chunks_szad, &key);
if (node == NULL) {
malloc_mutex_unlock(&chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
(uintptr_t)node->addr;
assert(node->size >= leadsize + size);
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
/* Remove node from the tree. */
extent_tree_szad_remove(&chunks_szad, node);
extent_tree_ad_remove(&chunks_ad, node);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
node->size = leadsize;
extent_tree_szad_insert(&chunks_szad, node);
extent_tree_ad_insert(&chunks_ad, node);
node = NULL;
}
if (trailsize != 0) {
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
/*
* An additional node is required, but
* base_node_alloc() can cause a new base chunk to be
* allocated. Drop chunks_mtx in order to avoid
* deadlock, and if node allocation fails, deallocate
* the result before returning an error.
*/
malloc_mutex_unlock(&chunks_mtx);
node = base_node_alloc();
if (node == NULL) {
chunk_dealloc(ret, size, true);
return (NULL);
}
malloc_mutex_lock(&chunks_mtx);
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
extent_tree_szad_insert(&chunks_szad, node);
extent_tree_ad_insert(&chunks_ad, node);
node = NULL;
}
malloc_mutex_unlock(&chunks_mtx);
if (node != NULL)
base_node_dealloc(node);
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
/* Pages are zeroed as a side effect of pages_purge(). */
*zero = true;
#else
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
#endif
return (ret);
}
/*
* If the caller specifies (*zero == false), it is still possible to receive
@ -34,79 +128,138 @@ size_t arena_maxclass; /* Max size class for arenas. */
* advantage of them if they are returned.
*/
void *
chunk_alloc(size_t size, bool base, bool *zero)
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
#ifdef JEMALLOC_SWAP
if (swap_enabled) {
ret = chunk_alloc_swap(size, zero);
if (ret != NULL)
goto RETURN;
}
ret = chunk_recycle(size, alignment, base, zero);
if (ret != NULL)
goto label_return;
if (swap_enabled == false || opt_overcommit) {
#endif
#ifdef JEMALLOC_DSS
ret = chunk_alloc_dss(size, zero);
ret = chunk_alloc_mmap(size, alignment, zero);
if (ret != NULL)
goto label_return;
if (config_dss) {
ret = chunk_alloc_dss(size, alignment, zero);
if (ret != NULL)
goto RETURN;
#endif
ret = chunk_alloc_mmap(size);
if (ret != NULL) {
*zero = true;
goto RETURN;
}
#ifdef JEMALLOC_SWAP
goto label_return;
}
#endif
/* All strategies for allocation failed. */
ret = NULL;
RETURN:
#ifdef JEMALLOC_IVSALLOC
if (base == false && ret != NULL) {
label_return:
if (config_ivsalloc && base == false && ret != NULL) {
if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
chunk_dealloc(ret, size, true);
return (NULL);
}
}
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
if (ret != NULL) {
# ifdef JEMALLOC_PROF
if ((config_stats || config_prof) && ret != NULL) {
bool gdump;
# endif
malloc_mutex_lock(&chunks_mtx);
# ifdef JEMALLOC_STATS
stats_chunks.nchunks += (size / chunksize);
# endif
if (config_stats)
stats_chunks.nchunks += (size / chunksize);
stats_chunks.curchunks += (size / chunksize);
if (stats_chunks.curchunks > stats_chunks.highchunks) {
stats_chunks.highchunks = stats_chunks.curchunks;
# ifdef JEMALLOC_PROF
gdump = true;
# endif
}
# ifdef JEMALLOC_PROF
else
if (config_prof)
gdump = true;
} else if (config_prof)
gdump = false;
# endif
malloc_mutex_unlock(&chunks_mtx);
# ifdef JEMALLOC_PROF
if (opt_prof && opt_prof_gdump && gdump)
if (config_prof && opt_prof && opt_prof_gdump && gdump)
prof_gdump();
# endif
}
#endif
if (config_debug && *zero && ret != NULL) {
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
static void
chunk_record(void *chunk, size_t size)
{
extent_node_t *xnode, *node, *prev, key;
pages_purge(chunk, size);
/*
* Allocate a node before acquiring chunks_mtx even though it might not
* be needed, because base_node_alloc() may cause a new base chunk to
* be allocated, which could cause deadlock if chunks_mtx were already
* held.
*/
xnode = base_node_alloc();
malloc_mutex_lock(&chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(&chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
extent_tree_szad_remove(&chunks_szad, node);
node->addr = chunk;
node->size += size;
extent_tree_szad_insert(&chunks_szad, node);
if (xnode != NULL)
base_node_dealloc(xnode);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
/*
* base_node_alloc() failed, which is an exceedingly
* unlikely failure. Leak chunk; its pages have
* already been purged, so this is only a virtual
* memory leak.
*/
malloc_mutex_unlock(&chunks_mtx);
return;
}
node = xnode;
node->addr = chunk;
node->size = size;
extent_tree_ad_insert(&chunks_ad, node);
extent_tree_szad_insert(&chunks_szad, node);
}
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(&chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad.
*/
extent_tree_szad_remove(&chunks_szad, prev);
extent_tree_ad_remove(&chunks_ad, prev);
extent_tree_szad_remove(&chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
extent_tree_szad_insert(&chunks_szad, node);
base_node_dealloc(prev);
}
malloc_mutex_unlock(&chunks_mtx);
}
void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{
@ -116,25 +269,18 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
assert(size != 0);
assert((size & chunksize_mask) == 0);
#ifdef JEMALLOC_IVSALLOC
rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_lock(&chunks_mtx);
stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&chunks_mtx);
#endif
if (config_ivsalloc)
rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
if (config_stats || config_prof) {
malloc_mutex_lock(&chunks_mtx);
stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&chunks_mtx);
}
if (unmap) {
#ifdef JEMALLOC_SWAP
if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
return;
#endif
#ifdef JEMALLOC_DSS
if (chunk_dealloc_dss(chunk, size) == false)
return;
#endif
chunk_dealloc_mmap(chunk, size);
if ((config_dss && chunk_in_dss(chunk)) ||
chunk_dealloc_mmap(chunk, size))
chunk_record(chunk, size);
}
}
@ -144,30 +290,25 @@ chunk_boot(void)
/* Set variables according to the value of opt_lg_chunk. */
chunksize = (ZU(1) << opt_lg_chunk);
assert(chunksize >= PAGE_SIZE);
assert(chunksize >= PAGE);
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> PAGE_SHIFT);
chunk_npages = (chunksize >> LG_PAGE);
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
if (malloc_mutex_init(&chunks_mtx))
if (config_stats || config_prof) {
if (malloc_mutex_init(&chunks_mtx))
return (true);
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
}
if (config_dss && chunk_dss_boot())
return (true);
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
#endif
#ifdef JEMALLOC_SWAP
if (chunk_swap_boot())
return (true);
#endif
if (chunk_mmap_boot())
return (true);
#ifdef JEMALLOC_DSS
if (chunk_dss_boot())
return (true);
#endif
#ifdef JEMALLOC_IVSALLOC
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
if (chunks_rtree == NULL)
return (true);
#endif
extent_tree_szad_new(&chunks_szad);
extent_tree_ad_new(&chunks_ad);
if (config_ivsalloc) {
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk);
if (chunks_rtree == NULL)
return (true);
}
return (false);
}
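
The recycling path above carves an aligned region out of a previously freed extent: it rounds the extent's start address up to the requested alignment, reinserts the leading remainder (leadsize) and trailing remainder (trailsize) as smaller extents, and returns the middle. A standalone sketch of just that arithmetic, assuming a power-of-two alignment (the addresses and sizes are made up):

/* Illustrative sketch, not part of the diff: the lead/trail split performed
 * by the recycling code above, on hypothetical numbers. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT_CEILING(s, a) (((s) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int
main(void)
{
	uintptr_t ext_addr = 0x1234000;		/* free extent start */
	size_t ext_size = 8 << 20;		/* 8 MiB extent */
	size_t size = 4 << 20;			/* want 4 MiB... */
	size_t alignment = 4 << 20;		/* ...aligned to 4 MiB */
	uintptr_t ret = ALIGNMENT_CEILING(ext_addr, alignment);
	size_t leadsize = ret - ext_addr;	/* goes back into the trees */
	size_t trailsize = ext_size - leadsize - size;	/* ditto */

	assert(leadsize + size <= ext_size);
	printf("lead=%zu ret=%#lx trail=%zu\n", leadsize,
	    (unsigned long)ret, trailsize);
	return (0);
}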

deps/jemalloc/src/chunk_dss.c vendored

@ -1,82 +1,42 @@
#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_DSS
/******************************************************************************/
/* Data. */
malloc_mutex_t dss_mtx;
/*
* Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly.
*/
static malloc_mutex_t dss_mtx;
/* Base address of the DSS. */
static void *dss_base;
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
static extent_tree_t dss_chunks_szad;
static extent_tree_t dss_chunks_ad;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *chunk_recycle_dss(size_t size, bool *zero);
static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
static void *dss_max;
/******************************************************************************/
#ifndef JEMALLOC_HAVE_SBRK
static void *
chunk_recycle_dss(size_t size, bool *zero)
sbrk(intptr_t increment)
{
extent_node_t *node, key;
key.addr = NULL;
key.size = size;
malloc_mutex_lock(&dss_mtx);
node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);
if (node != NULL) {
void *ret = node->addr;
/* Remove node from the tree. */
extent_tree_szad_remove(&dss_chunks_szad, node);
if (node->size == size) {
extent_tree_ad_remove(&dss_chunks_ad, node);
base_node_dealloc(node);
} else {
/*
* Insert the remainder of node's address range as a
* smaller chunk. Its position within dss_chunks_ad
* does not change.
*/
assert(node->size > size);
node->addr = (void *)((uintptr_t)node->addr + size);
node->size -= size;
extent_tree_szad_insert(&dss_chunks_szad, node);
}
malloc_mutex_unlock(&dss_mtx);
if (*zero)
memset(ret, 0, size);
return (ret);
}
malloc_mutex_unlock(&dss_mtx);
not_implemented();
return (NULL);
}
#endif
void *
chunk_alloc_dss(size_t size, bool *zero)
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
void *ret;
ret = chunk_recycle_dss(size, zero);
if (ret != NULL)
return (ret);
cassert(config_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
assert(alignment > 0 && (alignment & chunksize_mask) == 0);
/*
* sbrk() uses a signed increment argument, so take care not to
@ -87,6 +47,8 @@ chunk_alloc_dss(size_t size, bool *zero)
malloc_mutex_lock(&dss_mtx);
if (dss_prev != (void *)-1) {
size_t gap_size, cpad_size;
void *cpad, *dss_next;
intptr_t incr;
/*
@ -97,26 +59,40 @@ chunk_alloc_dss(size_t size, bool *zero)
do {
/* Get the current end of the DSS. */
dss_max = sbrk(0);
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
*/
incr = (intptr_t)size
- (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
if (incr == (intptr_t)size)
ret = dss_max;
else {
ret = (void *)((intptr_t)dss_max + incr);
incr += size;
gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
chunksize_mask;
/*
* Compute how much chunk-aligned pad space (if any) is
* necessary to satisfy alignment. This space can be
* recycled for later use.
*/
cpad = (void *)((uintptr_t)dss_max + gap_size);
ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
alignment);
cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)dss_max ||
(uintptr_t)dss_next < (uintptr_t)dss_max) {
/* Wrap-around. */
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
incr = gap_size + cpad_size + size;
dss_prev = sbrk(incr);
if (dss_prev == dss_max) {
/* Success. */
dss_max = (void *)((intptr_t)dss_prev + incr);
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
*zero = true;
if (cpad_size != 0)
chunk_dealloc(cpad, cpad_size, true);
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
return (ret);
}
} while (dss_prev != (void *)-1);
@ -126,84 +102,13 @@ chunk_alloc_dss(size_t size, bool *zero)
return (NULL);
}
static extent_node_t *
chunk_dealloc_dss_record(void *chunk, size_t size)
{
extent_node_t *xnode, *node, *prev, key;
xnode = NULL;
while (true) {
key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(&dss_chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range.
* This does not change the position within
* dss_chunks_ad, so only remove/insert from/into
* dss_chunks_szad.
*/
extent_tree_szad_remove(&dss_chunks_szad, node);
node->addr = chunk;
node->size += size;
extent_tree_szad_insert(&dss_chunks_szad, node);
break;
} else if (xnode == NULL) {
/*
* It is possible that base_node_alloc() will cause a
* new base chunk to be allocated, so take care not to
* deadlock on dss_mtx, and recover if another thread
* deallocates an adjacent chunk while this one is busy
* allocating xnode.
*/
malloc_mutex_unlock(&dss_mtx);
xnode = base_node_alloc();
malloc_mutex_lock(&dss_mtx);
if (xnode == NULL)
return (NULL);
} else {
/* Coalescing forward failed, so insert a new node. */
node = xnode;
xnode = NULL;
node->addr = chunk;
node->size = size;
extent_tree_ad_insert(&dss_chunks_ad, node);
extent_tree_szad_insert(&dss_chunks_szad, node);
break;
}
}
/* Discard xnode if it ended up unused do to a race. */
if (xnode != NULL)
base_node_dealloc(xnode);
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(&dss_chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within dss_chunks_ad, so only
* remove/insert node from/into dss_chunks_szad.
*/
extent_tree_szad_remove(&dss_chunks_szad, prev);
extent_tree_ad_remove(&dss_chunks_ad, prev);
extent_tree_szad_remove(&dss_chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
extent_tree_szad_insert(&dss_chunks_szad, node);
base_node_dealloc(prev);
}
return (node);
}
bool
chunk_in_dss(void *chunk)
{
bool ret;
cassert(config_dss);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
@ -215,70 +120,43 @@ chunk_in_dss(void *chunk)
return (ret);
}
bool
chunk_dealloc_dss(void *chunk, size_t size)
{
bool ret;
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max) {
extent_node_t *node;
/* Try to coalesce with other unused chunks. */
node = chunk_dealloc_dss_record(chunk, size);
if (node != NULL) {
chunk = node->addr;
size = node->size;
}
/* Get the current end of the DSS. */
dss_max = sbrk(0);
/*
* Try to shrink the DSS if this chunk is at the end of the
* DSS. The sbrk() call here is subject to a race condition
* with threads that use brk(2) or sbrk(2) directly, but the
* alternative would be to leak memory for the sake of poorly
* designed multi-threaded programs.
*/
if ((void *)((uintptr_t)chunk + size) == dss_max
&& (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
/* Success. */
dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);
if (node != NULL) {
extent_tree_szad_remove(&dss_chunks_szad, node);
extent_tree_ad_remove(&dss_chunks_ad, node);
base_node_dealloc(node);
}
} else
madvise(chunk, size, MADV_DONTNEED);
ret = false;
goto RETURN;
}
ret = true;
RETURN:
malloc_mutex_unlock(&dss_mtx);
return (ret);
}
bool
chunk_dss_boot(void)
{
cassert(config_dss);
if (malloc_mutex_init(&dss_mtx))
return (true);
dss_base = sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
extent_tree_szad_new(&dss_chunks_szad);
extent_tree_ad_new(&dss_chunks_ad);
return (false);
}
void
chunk_dss_prefork(void)
{
if (config_dss)
malloc_mutex_prefork(&dss_mtx);
}
void
chunk_dss_postfork_parent(void)
{
if (config_dss)
malloc_mutex_postfork_parent(&dss_mtx);
}
void
chunk_dss_postfork_child(void)
{
if (config_dss)
malloc_mutex_postfork_child(&dss_mtx);
}
/******************************************************************************/
#endif /* JEMALLOC_DSS */
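
chunk_alloc_dss() now grows the DSS with a single sbrk() call of gap_size + cpad_size + size bytes: gap_size pads the current break up to a chunk boundary, cpad is a chunk-aligned pad that satisfies any extra alignment and can be recycled later, and the wrap-around checks guard against address-space exhaustion. A standalone sketch of that increment arithmetic on made-up numbers, with no actual sbrk() call:

/* Illustrative sketch, not part of the diff: computing the gap, pad, and
 * sbrk increment used by the DSS allocation above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const size_t chunksize = 4 << 20;
	const size_t chunksize_mask = chunksize - 1;
	uintptr_t dss_max = 0x0805f000;		/* current program break */
	size_t size = 8 << 20;			/* request two chunks... */
	size_t alignment = 8 << 20;		/* ...aligned to 8 MiB */
	/* Pad up to the next chunk boundary. */
	size_t gap_size = (chunksize - (dss_max & chunksize_mask)) &
	    chunksize_mask;
	/* Chunk-aligned pad that could be recycled afterwards. */
	uintptr_t cpad = dss_max + gap_size;
	uintptr_t ret = (dss_max + (alignment - 1)) &
	    ~((uintptr_t)alignment - 1);
	size_t cpad_size = ret - cpad;
	intptr_t incr = gap_size + cpad_size + size;	/* one sbrk(incr) */

	printf("gap=%zx cpad_size=%zx incr=%zx ret=%lx\n", gap_size,
	    cpad_size, (size_t)incr, (unsigned long)ret);
	return (0);
}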

deps/jemalloc/src/chunk_mmap.c vendored

@ -1,54 +1,37 @@
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
/*
* Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
* potentially avoid some system calls.
*/
#ifndef NO_TLS
static __thread bool mmap_unaligned_tls
JEMALLOC_ATTR(tls_model("initial-exec"));
#define MMAP_UNALIGNED_GET() mmap_unaligned_tls
#define MMAP_UNALIGNED_SET(v) do { \
mmap_unaligned_tls = (v); \
} while (0)
#else
static pthread_key_t mmap_unaligned_tsd;
#define MMAP_UNALIGNED_GET() ((bool)pthread_getspecific(mmap_unaligned_tsd))
#define MMAP_UNALIGNED_SET(v) do { \
pthread_setspecific(mmap_unaligned_tsd, (void *)(v)); \
} while (0)
#endif
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *pages_map(void *addr, size_t size, bool noreserve);
static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, bool unaligned,
bool noreserve);
static void *chunk_alloc_mmap_internal(size_t size, bool noreserve);
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
bool *zero);
/******************************************************************************/
static void *
pages_map(void *addr, size_t size, bool noreserve)
pages_map(void *addr, size_t size)
{
void *ret;
assert(size != 0);
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
int flags = MAP_PRIVATE | MAP_ANON;
#ifdef MAP_NORESERVE
if (noreserve)
flags |= MAP_NORESERVE;
#endif
ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-1, 0);
assert(ret != NULL);
if (ret == MAP_FAILED)
@ -60,16 +43,15 @@ pages_map(void *addr, size_t size, bool noreserve)
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in munmap(): ");
malloc_write(buf);
malloc_write("\n");
buferror(buf, sizeof(buf));
malloc_printf("<jemalloc: Error in munmap(): %s\n",
buf);
if (opt_abort)
abort();
}
ret = NULL;
}
#endif
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
return (ret);
@ -79,161 +61,142 @@ static void
pages_unmap(void *addr, size_t size)
{
if (munmap(addr, size) == -1) {
#ifdef _WIN32
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
if (munmap(addr, size) == -1)
#endif
{
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in munmap(): ");
malloc_write(buf);
malloc_write("\n");
buferror(buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
#else
"munmap"
#endif
"(): %s\n", buf);
if (opt_abort)
abort();
}
}
static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
void *ret;
size_t offset;
void *ret = (void *)((uintptr_t)addr + leadsize);
/* Beware size_t wrap-around. */
if (size + chunksize <= size)
assert(alloc_size >= leadsize + size);
#ifdef _WIN32
{
void *new_addr;
pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size);
if (new_addr == ret)
return (ret);
if (new_addr)
pages_unmap(new_addr, size);
return (NULL);
ret = pages_map(NULL, size + chunksize, noreserve);
if (ret == NULL)
return (NULL);
/* Clean up unneeded leading/trailing space. */
offset = CHUNK_ADDR2OFFSET(ret);
if (offset != 0) {
/* Note that mmap() returned an unaligned mapping. */
unaligned = true;
/* Leading space. */
pages_unmap(ret, chunksize - offset);
ret = (void *)((uintptr_t)ret +
(chunksize - offset));
/* Trailing space. */
pages_unmap((void *)((uintptr_t)ret + size),
offset);
} else {
/* Trailing space only. */
pages_unmap((void *)((uintptr_t)ret + size),
chunksize);
}
#else
{
size_t trailsize = alloc_size - leadsize - size;
/*
* If mmap() returned an aligned mapping, reset mmap_unaligned so that
* the next chunk_alloc_mmap() execution tries the fast allocation
* method.
*/
if (unaligned == false)
MMAP_UNALIGNED_SET(false);
if (leadsize != 0)
pages_unmap(addr, leadsize);
if (trailsize != 0)
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
return (ret);
}
#endif
}
return (ret);
void
pages_purge(void *addr, size_t length)
{
#ifdef _WIN32
VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
#else
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
# else
# error "No method defined for purging unused dirty pages."
# endif
madvise(addr, length, JEMALLOC_MADV_PURGE);
#endif
}
static void *
chunk_alloc_mmap_internal(size_t size, bool noreserve)
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
void *ret, *pages;
size_t alloc_size, leadsize;
alloc_size = size + alignment - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
do {
pages = pages_map(NULL, alloc_size);
if (pages == NULL)
return (NULL);
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
ret = pages_trim(pages, alloc_size, leadsize, size);
} while (ret == NULL);
assert(ret != NULL);
*zero = true;
return (ret);
}
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
void *ret;
size_t offset;
/*
* Ideally, there would be a way to specify alignment to mmap() (like
* NetBSD has), but in the absence of such a feature, we have to work
* hard to efficiently create aligned mappings. The reliable, but
* slow method is to create a mapping that is over-sized, then trim the
* excess. However, that always results in at least one call to
* excess. However, that always results in one or two calls to
* pages_unmap().
*
* A more optimistic approach is to try mapping precisely the right
* amount, then try to append another mapping if alignment is off. In
* practice, this works out well as long as the application is not
* interleaving mappings via direct mmap() calls. If we do run into a
* situation where there is an interleaved mapping and we are unable to
* extend an unaligned mapping, our best option is to switch to the
* slow method until mmap() returns another aligned mapping. This will
* tend to leave a gap in the memory map that is too small to cause
* later problems for the optimistic method.
*
* Another possible confounding factor is address space layout
* randomization (ASLR), which causes mmap(2) to disregard the
* requested address. mmap_unaligned tracks whether the previous
* chunk_alloc_mmap() execution received any unaligned or relocated
* mappings, and if so, the current execution will immediately fall
* back to the slow method. However, we keep track of whether the fast
* method would have succeeded, and if so, we make a note to try the
* fast method next time.
* Optimistically try mapping precisely the right amount before falling
* back to the slow method, with the expectation that the optimistic
* approach works most of the time.
*/
if (MMAP_UNALIGNED_GET() == false) {
size_t offset;
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = pages_map(NULL, size, noreserve);
if (ret == NULL)
return (NULL);
offset = CHUNK_ADDR2OFFSET(ret);
if (offset != 0) {
MMAP_UNALIGNED_SET(true);
/* Try to extend chunk boundary. */
if (pages_map((void *)((uintptr_t)ret + size),
chunksize - offset, noreserve) == NULL) {
/*
* Extension failed. Clean up, then revert to
* the reliable-but-expensive method.
*/
pages_unmap(ret, size);
ret = chunk_alloc_mmap_slow(size, true,
noreserve);
} else {
/* Clean up unneeded leading space. */
pages_unmap(ret, chunksize - offset);
ret = (void *)((uintptr_t)ret + (chunksize -
offset));
}
}
} else
ret = chunk_alloc_mmap_slow(size, false, noreserve);
ret = pages_map(NULL, size);
if (ret == NULL)
return (NULL);
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
return (chunk_alloc_mmap_slow(size, alignment, zero));
}
assert(ret != NULL);
*zero = true;
return (ret);
}
void *
chunk_alloc_mmap(size_t size)
{
return (chunk_alloc_mmap_internal(size, false));
}
void *
chunk_alloc_mmap_noreserve(size_t size)
{
return (chunk_alloc_mmap_internal(size, true));
}
void
bool
chunk_dealloc_mmap(void *chunk, size_t size)
{
pages_unmap(chunk, size);
}
bool
chunk_mmap_boot(void)
{
#ifdef NO_TLS
if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
malloc_write("<jemalloc>: Error in pthread_key_create()\n");
return (true);
}
#endif
return (false);
if (config_munmap)
pages_unmap(chunk, size);
return (config_munmap == false);
}
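
chunk_alloc_mmap() now maps exactly the requested size first and only falls back to the over-allocate-and-trim path when the kernel happens to return a misaligned address, avoiding extra munmap() calls in the common case. A reduced standalone sketch of the same strategy using plain mmap()/munmap(), assuming a POSIX system and a power-of-two alignment (map_aligned() is a hypothetical name):

/* Illustrative sketch, not part of the diff: "map exactly, else over-map and
 * trim" for obtaining an aligned mapping without kernel alignment support. */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
map_aligned(size_t size, size_t alignment)
{
	/* Optimistic path: exactly `size`, hoping it comes back aligned. */
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED)
		return (NULL);
	if (((uintptr_t)p & (alignment - 1)) == 0)
		return (p);
	munmap(p, size);

	/* Slow path: over-allocate, then unmap the misaligned lead and the
	 * unused tail, leaving exactly `size` aligned bytes. */
	size_t alloc_size = size + alignment;
	p = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED)
		return (NULL);
	uintptr_t ret = ((uintptr_t)p + (alignment - 1)) &
	    ~((uintptr_t)alignment - 1);
	size_t lead = ret - (uintptr_t)p;
	size_t trail = alloc_size - lead - size;
	if (lead != 0)
		munmap(p, lead);
	if (trail != 0)
		munmap((void *)(ret + size), trail);
	return ((void *)ret);
}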

deps/jemalloc/src/chunk_swap.c vendored (deleted)

@ -1,402 +0,0 @@
#define JEMALLOC_CHUNK_SWAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_SWAP
/******************************************************************************/
/* Data. */
malloc_mutex_t swap_mtx;
bool swap_enabled;
bool swap_prezeroed;
size_t swap_nfds;
int *swap_fds;
#ifdef JEMALLOC_STATS
size_t swap_avail;
#endif
/* Base address of the mmap()ed file(s). */
static void *swap_base;
/* Current end of the space in use (<= swap_max). */
static void *swap_end;
/* Absolute upper limit on file-backed addresses. */
static void *swap_max;
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
static extent_tree_t swap_chunks_szad;
static extent_tree_t swap_chunks_ad;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *chunk_recycle_swap(size_t size, bool *zero);
static extent_node_t *chunk_dealloc_swap_record(void *chunk, size_t size);
/******************************************************************************/
static void *
chunk_recycle_swap(size_t size, bool *zero)
{
extent_node_t *node, key;
key.addr = NULL;
key.size = size;
malloc_mutex_lock(&swap_mtx);
node = extent_tree_szad_nsearch(&swap_chunks_szad, &key);
if (node != NULL) {
void *ret = node->addr;
/* Remove node from the tree. */
extent_tree_szad_remove(&swap_chunks_szad, node);
if (node->size == size) {
extent_tree_ad_remove(&swap_chunks_ad, node);
base_node_dealloc(node);
} else {
/*
* Insert the remainder of node's address range as a
* smaller chunk. Its position within swap_chunks_ad
* does not change.
*/
assert(node->size > size);
node->addr = (void *)((uintptr_t)node->addr + size);
node->size -= size;
extent_tree_szad_insert(&swap_chunks_szad, node);
}
#ifdef JEMALLOC_STATS
swap_avail -= size;
#endif
malloc_mutex_unlock(&swap_mtx);
if (*zero)
memset(ret, 0, size);
return (ret);
}
malloc_mutex_unlock(&swap_mtx);
return (NULL);
}
void *
chunk_alloc_swap(size_t size, bool *zero)
{
void *ret;
assert(swap_enabled);
ret = chunk_recycle_swap(size, zero);
if (ret != NULL)
return (ret);
malloc_mutex_lock(&swap_mtx);
if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
ret = swap_end;
swap_end = (void *)((uintptr_t)swap_end + size);
#ifdef JEMALLOC_STATS
swap_avail -= size;
#endif
malloc_mutex_unlock(&swap_mtx);
if (swap_prezeroed)
*zero = true;
else if (*zero)
memset(ret, 0, size);
} else {
malloc_mutex_unlock(&swap_mtx);
return (NULL);
}
return (ret);
}
static extent_node_t *
chunk_dealloc_swap_record(void *chunk, size_t size)
{
extent_node_t *xnode, *node, *prev, key;
xnode = NULL;
while (true) {
key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(&swap_chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range.
* This does not change the position within
* swap_chunks_ad, so only remove/insert from/into
* swap_chunks_szad.
*/
extent_tree_szad_remove(&swap_chunks_szad, node);
node->addr = chunk;
node->size += size;
extent_tree_szad_insert(&swap_chunks_szad, node);
break;
} else if (xnode == NULL) {
/*
* It is possible that base_node_alloc() will cause a
* new base chunk to be allocated, so take care not to
* deadlock on swap_mtx, and recover if another thread
* deallocates an adjacent chunk while this one is busy
* allocating xnode.
*/
malloc_mutex_unlock(&swap_mtx);
xnode = base_node_alloc();
malloc_mutex_lock(&swap_mtx);
if (xnode == NULL)
return (NULL);
} else {
/* Coalescing forward failed, so insert a new node. */
node = xnode;
xnode = NULL;
node->addr = chunk;
node->size = size;
extent_tree_ad_insert(&swap_chunks_ad, node);
extent_tree_szad_insert(&swap_chunks_szad, node);
break;
}
}
/* Discard xnode if it ended up unused do to a race. */
if (xnode != NULL)
base_node_dealloc(xnode);
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(&swap_chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within swap_chunks_ad, so only
* remove/insert node from/into swap_chunks_szad.
*/
extent_tree_szad_remove(&swap_chunks_szad, prev);
extent_tree_ad_remove(&swap_chunks_ad, prev);
extent_tree_szad_remove(&swap_chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
extent_tree_szad_insert(&swap_chunks_szad, node);
base_node_dealloc(prev);
}
return (node);
}
bool
chunk_in_swap(void *chunk)
{
bool ret;
assert(swap_enabled);
malloc_mutex_lock(&swap_mtx);
if ((uintptr_t)chunk >= (uintptr_t)swap_base
&& (uintptr_t)chunk < (uintptr_t)swap_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(&swap_mtx);
return (ret);
}
bool
chunk_dealloc_swap(void *chunk, size_t size)
{
bool ret;
assert(swap_enabled);
malloc_mutex_lock(&swap_mtx);
if ((uintptr_t)chunk >= (uintptr_t)swap_base
&& (uintptr_t)chunk < (uintptr_t)swap_max) {
extent_node_t *node;
/* Try to coalesce with other unused chunks. */
node = chunk_dealloc_swap_record(chunk, size);
if (node != NULL) {
chunk = node->addr;
size = node->size;
}
/*
* Try to shrink the in-use memory if this chunk is at the end
* of the in-use memory.
*/
if ((void *)((uintptr_t)chunk + size) == swap_end) {
swap_end = (void *)((uintptr_t)swap_end - size);
if (node != NULL) {
extent_tree_szad_remove(&swap_chunks_szad,
node);
extent_tree_ad_remove(&swap_chunks_ad, node);
base_node_dealloc(node);
}
} else
madvise(chunk, size, MADV_DONTNEED);
#ifdef JEMALLOC_STATS
swap_avail += size;
#endif
ret = false;
goto RETURN;
}
ret = true;
RETURN:
malloc_mutex_unlock(&swap_mtx);
return (ret);
}
bool
chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
{
bool ret;
unsigned i;
off_t off;
void *vaddr;
size_t cumsize, voff;
size_t sizes[nfds];
malloc_mutex_lock(&swap_mtx);
/* Get file sizes. */
for (i = 0, cumsize = 0; i < nfds; i++) {
off = lseek(fds[i], 0, SEEK_END);
if (off == ((off_t)-1)) {
ret = true;
goto RETURN;
}
if (PAGE_CEILING(off) != off) {
/* Truncate to a multiple of the page size. */
off &= ~PAGE_MASK;
if (ftruncate(fds[i], off) != 0) {
ret = true;
goto RETURN;
}
}
sizes[i] = off;
if (cumsize + off < cumsize) {
/*
* Cumulative file size is greater than the total
* address space. Bail out while it's still obvious
* what the problem is.
*/
ret = true;
goto RETURN;
}
cumsize += off;
}
/* Round down to a multiple of the chunk size. */
cumsize &= ~chunksize_mask;
if (cumsize == 0) {
ret = true;
goto RETURN;
}
/*
* Allocate a chunk-aligned region of anonymous memory, which will
* be the final location for the memory-mapped files.
*/
vaddr = chunk_alloc_mmap_noreserve(cumsize);
if (vaddr == NULL) {
ret = true;
goto RETURN;
}
/* Overlay the files onto the anonymous mapping. */
for (i = 0, voff = 0; i < nfds; i++) {
void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i],
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0);
if (addr == MAP_FAILED) {
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write(
"<jemalloc>: Error in mmap(..., MAP_FIXED, ...): ");
malloc_write(buf);
malloc_write("\n");
if (opt_abort)
abort();
if (munmap(vaddr, voff) == -1) {
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in munmap(): ");
malloc_write(buf);
malloc_write("\n");
}
ret = true;
goto RETURN;
}
assert(addr == (void *)((uintptr_t)vaddr + voff));
/*
* Tell the kernel that the mapping will be accessed randomly,
* and that it should not gratuitously sync pages to the
* filesystem.
*/
#ifdef MADV_RANDOM
madvise(addr, sizes[i], MADV_RANDOM);
#endif
#ifdef MADV_NOSYNC
madvise(addr, sizes[i], MADV_NOSYNC);
#endif
voff += sizes[i];
}
swap_prezeroed = prezeroed;
swap_base = vaddr;
swap_end = swap_base;
swap_max = (void *)((uintptr_t)vaddr + cumsize);
/* Copy the fds array for mallctl purposes. */
swap_fds = (int *)base_alloc(nfds * sizeof(int));
if (swap_fds == NULL) {
ret = true;
goto RETURN;
}
memcpy(swap_fds, fds, nfds * sizeof(int));
swap_nfds = nfds;
#ifdef JEMALLOC_STATS
swap_avail = cumsize;
#endif
swap_enabled = true;
ret = false;
RETURN:
malloc_mutex_unlock(&swap_mtx);
return (ret);
}
bool
chunk_swap_boot(void)
{
if (malloc_mutex_init(&swap_mtx))
return (true);
swap_enabled = false;
swap_prezeroed = false; /* swap.* mallctl's depend on this. */
swap_nfds = 0;
swap_fds = NULL;
#ifdef JEMALLOC_STATS
swap_avail = 0;
#endif
swap_base = NULL;
swap_end = NULL;
swap_max = NULL;
extent_tree_szad_new(&swap_chunks_szad);
extent_tree_ad_new(&swap_chunks_ad);
return (false);
}
/******************************************************************************/
#endif /* JEMALLOC_SWAP */
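
The removed swap support reserved one big chunk-aligned anonymous region and then overlaid the user-supplied files onto it with MAP_FIXED, as in the loop above; jemalloc 3.0.0 drops the feature entirely. A reduced standalone sketch of just the overlay step, assuming fd is an open descriptor whose size is page-aligned (overlay_file() is hypothetical):

/* Illustrative sketch, not part of the diff: replacing part of an anonymous
 * reservation with a file-backed mapping via MAP_FIXED, the technique the
 * deleted swap code relied on. */
#include <stddef.h>
#include <sys/mman.h>

static void *
overlay_file(void *reserved, size_t off, int fd, size_t filesize)
{
	/* MAP_FIXED replaces the anonymous pages at reserved+off in place,
	 * so addresses already computed from `reserved` remain valid. */
	void *addr = mmap((char *)reserved + off, filesize,
	    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
	return (addr == MAP_FAILED ? NULL : addr);
}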

deps/jemalloc/src/ckh.c vendored

@ -73,7 +73,6 @@ ckh_isearch(ckh_t *ckh, const void *key)
size_t hash1, hash2, bucket, cell;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
@ -100,7 +99,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
prn32(offset, LG_CKH_BUCKET_CELLS, ckh->prn_state, CKH_A, CKH_C);
prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@ -142,7 +141,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same
* bucket.
*/
prn32(i, LG_CKH_BUCKET_CELLS, ckh->prn_state, CKH_A, CKH_C);
prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
@ -265,15 +264,15 @@ ckh_grow(ckh_t *ckh)
size_t usize;
lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE, NULL);
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0) {
ret = true;
goto RETURN;
goto label_return;
}
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (tab == NULL) {
ret = true;
goto RETURN;
goto label_return;
}
/* Swap in new table. */
ttab = ckh->tab;
@ -293,7 +292,7 @@ ckh_grow(ckh_t *ckh)
}
ret = false;
RETURN:
label_return:
return (ret);
}
@ -310,7 +309,7 @@ ckh_shrink(ckh_t *ckh)
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE, NULL);
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0)
return;
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
@ -362,7 +361,7 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ckh->ninserts = 0;
ckh->nrelocs = 0;
#endif
ckh->prn_state = 42; /* Value doesn't really matter. */
ckh->prng_state = 42; /* Value doesn't really matter. */
ckh->count = 0;
/*
@ -383,23 +382,19 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ckh->hash = hash;
ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE, NULL);
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (usize == 0) {
ret = true;
goto RETURN;
goto label_return;
}
ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (ckh->tab == NULL) {
ret = true;
goto RETURN;
goto label_return;
}
#ifdef JEMALLOC_DEBUG
ckh->magic = CKH_MAGIC;
#endif
ret = false;
RETURN:
label_return:
return (ret);
}
@ -408,7 +403,6 @@ ckh_delete(ckh_t *ckh)
{
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
#ifdef CKH_VERBOSE
malloc_printf(
@ -433,7 +427,6 @@ ckh_count(ckh_t *ckh)
{
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
return (ckh->count);
}
@ -464,7 +457,6 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
bool ret;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
assert(ckh_search(ckh, key, NULL, NULL));
#ifdef CKH_COUNT
@ -474,12 +466,12 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
while (ckh_try_insert(ckh, &key, &data)) {
if (ckh_grow(ckh)) {
ret = true;
goto RETURN;
goto label_return;
}
}
ret = false;
RETURN:
label_return:
return (ret);
}
@ -489,7 +481,6 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
size_t cell;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
@ -521,7 +512,6 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
size_t cell;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
@ -545,7 +535,7 @@ ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
assert(hash1 != NULL);
assert(hash2 != NULL);
h = hash(key, strlen((const char *)key), 0x94122f335b332aeaLLU);
h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
if (minbits <= 32) {
/*
* Avoid doing multiple hashes, since a single hash provides
@ -556,7 +546,7 @@ ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
} else {
ret1 = h;
ret2 = hash(key, strlen((const char *)key),
0x8432a476666bbc13LLU);
UINT64_C(0x8432a476666bbc13));
}
*hash1 = ret1;
@ -593,7 +583,7 @@ ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
u.i = 0;
#endif
u.v = key;
h = hash(&u.i, sizeof(u.i), 0xd983396e68886082LLU);
h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
if (minbits <= 32) {
/*
* Avoid doing multiple hashes, since a single hash provides
@ -604,7 +594,7 @@ ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
} else {
assert(SIZEOF_PTR == 8);
ret1 = h;
ret2 = hash(&u.i, sizeof(u.i), 0x5e2be9aff8709a5dLLU);
ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
}
*hash1 = ret1;
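
Besides the prn to prng rename and the RETURN to label_return label change, the ckh hash seeds now use the standard UINT64_C() macro from <stdint.h> rather than a hard-coded LLU suffix, so the constants get the correct 64-bit type on any C99 compiler. A tiny standalone sketch:

/* Illustrative sketch, not part of the diff: UINT64_C() portably builds a
 * uint64_t constant, regardless of which integer type is 64 bits wide. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t seed = UINT64_C(0x94122f335b332aea);	/* seed used above */
	printf("seed=0x%016" PRIx64 "\n", seed);
	return (0);
}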

deps/jemalloc/src/ctl.c vendored (1186 lines changed)
File diff suppressed because it is too large.

deps/jemalloc/src/extent.c vendored

@ -3,7 +3,6 @@
/******************************************************************************/
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
static inline int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
@ -25,7 +24,6 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
extent_szad_comp)
#endif
static inline int
extent_ad_comp(extent_node_t *a, extent_node_t *b)

deps/jemalloc/src/huge.c vendored

@ -4,11 +4,9 @@
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
#endif
malloc_mutex_t huge_mtx;
@ -19,10 +17,18 @@ static extent_tree_t huge;
void *
huge_malloc(size_t size, bool zero)
{
return (huge_palloc(size, chunksize, zero));
}
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
void *ret;
size_t csize;
extent_node_t *node;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
@ -37,7 +43,12 @@ huge_malloc(size_t size, bool zero)
if (node == NULL)
return (NULL);
ret = chunk_alloc(csize, false, &zero);
/*
* Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
ret = chunk_alloc(csize, alignment, false, &is_zeroed);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
@ -49,106 +60,19 @@ huge_malloc(size_t size, bool zero)
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(csize);
huge_nmalloc++;
huge_allocated += csize;
#endif
if (config_stats) {
stats_cactive_add(csize);
huge_nmalloc++;
huge_allocated += csize;
}
malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_FILL
if (zero == false) {
if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
else if (opt_zero)
else if (opt_zero && is_zeroed == false)
memset(ret, 0, csize);
}
#endif
return (ret);
}
/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
void *ret;
size_t alloc_size, chunk_size, offset;
extent_node_t *node;
/*
* This allocation requires alignment that is even larger than chunk
* alignment. This means that huge_malloc() isn't good enough.
*
* Allocate almost twice as many chunks as are demanded by the size or
* alignment, in order to assure the alignment can be achieved, then
* unmap leading and trailing chunks.
*/
assert(alignment > chunksize);
chunk_size = CHUNK_CEILING(size);
if (size >= alignment)
alloc_size = chunk_size + alignment - chunksize;
else
alloc_size = (alignment << 1) - chunksize;
/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc();
if (node == NULL)
return (NULL);
ret = chunk_alloc(alloc_size, false, &zero);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
}
offset = (uintptr_t)ret & (alignment - 1);
assert((offset & chunksize_mask) == 0);
assert(offset < alloc_size);
if (offset == 0) {
/* Trim trailing space. */
chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
- chunk_size, true);
} else {
size_t trailsize;
/* Trim leading space. */
chunk_dealloc(ret, alignment - offset, true);
ret = (void *)((uintptr_t)ret + (alignment - offset));
trailsize = alloc_size - (alignment - offset) - chunk_size;
if (trailsize != 0) {
/* Trim trailing space. */
assert(trailsize < alloc_size);
chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
trailsize, true);
}
}
/* Insert node into huge. */
node->addr = ret;
node->size = chunk_size;
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(chunk_size);
huge_nmalloc++;
huge_allocated += chunk_size;
#endif
malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_FILL
if (zero == false) {
if (opt_junk)
memset(ret, 0xa5, chunk_size);
else if (opt_zero)
memset(ret, 0, chunk_size);
}
#endif
return (ret);
}
@ -164,12 +88,10 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
#ifdef JEMALLOC_FILL
if (opt_junk && size < oldsize) {
if (config_fill && opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
#endif
return (ptr);
}
@ -218,20 +140,13 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
*/
copysize = (size < oldsize) ? size : oldsize;
#ifdef JEMALLOC_MREMAP
/*
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
* source nor the destination are in swap or dss.
* source nor the destination are in dss.
*/
#ifdef JEMALLOC_MREMAP_FIXED
if (oldsize >= chunksize
# ifdef JEMALLOC_SWAP
&& (swap_enabled == false || (chunk_in_swap(ptr) == false &&
chunk_in_swap(ret) == false))
# endif
# ifdef JEMALLOC_DSS
&& chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
# endif
) {
if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
== false && chunk_in_dss(ret) == false))) {
size_t newsize = huge_salloc(ret);
/*
@ -253,10 +168,9 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
*/
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in mremap(): ");
malloc_write(buf);
malloc_write("\n");
buferror(buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in mremap(): %s\n",
buf);
if (opt_abort)
abort();
memcpy(ret, ptr, copysize);
@ -266,7 +180,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
#endif
{
memcpy(ret, ptr, copysize);
idalloc(ptr);
iqalloc(ptr);
}
return (ret);
}
@ -285,23 +199,16 @@ huge_dalloc(void *ptr, bool unmap)
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_sub(node->size);
huge_ndalloc++;
huge_allocated -= node->size;
#endif
if (config_stats) {
stats_cactive_sub(node->size);
huge_ndalloc++;
huge_allocated -= node->size;
}
malloc_mutex_unlock(&huge_mtx);
if (unmap) {
/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
if (opt_junk)
memset(node->addr, 0x5a, node->size);
#endif
#endif
}
if (unmap && config_fill && config_dss && opt_junk)
memset(node->addr, 0x5a, node->size);
chunk_dealloc(node->addr, node->size, unmap);
@ -328,7 +235,6 @@ huge_salloc(const void *ptr)
return (ret);
}
#ifdef JEMALLOC_PROF
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
@ -365,7 +271,6 @@ huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
malloc_mutex_unlock(&huge_mtx);
}
#endif
bool
huge_boot(void)
@ -376,11 +281,32 @@ huge_boot(void)
return (true);
extent_tree_ad_new(&huge);
#ifdef JEMALLOC_STATS
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
#endif
if (config_stats) {
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
}
return (false);
}
void
huge_prefork(void)
{
malloc_mutex_prefork(&huge_mtx);
}
void
huge_postfork_parent(void)
{
malloc_mutex_postfork_parent(&huge_mtx);
}
void
huge_postfork_child(void)
{
malloc_mutex_postfork_child(&huge_mtx);
}
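
huge_malloc() is now a thin wrapper around huge_palloc(), and the zero handling copies the caller's flag into is_zeroed so chunk_alloc() can report whether the memory actually came back zeroed: junk fill happens only when the caller did not request zeroed memory, and an explicit memset() only when opt.zero is set and the chunk was not already zeroed. A compact sketch of that decision, with alloc_chunks() as a hypothetical stand-in for chunk_alloc() and the opt_* flags passed as parameters:

/* Illustrative sketch, not part of the diff: junk/zero fill decisions driven
 * by what the lower layer reports via the is_zeroed copy. */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static void *
alloc_chunks(size_t size, bool *zero)
{
	/* Pretend the pages came from a fresh mapping, hence zeroed. */
	void *p = calloc(1, size);
	if (p != NULL)
		*zero = true;
	return (p);
}

static void *
huge_alloc_sketch(size_t size, bool zero, bool opt_junk_, bool opt_zero_)
{
	bool is_zeroed = zero;	/* copy, so the caller's request is kept */
	void *ret = alloc_chunks(size, &is_zeroed);
	if (ret == NULL)
		return (NULL);
	if (zero == false) {
		if (opt_junk_)
			memset(ret, 0xa5, size);	/* debug junk fill */
		else if (opt_zero_ && is_zeroed == false)
			memset(ret, 0, size);		/* only if needed */
	}
	return (ret);
}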

File diff suppressed because it is too large.

deps/jemalloc/src/mutex.c vendored

@ -1,14 +1,26 @@
#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_internal.h"
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
#include <dlfcn.h>
#endif
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif
#ifdef JEMALLOC_LAZY_LOCK
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static void pthread_create_once(void);
#endif
@ -18,7 +30,7 @@ static void pthread_create_once(void);
* process goes multi-threaded.
*/
#ifdef JEMALLOC_LAZY_LOCK
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
@ -36,8 +48,7 @@ pthread_create_once(void)
isthreaded = true;
}
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
void *__restrict arg)
@ -52,39 +63,87 @@ pthread_create(pthread_t *__restrict thread,
/******************************************************************************/
#ifdef JEMALLOC_MUTEX_INIT_CB
int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
#endif
bool
malloc_mutex_init(malloc_mutex_t *mutex)
{
#ifdef JEMALLOC_OSSPIN
*mutex = 0;
#ifdef _WIN32
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
_CRT_SPINCOUNT))
return (true);
#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
if (postpone_init) {
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
0)
return (true);
}
#else
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr) != 0)
return (true);
#ifdef PTHREAD_MUTEX_ADAPTIVE_NP
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
#else
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
#endif
if (pthread_mutex_init(mutex, &attr) != 0) {
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return (true);
}
pthread_mutexattr_destroy(&attr);
#endif
return (false);
}
void
malloc_mutex_destroy(malloc_mutex_t *mutex)
malloc_mutex_prefork(malloc_mutex_t *mutex)
{
#ifndef JEMALLOC_OSSPIN
if (pthread_mutex_destroy(mutex) != 0) {
malloc_write("<jemalloc>: Error in pthread_mutex_destroy()\n");
abort();
malloc_mutex_lock(mutex);
}
void
malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
{
malloc_mutex_unlock(mutex);
}
void
malloc_mutex_postfork_child(malloc_mutex_t *mutex)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
malloc_mutex_unlock(mutex);
#else
if (malloc_mutex_init(mutex)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort)
abort();
}
#endif
}
bool
mutex_boot(void)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
base_calloc) != 0)
return (true);
postponed_mutexes = postponed_mutexes->postponed_next;
}
#endif
return (false);
}
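
On systems that provide _pthread_mutex_init_calloc_cb() (FreeBSD), mutexes created before the allocator is bootstrapped cannot be initialized immediately, so they are parked on a linked list and initialized later by mutex_boot(). A standalone sketch of that postponement pattern with hypothetical types (do_real_init() stands in for the real callback-based initializer):

/* Illustrative sketch, not part of the diff: postponing mutex initialization
 * until a boot step, mirroring the postpone_init/postponed_mutexes logic. */
#include <stdbool.h>
#include <stddef.h>

typedef struct lazy_mutex_s lazy_mutex_t;
struct lazy_mutex_s {
	int		lock;		/* stand-in for the real lock object */
	lazy_mutex_t	*postponed_next;
};

static bool		postpone_init = true;
static lazy_mutex_t	*postponed = NULL;

static bool
do_real_init(lazy_mutex_t *mutex)
{
	mutex->lock = 0;
	return (false);
}

static bool
lazy_mutex_init(lazy_mutex_t *mutex)
{
	if (postpone_init) {
		/* Too early: queue the mutex and report success for now. */
		mutex->postponed_next = postponed;
		postponed = mutex;
		return (false);
	}
	return (do_real_init(mutex));
}

static bool
lazy_mutex_boot(void)
{
	/* The allocator works now; initialize everything that was queued. */
	postpone_init = false;
	while (postponed != NULL) {
		if (do_real_init(postponed))
			return (true);
		postponed = postponed->postponed_next;
	}
	return (false);
}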

File diff suppressed because it is too large.

deps/jemalloc/src/quarantine.c vendored (new file, 210 lines)

@ -0,0 +1,210 @@
#include "jemalloc/internal/jemalloc_internal.h"
/*
* quarantine pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
/******************************************************************************/
/* Data. */
typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;
struct quarantine_obj_s {
void *ptr;
size_t usize;
};
struct quarantine_s {
size_t curbytes;
size_t curobjs;
size_t first;
#define LG_MAXOBJS_INIT 10
size_t lg_maxobjs;
quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};
static void quarantine_cleanup(void *arg);
malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
quarantine_cleanup)
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_init(size_t lg_maxobjs);
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
/******************************************************************************/
static quarantine_t *
quarantine_init(size_t lg_maxobjs)
{
quarantine_t *quarantine;
quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
quarantine->curobjs = 0;
quarantine->first = 0;
quarantine->lg_maxobjs = lg_maxobjs;
quarantine_tsd_set(&quarantine);
return (quarantine);
}
static quarantine_t *
quarantine_grow(quarantine_t *quarantine)
{
quarantine_t *ret;
ret = quarantine_init(quarantine->lg_maxobjs + 1);
if (ret == NULL)
return (quarantine);
ret->curbytes = quarantine->curbytes;
ret->curobjs = quarantine->curobjs;
if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
quarantine->lg_maxobjs)) {
/* objs ring buffer data are contiguous. */
memcpy(ret->objs, &quarantine->objs[quarantine->first],
quarantine->curobjs * sizeof(quarantine_obj_t));
} else {
/* objs ring buffer data wrap around. */
size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
quarantine->first;
size_t ncopy_b = quarantine->curobjs - ncopy_a;
memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
* sizeof(quarantine_obj_t));
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
return (ret);
}
static void
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) {
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(obj->ptr, config_prof));
idalloc(obj->ptr);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
quarantine->lg_maxobjs) - 1);
}
}
void
quarantine(void *ptr)
{
quarantine_t *quarantine;
size_t usize = isalloc(ptr, config_prof);
cassert(config_fill);
assert(opt_quarantine);
quarantine = *quarantine_tsd_get();
if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
if (quarantine == NULL) {
if ((quarantine = quarantine_init(LG_MAXOBJS_INIT)) ==
NULL) {
idalloc(ptr);
return;
}
} else {
if (quarantine == QUARANTINE_STATE_PURGATORY) {
/*
* Make a note that quarantine() was called
* after quarantine_cleanup() was called.
*/
quarantine = QUARANTINE_STATE_REINCARNATED;
quarantine_tsd_set(&quarantine);
}
idalloc(ptr);
return;
}
}
/*
* Drain one or more objects if the quarantine size limit would be
* exceeded by appending ptr.
*/
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
quarantine_drain(quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
quarantine = quarantine_grow(quarantine);
/* quarantine_grow() must free a slot if it fails to grow. */
assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
/* Append ptr if its size doesn't exceed the quarantine size. */
if (quarantine->curbytes + usize <= opt_quarantine) {
size_t offset = (quarantine->first + quarantine->curobjs) &
((ZU(1) << quarantine->lg_maxobjs) - 1);
quarantine_obj_t *obj = &quarantine->objs[offset];
obj->ptr = ptr;
obj->usize = usize;
quarantine->curbytes += usize;
quarantine->curobjs++;
if (opt_junk)
memset(ptr, 0x5a, usize);
} else {
assert(quarantine->curbytes == 0);
idalloc(ptr);
}
}
static void
quarantine_cleanup(void *arg)
{
quarantine_t *quarantine = *(quarantine_t **)arg;
if (quarantine == QUARANTINE_STATE_REINCARNATED) {
/*
* Another destructor deallocated memory after this destructor
* was called. Reset quarantine to QUARANTINE_STATE_PURGATORY
* in order to receive another callback.
*/
quarantine = QUARANTINE_STATE_PURGATORY;
quarantine_tsd_set(&quarantine);
} else if (quarantine == QUARANTINE_STATE_PURGATORY) {
/*
* The previous time this destructor was called, we set the key
* to QUARANTINE_STATE_PURGATORY so that other destructors
* wouldn't cause re-creation of the quarantine. This time, do
* nothing, so that the destructor will not be called again.
*/
} else if (quarantine != NULL) {
quarantine_drain(quarantine, 0);
idalloc(quarantine);
quarantine = QUARANTINE_STATE_PURGATORY;
quarantine_tsd_set(&quarantine);
}
}
bool
quarantine_boot(void)
{
cassert(config_fill);
if (quarantine_tsd_boot())
return (true);
return (false);
}
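
A minimal standalone sketch of the byte-bounded deferred-free scheme the new quarantine.c implements, with illustrative dq_* names rather than jemalloc's, plain free() standing in for idalloc(), and a fixed-size ring instead of quarantine_grow():

#include <stdlib.h>
#include <string.h>

/* Illustrative byte-bounded deferred-free ring buffer (not jemalloc's API). */
#define DQ_LG_SLOTS 4                       /* 16 slots, kept a power of two */
#define DQ_NSLOTS   (1U << DQ_LG_SLOTS)

typedef struct { void *ptr; size_t usize; } dq_obj_t;

typedef struct {
    size_t   curbytes, curobjs, first;
    size_t   max_bytes;                     /* analogous to opt_quarantine */
    dq_obj_t objs[DQ_NSLOTS];               /* ring buffer */
} dq_t;

/* Release head objects until at most upper_bound bytes remain quarantined. */
static void dq_drain(dq_t *dq, size_t upper_bound) {
    while (dq->curbytes > upper_bound && dq->curobjs > 0) {
        dq_obj_t *obj = &dq->objs[dq->first];
        free(obj->ptr);                     /* the real deallocation happens here */
        dq->curbytes -= obj->usize;
        dq->curobjs--;
        dq->first = (dq->first + 1) & (DQ_NSLOTS - 1);
    }
}

/* Defer freeing ptr; junk-fill it so stale reads are easy to spot. */
static void dq_defer_free(dq_t *dq, void *ptr, size_t usize) {
    if (dq->curbytes + usize > dq->max_bytes) {
        size_t bound = (dq->max_bytes >= usize) ? dq->max_bytes - usize : 0;
        dq_drain(dq, bound);
    }
    if (dq->curobjs == DQ_NSLOTS || dq->curbytes + usize > dq->max_bytes) {
        free(ptr);                          /* ring full or object too big: free now */
        return;
    }
    size_t slot = (dq->first + dq->curobjs) & (DQ_NSLOTS - 1);
    dq->objs[slot].ptr = ptr;
    dq->objs[slot].usize = usize;
    dq->curbytes += usize;
    dq->curobjs++;
    memset(ptr, 0x5a, usize);               /* same junk byte as opt_junk */
}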


@ -39,140 +39,40 @@
bool opt_stats_print = false;
#ifdef JEMALLOC_STATS
size_t stats_cactive = 0;
#endif
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
#ifdef JEMALLOC_STATS
static void malloc_vcprintf(void (*write_cb)(void *, const char *),
void *cbopaque, const char *format, va_list ap);
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
#endif
void *cbopaque, unsigned i, bool bins, bool large);
/******************************************************************************/
/*
* We don't want to depend on vsnprintf() for production builds, since that can
* cause unnecessary bloat for static binaries. u2s() provides minimal integer
* printing functionality, so that malloc_printf() use can be limited to
* JEMALLOC_STATS code.
*/
char *
u2s(uint64_t x, unsigned base, char *s)
{
unsigned i;
i = UMAX2S_BUFSIZE - 1;
s[i] = '\0';
switch (base) {
case 10:
do {
i--;
s[i] = "0123456789"[x % (uint64_t)10];
x /= (uint64_t)10;
} while (x > 0);
break;
case 16:
do {
i--;
s[i] = "0123456789abcdef"[x & 0xf];
x >>= 4;
} while (x > 0);
break;
default:
do {
i--;
s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x %
(uint64_t)base];
x /= (uint64_t)base;
} while (x > 0);
}
return (&s[i]);
}
#ifdef JEMALLOC_STATS
static void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap)
{
char buf[4096];
if (write_cb == NULL) {
/*
* The caller did not provide an alternate write_cb callback
* function, so use the default one. malloc_write() is an
* inline function, so use malloc_message() directly here.
*/
write_cb = JEMALLOC_P(malloc_message);
cbopaque = NULL;
}
vsnprintf(buf, sizeof(buf), format, ap);
write_cb(cbopaque, buf);
}
/*
* Print to a callback function in such a way as to (hopefully) avoid memory
* allocation.
*/
JEMALLOC_ATTR(format(printf, 3, 4))
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(write_cb, cbopaque, format, ap);
va_end(ap);
}
/*
* Print to stderr in such a way as to (hopefully) avoid memory allocation.
*/
JEMALLOC_ATTR(format(printf, 1, 2))
void
malloc_printf(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
#endif
#ifdef JEMALLOC_STATS
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
size_t pagesize;
size_t page;
bool config_tcache;
unsigned nbins, j, gap_start;
CTL_GET("arenas.pagesize", &pagesize, size_t);
CTL_GET("arenas.page", &page, size_t);
CTL_GET("config.tcache", &config_tcache, bool);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc"
"bins: bin size regs pgs allocated nmalloc"
" ndalloc nrequests nfills nflushes"
" newruns reruns maxruns curruns\n");
" newruns reruns curruns\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc"
" ndalloc newruns reruns maxruns"
" curruns\n");
"bins: bin size regs pgs allocated nmalloc"
" ndalloc newruns reruns curruns\n");
}
CTL_GET("arenas.nbins", &nbins, unsigned);
for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
@ -183,12 +83,11 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (gap_start == UINT_MAX)
gap_start = j;
} else {
unsigned ntbins_, nqbins, ncbins, nsbins;
size_t reg_size, run_size, allocated;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t reruns;
size_t highruns, curruns;
size_t curruns;
if (gap_start != UINT_MAX) {
if (j > gap_start + 1) {
@ -203,10 +102,6 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
gap_start = UINT_MAX;
}
CTL_GET("arenas.ntbins", &ntbins_, unsigned);
CTL_GET("arenas.nqbins", &nqbins, unsigned);
CTL_GET("arenas.ncbins", &ncbins, unsigned);
CTL_GET("arenas.nsbins", &nsbins, unsigned);
CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
@ -226,36 +121,25 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
uint64_t);
CTL_IJ_GET("stats.arenas.0.bins.0.highruns", &highruns,
size_t);
CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
size_t);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
"%13u %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu %12zu\n",
j,
j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
"Q" : j < ntbins_ + nqbins + ncbins ? "C" :
"S",
reg_size, nregs, run_size / pagesize,
" %12zu\n",
j, reg_size, nregs, run_size / page,
allocated, nmalloc, ndalloc, nrequests,
nfills, nflushes, nruns, reruns, highruns,
curruns);
nfills, nflushes, nruns, reruns, curruns);
} else {
malloc_cprintf(write_cb, cbopaque,
"%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
"%13u %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu %12zu\n",
j,
j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
"Q" : j < ntbins_ + nqbins + ncbins ? "C" :
"S",
reg_size, nregs, run_size / pagesize,
" %12zu\n",
j, reg_size, nregs, run_size / page,
allocated, nmalloc, ndalloc, nruns, reruns,
highruns, curruns);
curruns);
}
}
}
@ -275,18 +159,18 @@ static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
size_t pagesize, nlruns, j;
size_t page, nlruns, j;
ssize_t gap_start;
CTL_GET("arenas.pagesize", &pagesize, size_t);
CTL_GET("arenas.page", &page, size_t);
malloc_cprintf(write_cb, cbopaque,
"large: size pages nmalloc ndalloc nrequests"
" maxruns curruns\n");
" curruns\n");
CTL_GET("arenas.nlruns", &nlruns, size_t);
for (j = 0, gap_start = -1; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, highruns, curruns;
size_t run_size, curruns;
CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
uint64_t);
@ -299,8 +183,6 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
gap_start = j;
} else {
CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
CTL_IJ_GET("stats.arenas.0.lruns.0.highruns", &highruns,
size_t);
CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
size_t);
if (gap_start != -1) {
@ -310,9 +192,9 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
malloc_cprintf(write_cb, cbopaque,
"%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu %12zu\n",
run_size, run_size / pagesize, nmalloc, ndalloc,
nrequests, highruns, curruns);
" %12zu\n",
run_size, run_size / page, nmalloc, ndalloc,
nrequests, curruns);
}
}
if (gap_start != -1)
@ -321,17 +203,17 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
unsigned i, bool bins, bool large)
{
unsigned nthreads;
size_t pagesize, pactive, pdirty, mapped;
size_t page, pactive, pdirty, mapped;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
CTL_GET("arenas.pagesize", &pagesize, size_t);
CTL_GET("arenas.page", &page, size_t);
CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
malloc_cprintf(write_cb, cbopaque,
@ -369,15 +251,15 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
small_nmalloc + large_nmalloc,
small_ndalloc + large_ndalloc,
small_nrequests + large_nrequests);
malloc_cprintf(write_cb, cbopaque, "active: %12zu\n",
pactive * pagesize );
malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
stats_arena_bins_print(write_cb, cbopaque, i);
stats_arena_lruns_print(write_cb, cbopaque, i);
if (bins)
stats_arena_bins_print(write_cb, cbopaque, i);
if (large)
stats_arena_lruns_print(write_cb, cbopaque, i);
}
#endif
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
@ -386,7 +268,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
int err;
uint64_t epoch;
size_t u64sz;
char s[UMAX2S_BUFSIZE];
bool general = true;
bool merged = true;
bool unmerged = true;
@ -402,8 +283,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
* */
epoch = 1;
u64sz = sizeof(uint64_t);
err = JEMALLOC_P(mallctl)("epoch", &epoch, &u64sz, &epoch,
sizeof(uint64_t));
err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
if (err != 0) {
if (err == EAGAIN) {
malloc_write("<jemalloc>: Memory allocation failure in "
@ -415,42 +295,33 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
abort();
}
if (write_cb == NULL) {
/*
* The caller did not provide an alternate write_cb callback
* function, so use the default one. malloc_write() is an
* inline function, so use malloc_message() directly here.
*/
write_cb = JEMALLOC_P(malloc_message);
cbopaque = NULL;
}
if (opts != NULL) {
unsigned i;
for (i = 0; opts[i] != '\0'; i++) {
switch (opts[i]) {
case 'g':
general = false;
break;
case 'm':
merged = false;
break;
case 'a':
unmerged = false;
break;
case 'b':
bins = false;
break;
case 'l':
large = false;
break;
default:;
case 'g':
general = false;
break;
case 'm':
merged = false;
break;
case 'a':
unmerged = false;
break;
case 'b':
bins = false;
break;
case 'l':
large = false;
break;
default:;
}
}
}
write_cb(cbopaque, "___ Begin jemalloc statistics ___\n");
malloc_cprintf(write_cb, cbopaque,
"___ Begin jemalloc statistics ___\n");
if (general) {
int err;
const char *cpv;
@ -465,229 +336,126 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
cpsz = sizeof(const char *);
CTL_GET("version", &cpv, const char *);
write_cb(cbopaque, "Version: ");
write_cb(cbopaque, cpv);
write_cb(cbopaque, "\n");
malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
CTL_GET("config.debug", &bv, bool);
write_cb(cbopaque, "Assertions ");
write_cb(cbopaque, bv ? "enabled" : "disabled");
write_cb(cbopaque, "\n");
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled");
#define OPT_WRITE_BOOL(n) \
if ((err = JEMALLOC_P(mallctl)("opt."#n, &bv, &bsz, \
NULL, 0)) == 0) { \
write_cb(cbopaque, " opt."#n": "); \
write_cb(cbopaque, bv ? "true" : "false"); \
write_cb(cbopaque, "\n"); \
if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
}
#define OPT_WRITE_SIZE_T(n) \
if ((err = JEMALLOC_P(mallctl)("opt."#n, &sv, &ssz, \
NULL, 0)) == 0) { \
write_cb(cbopaque, " opt."#n": "); \
write_cb(cbopaque, u2s(sv, 10, s)); \
write_cb(cbopaque, "\n"); \
if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
}
#define OPT_WRITE_SSIZE_T(n) \
if ((err = JEMALLOC_P(mallctl)("opt."#n, &ssv, &sssz, \
NULL, 0)) == 0) { \
if (ssv >= 0) { \
write_cb(cbopaque, " opt."#n": "); \
write_cb(cbopaque, u2s(ssv, 10, s)); \
} else { \
write_cb(cbopaque, " opt."#n": -"); \
write_cb(cbopaque, u2s(-ssv, 10, s)); \
} \
write_cb(cbopaque, "\n"); \
if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
}
#define OPT_WRITE_CHAR_P(n) \
if ((err = JEMALLOC_P(mallctl)("opt."#n, &cpv, &cpsz, \
NULL, 0)) == 0) { \
write_cb(cbopaque, " opt."#n": \""); \
write_cb(cbopaque, cpv); \
write_cb(cbopaque, "\"\n"); \
if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \
}
write_cb(cbopaque, "Run-time option settings:\n");
malloc_cprintf(write_cb, cbopaque,
"Run-time option settings:\n");
OPT_WRITE_BOOL(abort)
OPT_WRITE_SIZE_T(lg_qspace_max)
OPT_WRITE_SIZE_T(lg_cspace_max)
OPT_WRITE_SIZE_T(lg_chunk)
OPT_WRITE_SIZE_T(narenas)
OPT_WRITE_SSIZE_T(lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
OPT_WRITE_BOOL(junk)
OPT_WRITE_SIZE_T(quarantine)
OPT_WRITE_BOOL(redzone)
OPT_WRITE_BOOL(zero)
OPT_WRITE_BOOL(sysv)
OPT_WRITE_BOOL(utrace)
OPT_WRITE_BOOL(valgrind)
OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_BOOL(tcache)
OPT_WRITE_SSIZE_T(lg_tcache_gc_sweep)
OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(prof)
OPT_WRITE_CHAR_P(prof_prefix)
OPT_WRITE_SIZE_T(lg_prof_bt_max)
OPT_WRITE_BOOL(prof_active)
OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof_accum)
OPT_WRITE_SSIZE_T(lg_prof_tcmax)
OPT_WRITE_SSIZE_T(lg_prof_interval)
OPT_WRITE_BOOL(prof_gdump)
OPT_WRITE_BOOL(prof_final)
OPT_WRITE_BOOL(prof_leak)
OPT_WRITE_BOOL(overcommit)
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
write_cb(cbopaque, "CPUs: ");
write_cb(cbopaque, u2s(ncpus, 10, s));
write_cb(cbopaque, "\n");
malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
CTL_GET("arenas.narenas", &uv, unsigned);
write_cb(cbopaque, "Max arenas: ");
write_cb(cbopaque, u2s(uv, 10, s));
write_cb(cbopaque, "\n");
malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv);
write_cb(cbopaque, "Pointer size: ");
write_cb(cbopaque, u2s(sizeof(void *), 10, s));
write_cb(cbopaque, "\n");
malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
sizeof(void *));
CTL_GET("arenas.quantum", &sv, size_t);
write_cb(cbopaque, "Quantum size: ");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
CTL_GET("arenas.cacheline", &sv, size_t);
write_cb(cbopaque, "Cacheline size (assumed): ");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
CTL_GET("arenas.subpage", &sv, size_t);
write_cb(cbopaque, "Subpage spacing: ");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
if ((err = JEMALLOC_P(mallctl)("arenas.tspace_min", &sv, &ssz,
NULL, 0)) == 0) {
write_cb(cbopaque, "Tiny 2^n-spaced sizes: [");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.tspace_max", &sv, size_t);
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
}
CTL_GET("arenas.qspace_min", &sv, size_t);
write_cb(cbopaque, "Quantum-spaced sizes: [");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.qspace_max", &sv, size_t);
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
CTL_GET("arenas.cspace_min", &sv, size_t);
write_cb(cbopaque, "Cacheline-spaced sizes: [");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.cspace_max", &sv, size_t);
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
CTL_GET("arenas.sspace_min", &sv, size_t);
write_cb(cbopaque, "Subpage-spaced sizes: [");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.sspace_max", &sv, size_t);
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
CTL_GET("arenas.page", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
if (ssv >= 0) {
write_cb(cbopaque,
"Min active:dirty page ratio per arena: ");
write_cb(cbopaque, u2s((1U << ssv), 10, s));
write_cb(cbopaque, ":1\n");
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: %u:1\n",
(1U << ssv));
} else {
write_cb(cbopaque,
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: N/A\n");
}
if ((err = JEMALLOC_P(mallctl)("arenas.tcache_max", &sv,
&ssz, NULL, 0)) == 0) {
write_cb(cbopaque,
"Maximum thread-cached size class: ");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
== 0) {
malloc_cprintf(write_cb, cbopaque,
"Maximum thread-cached size class: %zu\n", sv);
}
if ((err = JEMALLOC_P(mallctl)("opt.lg_tcache_gc_sweep", &ssv,
&ssz, NULL, 0)) == 0) {
size_t tcache_gc_sweep = (1U << ssv);
bool tcache_enabled;
CTL_GET("opt.tcache", &tcache_enabled, bool);
write_cb(cbopaque, "Thread cache GC sweep interval: ");
write_cb(cbopaque, tcache_enabled && ssv >= 0 ?
u2s(tcache_gc_sweep, 10, s) : "N/A");
write_cb(cbopaque, "\n");
}
if ((err = JEMALLOC_P(mallctl)("opt.prof", &bv, &bsz, NULL, 0))
== 0 && bv) {
CTL_GET("opt.lg_prof_bt_max", &sv, size_t);
write_cb(cbopaque, "Maximum profile backtrace depth: ");
write_cb(cbopaque, u2s((1U << sv), 10, s));
write_cb(cbopaque, "\n");
CTL_GET("opt.lg_prof_tcmax", &ssv, ssize_t);
write_cb(cbopaque,
"Maximum per thread backtrace cache: ");
if (ssv >= 0) {
write_cb(cbopaque, u2s((1U << ssv), 10, s));
write_cb(cbopaque, " (2^");
write_cb(cbopaque, u2s(ssv, 10, s));
write_cb(cbopaque, ")\n");
} else
write_cb(cbopaque, "N/A\n");
if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
bv) {
CTL_GET("opt.lg_prof_sample", &sv, size_t);
write_cb(cbopaque, "Average profile sample interval: ");
write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s));
write_cb(cbopaque, " (2^");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, ")\n");
malloc_cprintf(write_cb, cbopaque,
"Average profile sample interval: %"PRIu64
" (2^%zu)\n", (((uint64_t)1U) << sv), sv);
CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
write_cb(cbopaque, "Average profile dump interval: ");
if (ssv >= 0) {
write_cb(cbopaque, u2s((((uint64_t)1U) << ssv),
10, s));
write_cb(cbopaque, " (2^");
write_cb(cbopaque, u2s(ssv, 10, s));
write_cb(cbopaque, ")\n");
} else
write_cb(cbopaque, "N/A\n");
malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: %"PRIu64
" (2^%zd)\n",
(((uint64_t)1U) << ssv), ssv);
} else {
malloc_cprintf(write_cb, cbopaque,
"Average profile dump interval: N/A\n");
}
}
CTL_GET("arenas.chunksize", &sv, size_t);
write_cb(cbopaque, "Chunk size: ");
write_cb(cbopaque, u2s(sv, 10, s));
CTL_GET("opt.lg_chunk", &sv, size_t);
write_cb(cbopaque, " (2^");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, ")\n");
malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
(ZU(1) << sv), sv);
}
#ifdef JEMALLOC_STATS
{
int err;
size_t sszp, ssz;
if (config_stats) {
size_t *cactive;
size_t allocated, active, mapped;
size_t chunks_current, chunks_high, swap_avail;
size_t chunks_current, chunks_high;
uint64_t chunks_total;
size_t huge_allocated;
uint64_t huge_nmalloc, huge_ndalloc;
sszp = sizeof(size_t *);
ssz = sizeof(size_t);
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
@ -702,24 +470,10 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
CTL_GET("stats.chunks.high", &chunks_high, size_t);
CTL_GET("stats.chunks.current", &chunks_current, size_t);
if ((err = JEMALLOC_P(mallctl)("swap.avail", &swap_avail, &ssz,
NULL, 0)) == 0) {
size_t lg_chunk;
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks swap_avail\n");
CTL_GET("opt.lg_chunk", &lg_chunk, size_t);
malloc_cprintf(write_cb, cbopaque,
" %13"PRIu64"%13zu%13zu%13zu\n",
chunks_total, chunks_high, chunks_current,
swap_avail << lg_chunk);
} else {
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks\n");
malloc_cprintf(write_cb, cbopaque,
" %13"PRIu64"%13zu%13zu\n",
chunks_total, chunks_high, chunks_current);
}
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks\n");
malloc_cprintf(write_cb, cbopaque, " %13"PRIu64"%13zu%13zu\n",
chunks_total, chunks_high, chunks_current);
/* Print huge stats. */
CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
@ -736,11 +490,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("arenas.narenas", &narenas, unsigned);
{
bool initialized[narenas];
VARIABLE_ARRAY(bool, initialized, narenas);
size_t isz;
unsigned i, ninitialized;
isz = sizeof(initialized);
isz = sizeof(bool) * narenas;
xmallctl("arenas.initialized", initialized,
&isz, NULL, 0);
for (i = ninitialized = 0; i < narenas; i++) {
@ -753,7 +507,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
stats_arena_print(write_cb, cbopaque,
narenas);
narenas, bins, large);
}
}
}
@ -765,11 +519,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("arenas.narenas", &narenas, unsigned);
{
bool initialized[narenas];
VARIABLE_ARRAY(bool, initialized, narenas);
size_t isz;
unsigned i;
isz = sizeof(initialized);
isz = sizeof(bool) * narenas;
xmallctl("arenas.initialized", initialized,
&isz, NULL, 0);
@ -779,12 +533,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
cbopaque,
"\narenas[%u]:\n", i);
stats_arena_print(write_cb,
cbopaque, i);
cbopaque, i, bins, large);
}
}
}
}
}
#endif /* #ifdef JEMALLOC_STATS */
write_cb(cbopaque, "--- End jemalloc statistics ---\n");
malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
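
The reworked stats_print() parses its opts string for the flags 'g', 'm', 'a', 'b' and 'l', which respectively suppress the general, merged-arena, per-arena, bin and large-object sections, and it now emits everything through malloc_cprintf() instead of the removed u2s()/write_cb pairs. A hedged usage sketch of the public entry points follows; it assumes a build without a symbol prefix (with the je_ prefix the calls become je_mallctl() and je_malloc_stats_print()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>   /* assumes an unprefixed jemalloc install */

/* Route the statistics output to stderr via a custom write callback. */
static void write_to_stderr(void *cbopaque, const char *s) {
    (void)cbopaque;
    fputs(s, stderr);
}

int main(void) {
    void *p = malloc(1 << 20);

    /* Refresh the stats epoch explicitly (stats_print() also does this). */
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

    /*
     * "gbl" suppresses the general section, bin stats and large-object
     * stats, leaving only the merged and per-arena summaries.
     */
    malloc_stats_print(write_to_stderr, NULL, "gbl");

    free(p);
    return 0;
}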


@ -1,70 +1,92 @@
#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_TCACHE
/******************************************************************************/
/* Data. */
malloc_tsd_data(, tcache, tcache_t *, NULL)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT;
tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */
/* Map of thread-specific caches. */
#ifndef NO_TLS
__thread tcache_t *tcache_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif
/*
* Same contents as tcache, but initialized such that the TSD destructor is
* called when a thread exits, so that the cache can be cleaned up.
*/
pthread_key_t tcache_tsd;
size_t nhbins;
size_t tcache_maxclass;
unsigned tcache_gc_incr;
size_t nhbins;
size_t tcache_maxclass;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void tcache_thread_cleanup(void *arg);
size_t tcache_salloc(const void *ptr)
{
/******************************************************************************/
return (arena_salloc(ptr, false));
}
void
tcache_event_hard(tcache_t *tcache)
{
size_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
if (binind < NBINS) {
tcache_bin_flush_small(tbin, binind, tbin->ncached -
tbin->low_water + (tbin->low_water >> 2), tcache);
} else {
tcache_bin_flush_large(tbin, binind, tbin->ncached -
tbin->low_water + (tbin->low_water >> 2), tcache);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that the
* fill count is always at least 1.
*/
if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
tbin->lg_fill_div++;
} else if (tbin->low_water < 0) {
/*
* Increase fill count by 2X. Make sure lg_fill_div stays
* greater than 0.
*/
if (tbin->lg_fill_div > 1)
tbin->lg_fill_div--;
}
tbin->low_water = tbin->ncached;
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
}
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
void *ret;
arena_tcache_fill_small(tcache->arena, tbin, binind
#ifdef JEMALLOC_PROF
, tcache->prof_accumbytes
#endif
);
#ifdef JEMALLOC_PROF
tcache->prof_accumbytes = 0;
#endif
arena_tcache_fill_small(tcache->arena, tbin, binind,
config_prof ? tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin);
return (ret);
}
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
)
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
#ifdef JEMALLOC_STATS
bool merged_stats = false;
#endif
assert(binind < nbins);
assert(binind < NBINS);
assert(rem <= tbin->ncached);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
@ -74,25 +96,21 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
arena_t *arena = chunk->arena;
arena_bin_t *bin = &arena->bins[binind];
#ifdef JEMALLOC_PROF
if (arena == tcache->arena) {
if (config_prof && arena == tcache->arena) {
malloc_mutex_lock(&arena->lock);
arena_prof_accum(arena, tcache->prof_accumbytes);
malloc_mutex_unlock(&arena->lock);
tcache->prof_accumbytes = 0;
}
#endif
malloc_mutex_lock(&bin->lock);
#ifdef JEMALLOC_STATS
if (arena == tcache->arena) {
if (config_stats && arena == tcache->arena) {
assert(merged_stats == false);
merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
#endif
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
@ -100,10 +118,15 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> PAGE_SHIFT;
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm =
&chunk->map[pageind-map_bias];
arena_dalloc_bin(arena, chunk, ptr, mapelm);
arena_mapp_get(chunk, pageind);
if (config_fill && opt_junk) {
arena_alloc_junk_small(ptr,
&arena_bin_info[binind], true);
}
arena_dalloc_bin_locked(arena, chunk, ptr,
mapelm);
} else {
/*
* This object was allocated via a different
@ -117,8 +140,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
}
malloc_mutex_unlock(&bin->lock);
}
#ifdef JEMALLOC_STATS
if (merged_stats == false) {
if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
@ -130,7 +152,6 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&bin->lock);
}
#endif
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
@ -140,17 +161,12 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
}
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
)
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
#ifdef JEMALLOC_STATS
bool merged_stats = false;
#endif
assert(binind < nhbins);
assert(rem <= tbin->ncached);
@ -162,30 +178,28 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
arena_t *arena = chunk->arena;
malloc_mutex_lock(&arena->lock);
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
if (arena == tcache->arena) {
#endif
#ifdef JEMALLOC_PROF
arena_prof_accum(arena, tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
#endif
#ifdef JEMALLOC_STATS
merged_stats = true;
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - nbins].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
#endif
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
if ((config_prof || config_stats) && arena == tcache->arena) {
if (config_prof) {
arena_prof_accum(arena,
tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}
if (config_stats) {
merged_stats = true;
arena->stats.nrequests_large +=
tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
#endif
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena)
arena_dalloc_large(arena, chunk, ptr);
arena_dalloc_large_locked(arena, chunk, ptr);
else {
/*
* This object was allocated via a different
@ -199,8 +213,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
}
malloc_mutex_unlock(&arena->lock);
}
#ifdef JEMALLOC_STATS
if (merged_stats == false) {
if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
@ -208,12 +221,11 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - nbins].nrequests +=
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&arena->lock);
}
#endif
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
@ -222,6 +234,33 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
tbin->low_water = tbin->ncached;
}
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
}
tcache->arena = arena;
}
void
tcache_arena_dissociate(tcache_t *tcache)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&tcache->arena->lock);
ql_remove(&tcache->arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&tcache->arena->lock);
tcache_stats_merge(tcache, tcache->arena);
}
}
tcache_t *
tcache_create(arena_t *arena)
{
@ -244,7 +283,7 @@ tcache_create(arena_t *arena)
*/
size = (size + CACHELINE_MASK) & (-CACHELINE);
if (size <= small_maxclass)
if (size <= SMALL_MAXCLASS)
tcache = (tcache_t *)arena_malloc_small(arena, size, true);
else if (size <= tcache_maxclass)
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
@ -254,15 +293,8 @@ tcache_create(arena_t *arena)
if (tcache == NULL)
return (NULL);
#ifdef JEMALLOC_STATS
/* Link into list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
#endif
tcache_arena_associate(tcache, arena);
tcache->arena = arena;
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
for (i = 0; i < nhbins; i++) {
tcache->tbins[i].lg_fill_div = 1;
@ -271,7 +303,7 @@ tcache_create(arena_t *arena)
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
TCACHE_SET(tcache);
tcache_tsd_set(&tcache);
return (tcache);
}
@ -282,121 +314,96 @@ tcache_destroy(tcache_t *tcache)
unsigned i;
size_t tcache_size;
#ifdef JEMALLOC_STATS
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&tcache->arena->lock);
ql_remove(&tcache->arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&tcache->arena->lock);
tcache_stats_merge(tcache, tcache->arena);
#endif
tcache_arena_dissociate(tcache);
for (i = 0; i < nbins; i++) {
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_small(tbin, i, 0
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
tcache_bin_flush_small(tbin, i, 0, tcache);
#ifdef JEMALLOC_STATS
if (tbin->tstats.nrequests != 0) {
if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
}
#endif
}
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_large(tbin, i, 0
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
tcache_bin_flush_large(tbin, i, 0, tcache);
#ifdef JEMALLOC_STATS
if (tbin->tstats.nrequests != 0) {
if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - nbins].nrequests +=
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(&arena->lock);
}
#endif
}
#ifdef JEMALLOC_PROF
if (tcache->prof_accumbytes > 0) {
if (config_prof && tcache->prof_accumbytes > 0) {
malloc_mutex_lock(&tcache->arena->lock);
arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
malloc_mutex_unlock(&tcache->arena->lock);
}
#endif
tcache_size = arena_salloc(tcache);
if (tcache_size <= small_maxclass) {
tcache_size = arena_salloc(tcache, false);
if (tcache_size <= SMALL_MAXCLASS) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
PAGE_SHIFT;
arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapelm->bits >> PAGE_SHIFT)) <<
PAGE_SHIFT));
arena_bin_t *bin = run->bin;
LG_PAGE;
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
malloc_mutex_lock(&bin->lock);
arena_dalloc_bin(arena, chunk, tcache, mapelm);
malloc_mutex_unlock(&bin->lock);
arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
} else if (tcache_size <= tcache_maxclass) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
malloc_mutex_lock(&arena->lock);
arena_dalloc_large(arena, chunk, tcache);
malloc_mutex_unlock(&arena->lock);
} else
idalloc(tcache);
}
static void
void
tcache_thread_cleanup(void *arg)
{
tcache_t *tcache = (tcache_t *)arg;
tcache_t *tcache = *(tcache_t **)arg;
if (tcache == (void *)(uintptr_t)1) {
/*
* The previous time this destructor was called, we set the key
* to 1 so that other destructors wouldn't cause re-creation of
* the tcache. This time, do nothing, so that the destructor
* will not be called again.
*/
} else if (tcache == (void *)(uintptr_t)2) {
if (tcache == TCACHE_STATE_DISABLED) {
/* Do nothing. */
} else if (tcache == TCACHE_STATE_REINCARNATED) {
/*
* Another destructor called an allocator function after this
* destructor was called. Reset tcache to 1 in order to
* receive another callback.
* destructor was called. Reset tcache to
* TCACHE_STATE_PURGATORY in order to receive another callback.
*/
tcache = TCACHE_STATE_PURGATORY;
tcache_tsd_set(&tcache);
} else if (tcache == TCACHE_STATE_PURGATORY) {
/*
* The previous time this destructor was called, we set the key
* to TCACHE_STATE_PURGATORY so that other destructors wouldn't
* cause re-creation of the tcache. This time, do nothing, so
* that the destructor will not be called again.
*/
TCACHE_SET((uintptr_t)1);
} else if (tcache != NULL) {
assert(tcache != (void *)(uintptr_t)1);
assert(tcache != TCACHE_STATE_PURGATORY);
tcache_destroy(tcache);
TCACHE_SET((uintptr_t)1);
tcache = TCACHE_STATE_PURGATORY;
tcache_tsd_set(&tcache);
}
}
#ifdef JEMALLOC_STATS
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;
/* Merge and reset tcache stats. */
for (i = 0; i < nbins; i++) {
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(&bin->lock);
@ -406,75 +413,62 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
}
for (; i < nhbins; i++) {
malloc_large_stats_t *lstats = &arena->stats.lstats[i - nbins];
malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
tcache_bin_t *tbin = &tcache->tbins[i];
arena->stats.nrequests_large += tbin->tstats.nrequests;
lstats->nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
#endif
bool
tcache_boot(void)
tcache_boot0(void)
{
unsigned i;
if (opt_tcache) {
unsigned i;
/*
* If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
* known.
*/
if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
else if ((1U << opt_lg_tcache_max) > arena_maxclass)
tcache_maxclass = arena_maxclass;
else
tcache_maxclass = (1U << opt_lg_tcache_max);
/*
* If necessary, clamp opt_lg_tcache_max, now that
* small_maxclass and arena_maxclass are known.
*/
if (opt_lg_tcache_max < 0 || (1U <<
opt_lg_tcache_max) < small_maxclass)
tcache_maxclass = small_maxclass;
else if ((1U << opt_lg_tcache_max) > arena_maxclass)
tcache_maxclass = arena_maxclass;
else
tcache_maxclass = (1U << opt_lg_tcache_max);
nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
nhbins = nbins + (tcache_maxclass >> PAGE_SHIFT);
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
stack_nelms = 0;
for (i = 0; i < nbins; i++) {
if ((arena_bin_info[i].nregs << 1) <=
TCACHE_NSLOTS_SMALL_MAX) {
tcache_bin_info[i].ncached_max =
(arena_bin_info[i].nregs << 1);
} else {
tcache_bin_info[i].ncached_max =
TCACHE_NSLOTS_SMALL_MAX;
}
stack_nelms += tcache_bin_info[i].ncached_max;
}
for (; i < nhbins; i++) {
tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
stack_nelms += tcache_bin_info[i].ncached_max;
}
/* Compute incremental GC event threshold. */
if (opt_lg_tcache_gc_sweep >= 0) {
tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) /
nbins) + (((1U << opt_lg_tcache_gc_sweep) % nbins ==
0) ? 0 : 1);
} else
tcache_gc_incr = 0;
if (pthread_key_create(&tcache_tsd, tcache_thread_cleanup) !=
0) {
malloc_write(
"<jemalloc>: Error in pthread_key_create()\n");
abort();
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
stack_nelms = 0;
for (i = 0; i < NBINS; i++) {
if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
tcache_bin_info[i].ncached_max =
(arena_bin_info[i].nregs << 1);
} else {
tcache_bin_info[i].ncached_max =
TCACHE_NSLOTS_SMALL_MAX;
}
stack_nelms += tcache_bin_info[i].ncached_max;
}
for (; i < nhbins; i++) {
tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
stack_nelms += tcache_bin_info[i].ncached_max;
}
return (false);
}
/******************************************************************************/
#endif /* JEMALLOC_TCACHE */
bool
tcache_boot1(void)
{
if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
return (true);
return (false);
}
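
tcache_event_hard() is the incremental GC that replaces the old opt_lg_tcache_gc_sweep machinery: if a bin's low-water mark stayed positive, roughly 3/4 of the objects that sat unused below that mark are flushed and the refill count is halved; if the bin ran dry, the refill count is doubled again. A small standalone sketch of just that arithmetic, using hypothetical demo_* names:

#include <stdio.h>

/* Illustrative mirror of tcache_event_hard()'s per-bin GC arithmetic. */
typedef struct {
    unsigned ncached;      /* objects currently cached */
    int      low_water;    /* minimum ncached since last pass (-1 = cache miss) */
    unsigned lg_fill_div;  /* refill count divisor is 2^lg_fill_div */
} demo_bin_t;

static unsigned demo_nflush(const demo_bin_t *b) {
    /*
     * Keep ncached - low_water + low_water/4 objects, i.e. flush (ceiling)
     * 3/4 of the objects that sat unused below the low-water mark.
     */
    unsigned rem = b->ncached - (unsigned)b->low_water +
        ((unsigned)b->low_water >> 2);
    return b->ncached - rem;
}

static void demo_gc_pass(demo_bin_t *b, unsigned ncached_max) {
    if (b->low_water > 0) {
        unsigned nflush = demo_nflush(b);
        printf("flush %u of %u cached objects\n", nflush, b->ncached);
        b->ncached -= nflush;
        /* Halve future fills, keeping the fill count at least 1. */
        if ((ncached_max >> (b->lg_fill_div + 1)) >= 1)
            b->lg_fill_div++;
    } else if (b->low_water < 0) {
        /* The bin ran dry since the last pass: double future fills. */
        if (b->lg_fill_div > 1)
            b->lg_fill_div--;
    }
    b->low_water = (int)b->ncached;
}

int main(void) {
    demo_bin_t bin = { 20, 8, 1 };   /* 20 cached, low water 8 */
    demo_gc_pass(&bin, 32);          /* flushes 6, lg_fill_div 1 -> 2 */
    printf("now %u cached, lg_fill_div %u\n", bin.ncached, bin.lg_fill_div);
    return 0;
}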

deps/jemalloc/src/tsd.c (new file, 107 lines)

@ -0,0 +1,107 @@
#define JEMALLOC_TSD_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
/******************************************************************************/
void *
malloc_tsd_malloc(size_t size)
{
/* Avoid choose_arena() in order to dodge bootstrapping issues. */
return (arena_malloc(arenas[0], size, false, false));
}
void
malloc_tsd_dalloc(void *wrapper)
{
idalloc(wrapper);
}
void
malloc_tsd_no_cleanup(void *arg)
{
not_reached();
}
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
#ifndef _WIN32
JEMALLOC_EXPORT
#endif
void
_malloc_thread_cleanup(void)
{
bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
unsigned i;
for (i = 0; i < ncleanups; i++)
pending[i] = true;
do {
again = false;
for (i = 0; i < ncleanups; i++) {
if (pending[i]) {
pending[i] = cleanups[i]();
if (pending[i])
again = true;
}
}
} while (again);
}
#endif
void
malloc_tsd_cleanup_register(bool (*f)(void))
{
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
cleanups[ncleanups] = f;
ncleanups++;
}
void
malloc_tsd_boot(void)
{
ncleanups = 0;
}
#ifdef _WIN32
static BOOL WINAPI
_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
{
switch (fdwReason) {
#ifdef JEMALLOC_LAZY_LOCK
case DLL_THREAD_ATTACH:
isthreaded = true;
break;
#endif
case DLL_THREAD_DETACH:
_malloc_thread_cleanup();
break;
default:
break;
}
return (true);
}
#ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
# else
# pragma comment(linker, "/INCLUDE:_tls_used")
# endif
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
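
_malloc_thread_cleanup() (and the Windows .CRT$XLY TLS callback that forwards to it) re-runs every registered cleanup until none reports pending work, since one destructor may recreate thread-specific data that another then has to tear down again. A self-contained sketch of that retry loop, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

#define MAX_CLEANUPS 8

/*
 * Each cleanup returns true while it still has pending work, matching the
 * malloc_tsd_cleanup_t convention used in tsd.c.
 */
typedef bool (*cleanup_fn)(void);

static cleanup_fn cleanups[MAX_CLEANUPS];
static unsigned ncleanups;

static void cleanup_register(cleanup_fn f) {
    if (ncleanups < MAX_CLEANUPS)
        cleanups[ncleanups++] = f;
}

/* Run every cleanup repeatedly until none of them reports pending work. */
static void thread_cleanup(void) {
    bool pending[MAX_CLEANUPS], again;
    unsigned i;

    for (i = 0; i < ncleanups; i++)
        pending[i] = true;
    do {
        again = false;
        for (i = 0; i < ncleanups; i++) {
            if (pending[i]) {
                pending[i] = cleanups[i]();
                if (pending[i])
                    again = true;
            }
        }
    } while (again);
}

/* Example cleanup that needs two passes before it is done. */
static bool two_pass_cleanup(void) {
    static int passes = 0;
    printf("cleanup pass %d\n", ++passes);
    return passes < 2;   /* still pending after the first pass */
}

int main(void) {
    cleanup_register(two_pass_cleanup);
    thread_cleanup();
    return 0;
}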

deps/jemalloc/src/util.c (new file, 646 lines)

@ -0,0 +1,646 @@
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
abort(); \
} \
} while (0)
#define not_reached() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
} while (0)
#define not_implemented() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Not implemented\n"); \
abort(); \
} \
} while (0)
#define JEMALLOC_UTIL_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void wrtmessage(void *cbopaque, const char *s);
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
size_t *slen_p);
#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
size_t *slen_p);
/******************************************************************************/
/* malloc_message() setup. */
static void
wrtmessage(void *cbopaque, const char *s)
{
#ifdef SYS_write
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
* the possibility of memory allocation within libc. This is necessary
* on FreeBSD; most operating systems do not have this problem though.
*/
UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
UNUSED int result = write(STDERR_FILENO, s, strlen(s));
#endif
}
JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
/*
* Wrapper around malloc_message() that avoids the need for
* je_malloc_message(...) throughout the code.
*/
void
malloc_write(const char *s)
{
if (je_malloc_message != NULL)
je_malloc_message(NULL, s);
else
wrtmessage(NULL, s);
}
/*
* glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
* provide a wrapper.
*/
int
buferror(char *buf, size_t buflen)
{
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
(LPSTR)buf, buflen, NULL);
return (0);
#elif defined(_GNU_SOURCE)
char *b = strerror_r(errno, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
}
return (0);
#else
return (strerror_r(errno, buf, buflen));
#endif
}
uintmax_t
malloc_strtoumax(const char *nptr, char **endptr, int base)
{
uintmax_t ret, digit;
int b;
bool neg;
const char *p, *ns;
if (base < 0 || base == 1 || base > 36) {
set_errno(EINVAL);
return (UINTMAX_MAX);
}
b = base;
/* Swallow leading whitespace and get sign, if any. */
neg = false;
p = nptr;
while (true) {
switch (*p) {
case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
p++;
break;
case '-':
neg = true;
/* Fall through. */
case '+':
p++;
/* Fall through. */
default:
goto label_prefix;
}
}
/* Get prefix, if any. */
label_prefix:
/*
* Note where the first non-whitespace/sign character is so that it is
* possible to tell whether any digits are consumed (e.g., " 0" vs.
* " -x").
*/
ns = p;
if (*p == '0') {
switch (p[1]) {
case '0': case '1': case '2': case '3': case '4': case '5':
case '6': case '7':
if (b == 0)
b = 8;
if (b == 8)
p++;
break;
case 'x':
switch (p[2]) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
case 'A': case 'B': case 'C': case 'D': case 'E':
case 'F':
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f':
if (b == 0)
b = 16;
if (b == 16)
p += 2;
break;
default:
break;
}
break;
default:
break;
}
}
if (b == 0)
b = 10;
/* Convert. */
ret = 0;
while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
|| (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
|| (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
uintmax_t pret = ret;
ret *= b;
ret += digit;
if (ret < pret) {
/* Overflow. */
set_errno(ERANGE);
return (UINTMAX_MAX);
}
p++;
}
if (neg)
ret = -ret;
if (endptr != NULL) {
if (p == ns) {
/* No characters were converted. */
*endptr = (char *)nptr;
} else
*endptr = (char *)p;
}
return (ret);
}
static char *
u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
{
unsigned i;
i = U2S_BUFSIZE - 1;
s[i] = '\0';
switch (base) {
case 10:
do {
i--;
s[i] = "0123456789"[x % (uint64_t)10];
x /= (uint64_t)10;
} while (x > 0);
break;
case 16: {
const char *digits = (uppercase)
? "0123456789ABCDEF"
: "0123456789abcdef";
do {
i--;
s[i] = digits[x & 0xf];
x >>= 4;
} while (x > 0);
break;
} default: {
const char *digits = (uppercase)
? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
: "0123456789abcdefghijklmnopqrstuvwxyz";
assert(base >= 2 && base <= 36);
do {
i--;
s[i] = digits[x % (uint64_t)base];
x /= (uint64_t)base;
} while (x > 0);
}}
*slen_p = U2S_BUFSIZE - 1 - i;
return (&s[i]);
}
static char *
d2s(intmax_t x, char sign, char *s, size_t *slen_p)
{
bool neg;
if ((neg = (x < 0)))
x = -x;
s = u2s(x, 10, false, s, slen_p);
if (neg)
sign = '-';
switch (sign) {
case '-':
if (neg == false)
break;
/* Fall through. */
case ' ':
case '+':
s--;
(*slen_p)++;
*s = sign;
break;
default: not_reached();
}
return (s);
}
static char *
o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
{
s = u2s(x, 8, false, s, slen_p);
if (alt_form && *s != '0') {
s--;
(*slen_p)++;
*s = '0';
}
return (s);
}
static char *
x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
{
s = u2s(x, 16, uppercase, s, slen_p);
if (alt_form) {
s -= 2;
(*slen_p) += 2;
memcpy(s, uppercase ? "0X" : "0x", 2);
}
return (s);
}
int
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
int ret;
size_t i;
const char *f;
#define APPEND_C(c) do { \
if (i < size) \
str[i] = (c); \
i++; \
} while (0)
#define APPEND_S(s, slen) do { \
if (i < size) { \
size_t cpylen = (slen <= size - i) ? slen : size - i; \
memcpy(&str[i], s, cpylen); \
} \
i += slen; \
} while (0)
#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
/* Left padding. */ \
size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
(size_t)width - slen : 0); \
if (left_justify == false && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) \
APPEND_C(' '); \
} \
/* Value. */ \
APPEND_S(s, slen); \
/* Right padding. */ \
if (left_justify && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) \
APPEND_C(' '); \
} \
} while (0)
#define GET_ARG_NUMERIC(val, len) do { \
switch (len) { \
case '?': \
val = va_arg(ap, int); \
break; \
case '?' | 0x80: \
val = va_arg(ap, unsigned int); \
break; \
case 'l': \
val = va_arg(ap, long); \
break; \
case 'l' | 0x80: \
val = va_arg(ap, unsigned long); \
break; \
case 'q': \
val = va_arg(ap, long long); \
break; \
case 'q' | 0x80: \
val = va_arg(ap, unsigned long long); \
break; \
case 'j': \
val = va_arg(ap, intmax_t); \
break; \
case 't': \
val = va_arg(ap, ptrdiff_t); \
break; \
case 'z': \
val = va_arg(ap, ssize_t); \
break; \
case 'z' | 0x80: \
val = va_arg(ap, size_t); \
break; \
case 'p': /* Synthetic; used for %p. */ \
val = va_arg(ap, uintptr_t); \
break; \
default: not_reached(); \
} \
} while (0)
i = 0;
f = format;
while (true) {
switch (*f) {
case '\0': goto label_out;
case '%': {
bool alt_form = false;
bool zero_pad = false;
bool left_justify = false;
bool plus_space = false;
bool plus_plus = false;
int prec = -1;
int width = -1;
unsigned char len = '?';
f++;
if (*f == '%') {
/* %% */
APPEND_C(*f);
break;
}
/* Flags. */
while (true) {
switch (*f) {
case '#':
assert(alt_form == false);
alt_form = true;
break;
case '0':
assert(zero_pad == false);
zero_pad = true;
break;
case '-':
assert(left_justify == false);
left_justify = true;
break;
case ' ':
assert(plus_space == false);
plus_space = true;
break;
case '+':
assert(plus_plus == false);
plus_plus = true;
break;
default: goto label_width;
}
f++;
}
/* Width. */
label_width:
switch (*f) {
case '*':
width = va_arg(ap, int);
f++;
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uwidth;
set_errno(0);
uwidth = malloc_strtoumax(f, (char **)&f, 10);
assert(uwidth != UINTMAX_MAX || get_errno() !=
ERANGE);
width = (int)uwidth;
if (*f == '.') {
f++;
goto label_precision;
} else
goto label_length;
break;
} case '.':
f++;
goto label_precision;
default: goto label_length;
}
/* Precision. */
label_precision:
switch (*f) {
case '*':
prec = va_arg(ap, int);
f++;
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uprec;
set_errno(0);
uprec = malloc_strtoumax(f, (char **)&f, 10);
assert(uprec != UINTMAX_MAX || get_errno() !=
ERANGE);
prec = (int)uprec;
break;
}
default: break;
}
/* Length. */
label_length:
switch (*f) {
case 'l':
f++;
if (*f == 'l') {
len = 'q';
f++;
} else
len = 'l';
break;
case 'j':
len = 'j';
f++;
break;
case 't':
len = 't';
f++;
break;
case 'z':
len = 'z';
f++;
break;
default: break;
}
/* Conversion specifier. */
switch (*f) {
char *s;
size_t slen;
case 'd': case 'i': {
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[D2S_BUFSIZE];
GET_ARG_NUMERIC(val, len);
s = d2s(val, (plus_plus ? '+' : (plus_space ?
' ' : '-')), buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'o': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[O2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = o2s(val, alt_form, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'u': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[U2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = u2s(val, 10, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'x': case 'X': {
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, len | 0x80);
s = x2s(val, alt_form, *f == 'X', buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
} case 'c': {
unsigned char val;
char buf[2];
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
val = va_arg(ap, int);
buf[0] = val;
buf[1] = '\0';
APPEND_PADDED_S(buf, 1, width, left_justify);
f++;
break;
} case 's':
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
s = va_arg(ap, char *);
slen = (prec == -1) ? strlen(s) : prec;
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
case 'p': {
uintmax_t val;
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, 'p');
s = x2s(val, true, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
}
default: not_implemented();
}
break;
} default: {
APPEND_C(*f);
f++;
break;
}}
}
label_out:
if (i < size)
str[i] = '\0';
else
str[size - 1] = '\0';
ret = i;
#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
return (ret);
}
JEMALLOC_ATTR(format(printf, 3, 4))
int
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
int ret;
va_list ap;
va_start(ap, format);
ret = malloc_vsnprintf(str, size, format, ap);
va_end(ap);
return (ret);
}
void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap)
{
char buf[MALLOC_PRINTF_BUFSIZE];
if (write_cb == NULL) {
/*
* The caller did not provide an alternate write_cb callback
* function, so use the default one. malloc_write() is an
* inline function, so use malloc_message() directly here.
*/
write_cb = (je_malloc_message != NULL) ? je_malloc_message :
wrtmessage;
cbopaque = NULL;
}
malloc_vsnprintf(buf, sizeof(buf), format, ap);
write_cb(cbopaque, buf);
}
/*
* Print to a callback function in such a way as to (hopefully) avoid memory
* allocation.
*/
JEMALLOC_ATTR(format(printf, 3, 4))
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(write_cb, cbopaque, format, ap);
va_end(ap);
}
/* Print to stderr in such a way as to avoid memory allocation. */
JEMALLOC_ATTR(format(printf, 1, 2))
void
malloc_printf(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
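
The new util.c supplies malloc_vsnprintf()/malloc_snprintf() and malloc_strtoumax() so that the statistics and error paths can format %zu, %zd, %p and friends without going through libc's allocating printf machinery. The sketch below only illustrates the supported conversions; it is a hypothetical driver that assumes it is compiled inside the jemalloc tree so the internal declarations are visible:

/*
 * Hypothetical test driver: assumes the internal header (and with it the
 * declarations of malloc_snprintf(), malloc_strtoumax() and malloc_write())
 * is reachable, as it is when built inside deps/jemalloc.
 */
#include "jemalloc/internal/jemalloc_internal.h"

int main(void) {
    char buf[128];
    size_t active = 4096 * 37;
    void *p = buf;
    char *end;
    uintmax_t v;

    /* %zu, %zd and %p are handled without calling libc's snprintf(). */
    malloc_snprintf(buf, sizeof(buf),
        "active: %12zu bytes, ptr %p, delta %zd\n", active, p, (ssize_t)-8);
    malloc_write(buf);

    /* Base auto-detection: a "0x" prefix selects base 16, like strtoumax(). */
    v = malloc_strtoumax("0x1f", &end, 0);
    malloc_snprintf(buf, sizeof(buf), "parsed %zu, rest \"%s\"\n",
        (size_t)v, end);
    malloc_write(buf);

    return 0;
}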


@ -3,11 +3,18 @@
# error "This source file is for zones on Darwin (OS X)."
#endif
/*
* The malloc_default_purgeable_zone function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
static malloc_zone_t zone, szone;
static struct malloc_introspection_t zone_introspect, ozone_introspect;
static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
@ -18,8 +25,10 @@ static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
#if (JEMALLOC_ZONE_VERSION >= 6)
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
@ -28,19 +37,6 @@ static void *zone_destroy(malloc_zone_t *zone);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
static size_t ozone_size(malloc_zone_t *zone, void *ptr);
static void ozone_free(malloc_zone_t *zone, void *ptr);
static void *ozone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static unsigned ozone_batch_malloc(malloc_zone_t *zone, size_t size,
void **results, unsigned num_requested);
static void ozone_batch_free(malloc_zone_t *zone, void **to_be_freed,
unsigned num);
#if (JEMALLOC_ZONE_VERSION >= 6)
static void ozone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
#endif
static void ozone_force_lock(malloc_zone_t *zone);
static void ozone_force_unlock(malloc_zone_t *zone);
/******************************************************************************/
/*
@@ -60,21 +56,21 @@ zone_size(malloc_zone_t *zone, void *ptr)
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
return (ivsalloc(ptr));
return (ivsalloc(ptr, config_prof));
}
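
For context, a hypothetical sketch (not Apple's actual code) of the dispatch the comment above is describing: the system walks the registered zones and treats a nonzero size() result as an ownership claim, which is why jemalloc's zone must report 0 for foreign pointers.

#include <malloc/malloc.h>
#include <stddef.h>

/* Illustrative only: find the zone that claims ownership of ptr. */
static malloc_zone_t *
zone_owning_ptr(malloc_zone_t **zones, unsigned nzones, void *ptr)
{
	unsigned i;

	for (i = 0; i < nzones; i++) {
		if (zones[i]->size(zones[i], ptr) != 0)
			return (zones[i]);
	}
	return (NULL);
}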
static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{
return (JEMALLOC_P(malloc)(size));
return (je_malloc(size));
}
static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{
return (JEMALLOC_P(calloc)(num, size));
return (je_calloc(num, size));
}
static void *
@@ -82,7 +78,7 @@ zone_valloc(malloc_zone_t *zone, size_t size)
{
void *ret = NULL; /* Assignment avoids useless compiler warning. */
JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);
je_posix_memalign(&ret, PAGE, size);
return (ret);
}
@@ -91,33 +87,48 @@ static void
zone_free(malloc_zone_t *zone, void *ptr)
{
JEMALLOC_P(free)(ptr);
if (ivsalloc(ptr, config_prof) != 0) {
je_free(ptr);
return;
}
free(ptr);
}
static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
return (JEMALLOC_P(realloc)(ptr, size));
if (ivsalloc(ptr, config_prof) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
}
#if (JEMALLOC_ZONE_VERSION >= 6)
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
void *ret = NULL; /* Assignment avoids useless compiler warning. */
JEMALLOC_P(posix_memalign)(&ret, alignment, size);
je_posix_memalign(&ret, alignment, size);
return (ret);
}
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
assert(ivsalloc(ptr) == size);
JEMALLOC_P(free)(ptr);
if (ivsalloc(ptr, config_prof) != 0) {
assert(ivsalloc(ptr, config_prof) == size);
je_free(ptr);
return;
}
free(ptr);
}
#endif
@@ -133,22 +144,10 @@ zone_destroy(malloc_zone_t *zone)
static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{
size_t ret;
void *p;
/*
* Actually create an object of the appropriate size, then find out
* how large it could have been without moving up to the next size
* class.
*/
p = JEMALLOC_P(malloc)(size);
if (p != NULL) {
ret = isalloc(p);
JEMALLOC_P(free)(p);
} else
ret = size;
return (ret);
if (size == 0)
size = 1;
return (s2u(size));
}
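
For context (illustrative only, not the jemalloc implementation of s2u()): the new zone_good_size() computes the usable size directly instead of allocating and freeing a probe object. For small, quantum-spaced requests that amounts to rounding up to the quantum, assumed here to be the common 16 bytes:

#include <stddef.h>

/* Illustrative stand-in for the small-size case of s2u(). */
#define QUANTUM		((size_t)16)
#define QUANTUM_CEILING(s)	(((s) + QUANTUM - 1) & ~(QUANTUM - 1))

/*
 * e.g. QUANTUM_CEILING(100) == 112, matching the size class a 100-byte
 * request would land in under the default configuration.
 */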
static void
@@ -164,11 +163,12 @@ zone_force_unlock(malloc_zone_t *zone)
{
if (isthreaded)
jemalloc_postfork();
jemalloc_postfork_parent();
}
malloc_zone_t *
create_zone(void)
JEMALLOC_ATTR(constructor)
void
register_zone(void)
{
zone.size = (void *)zone_size;
@@ -183,10 +183,15 @@ create_zone(void)
zone.batch_free = NULL;
zone.introspect = &zone_introspect;
zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 6)
#if (JEMALLOC_ZONE_VERSION >= 5)
zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
zone.pressure_relief = NULL;
#endif
zone_introspect.enumerator = NULL;
zone_introspect.good_size = (void *)zone_good_size;
@@ -199,156 +204,45 @@ create_zone(void)
#if (JEMALLOC_ZONE_VERSION >= 6)
zone_introspect.zone_locked = NULL;
#endif
return (&zone);
}
static size_t
ozone_size(malloc_zone_t *zone, void *ptr)
{
size_t ret;
ret = ivsalloc(ptr);
if (ret == 0)
ret = szone.size(zone, ptr);
return (ret);
}
static void
ozone_free(malloc_zone_t *zone, void *ptr)
{
if (ivsalloc(ptr) != 0)
JEMALLOC_P(free)(ptr);
else {
size_t size = szone.size(zone, ptr);
if (size != 0)
(szone.free)(zone, ptr);
}
}
static void *
ozone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
size_t oldsize;
if (ptr == NULL)
return (JEMALLOC_P(malloc)(size));
oldsize = ivsalloc(ptr);
if (oldsize != 0)
return (JEMALLOC_P(realloc)(ptr, size));
else {
oldsize = szone.size(zone, ptr);
if (oldsize == 0)
return (JEMALLOC_P(malloc)(size));
else {
void *ret = JEMALLOC_P(malloc)(size);
if (ret != NULL) {
memcpy(ret, ptr, (oldsize < size) ? oldsize :
size);
(szone.free)(zone, ptr);
}
return (ret);
}
}
}
static unsigned
ozone_batch_malloc(malloc_zone_t *zone, size_t size, void **results,
unsigned num_requested)
{
/* Don't bother implementing this interface, since it isn't required. */
return (0);
}
static void
ozone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num)
{
unsigned i;
for (i = 0; i < num; i++)
ozone_free(zone, to_be_freed[i]);
}
#if (JEMALLOC_ZONE_VERSION >= 6)
static void
ozone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
if (ivsalloc(ptr) != 0) {
assert(ivsalloc(ptr) == size);
JEMALLOC_P(free)(ptr);
} else {
assert(size == szone.size(zone, ptr));
szone.free_definite_size(zone, ptr, size);
}
}
#if (JEMALLOC_ZONE_VERSION >= 7)
zone_introspect.enable_discharge_checking = NULL;
zone_introspect.disable_discharge_checking = NULL;
zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
zone_introspect.enumerate_discharged_pointers = NULL;
#else
zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
#endif
static void
ozone_force_lock(malloc_zone_t *zone)
{
/* jemalloc locking is taken care of by the normal jemalloc zone. */
szone.introspect->force_lock(zone);
}
static void
ozone_force_unlock(malloc_zone_t *zone)
{
/* jemalloc locking is taken care of by the normal jemalloc zone. */
szone.introspect->force_unlock(zone);
}
/*
* Overlay the default scalable zone (szone) such that existing allocations are
* drained, and further allocations come from jemalloc. This is necessary
* because Core Foundation directly accesses and uses the szone before the
* jemalloc library is even loaded.
*/
void
szone2ozone(malloc_zone_t *zone)
{
/*
* Stash a copy of the original szone so that we can call its
* functions as needed. Note that internally, the szone stores its
* bookkeeping data structures immediately following the malloc_zone_t
* header, so when calling szone functions, we need to pass a pointer
* to the original zone structure.
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
* malloc_default_purgeable_zone is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
memcpy(&szone, zone, sizeof(malloc_zone_t));
if (malloc_default_purgeable_zone != NULL)
malloc_default_purgeable_zone();
zone->size = (void *)ozone_size;
zone->malloc = (void *)zone_malloc;
zone->calloc = (void *)zone_calloc;
zone->valloc = (void *)zone_valloc;
zone->free = (void *)ozone_free;
zone->realloc = (void *)ozone_realloc;
zone->destroy = (void *)zone_destroy;
zone->zone_name = "jemalloc_ozone";
zone->batch_malloc = ozone_batch_malloc;
zone->batch_free = ozone_batch_free;
zone->introspect = &ozone_introspect;
zone->version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 6)
zone->memalign = zone_memalign;
zone->free_definite_size = ozone_free_definite_size;
#endif
/* Register the custom zone. At this point it won't be the default. */
malloc_zone_register(&zone);
ozone_introspect.enumerator = NULL;
ozone_introspect.good_size = (void *)zone_good_size;
ozone_introspect.check = NULL;
ozone_introspect.print = NULL;
ozone_introspect.log = NULL;
ozone_introspect.force_lock = (void *)ozone_force_lock;
ozone_introspect.force_unlock = (void *)ozone_force_unlock;
ozone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
ozone_introspect.zone_locked = NULL;
#endif
/*
* Unregister and reregister the default zone. On OSX >= 10.6,
* unregistering takes the last registered zone and places it at the
* location of the specified zone. Unregistering the default zone thus
* makes the last registered one the default. On OSX < 10.6,
* unregistering shifts all registered zones. The first registered zone
* then becomes the default.
*/
do {
malloc_zone_t *default_zone = malloc_default_zone();
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
} while (malloc_default_zone() != &zone);
}