Jemalloc updated to 3.0.0.

Full changelog here:

http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git;a=blob_plain;f=ChangeLog;hb=master

Notable improvements from the point of view of Redis:

1) Bug fixes.
2) Support for Valgrind.
3) Support for OS X Lion and FreeBSD.
antirez
2012-05-15 15:27:12 +02:00
parent 3c72c94aae
commit ad4c0b4117
157 changed files with 42924 additions and 9103 deletions

@@ -4,11 +4,9 @@
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
#endif
malloc_mutex_t huge_mtx;
@@ -19,10 +17,18 @@ static extent_tree_t huge;
void *
huge_malloc(size_t size, bool zero)
{
return (huge_palloc(size, chunksize, zero));
}
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
void *ret;
size_t csize;
extent_node_t *node;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
@@ -37,7 +43,12 @@ huge_malloc(size_t size, bool zero)
if (node == NULL)
return (NULL);
ret = chunk_alloc(csize, false, &zero);
/*
* Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
ret = chunk_alloc(csize, alignment, false, &is_zeroed);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
@@ -49,106 +60,19 @@ huge_malloc(size_t size, bool zero)
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(csize);
huge_nmalloc++;
huge_allocated += csize;
#endif
if (config_stats) {
stats_cactive_add(csize);
huge_nmalloc++;
huge_allocated += csize;
}
malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_FILL
if (zero == false) {
if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
else if (opt_zero)
else if (opt_zero && is_zeroed == false)
memset(ret, 0, csize);
}
#endif
return (ret);
}
/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
void *ret;
size_t alloc_size, chunk_size, offset;
extent_node_t *node;
/*
* This allocation requires alignment that is even larger than chunk
* alignment. This means that huge_malloc() isn't good enough.
*
* Allocate almost twice as many chunks as are demanded by the size or
* alignment, in order to assure the alignment can be achieved, then
* unmap leading and trailing chunks.
*/
assert(alignment > chunksize);
chunk_size = CHUNK_CEILING(size);
if (size >= alignment)
alloc_size = chunk_size + alignment - chunksize;
else
alloc_size = (alignment << 1) - chunksize;
/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc();
if (node == NULL)
return (NULL);
ret = chunk_alloc(alloc_size, false, &zero);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
}
offset = (uintptr_t)ret & (alignment - 1);
assert((offset & chunksize_mask) == 0);
assert(offset < alloc_size);
if (offset == 0) {
/* Trim trailing space. */
chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
- chunk_size, true);
} else {
size_t trailsize;
/* Trim leading space. */
chunk_dealloc(ret, alignment - offset, true);
ret = (void *)((uintptr_t)ret + (alignment - offset));
trailsize = alloc_size - (alignment - offset) - chunk_size;
if (trailsize != 0) {
/* Trim trailing space. */
assert(trailsize < alloc_size);
chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
trailsize, true);
}
}
/* Insert node into huge. */
node->addr = ret;
node->size = chunk_size;
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(chunk_size);
huge_nmalloc++;
huge_allocated += chunk_size;
#endif
malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_FILL
if (zero == false) {
if (opt_junk)
memset(ret, 0xa5, chunk_size);
else if (opt_zero)
memset(ret, 0, chunk_size);
}
#endif
return (ret);
}
@@ -164,12 +88,10 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
#ifdef JEMALLOC_FILL
if (opt_junk && size < oldsize) {
if (config_fill && opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
#endif
return (ptr);
}
@@ -218,20 +140,13 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
*/
copysize = (size < oldsize) ? size : oldsize;
#ifdef JEMALLOC_MREMAP
/*
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
* source nor the destination are in swap or dss.
* source nor the destination are in dss.
*/
#ifdef JEMALLOC_MREMAP_FIXED
if (oldsize >= chunksize
# ifdef JEMALLOC_SWAP
&& (swap_enabled == false || (chunk_in_swap(ptr) == false &&
chunk_in_swap(ret) == false))
# endif
# ifdef JEMALLOC_DSS
&& chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
# endif
) {
if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
== false && chunk_in_dss(ret) == false))) {
size_t newsize = huge_salloc(ret);
/*
@@ -253,10 +168,9 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
*/
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in mremap(): ");
malloc_write(buf);
malloc_write("\n");
buferror(buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in mremap(): %s\n",
buf);
if (opt_abort)
abort();
memcpy(ret, ptr, copysize);
@@ -266,7 +180,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
#endif
{
memcpy(ret, ptr, copysize);
idalloc(ptr);
iqalloc(ptr);
}
return (ret);
}
@@ -285,23 +199,16 @@ huge_dalloc(void *ptr, bool unmap)
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_sub(node->size);
huge_ndalloc++;
huge_allocated -= node->size;
#endif
if (config_stats) {
stats_cactive_sub(node->size);
huge_ndalloc++;
huge_allocated -= node->size;
}
malloc_mutex_unlock(&huge_mtx);
if (unmap) {
/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
if (opt_junk)
memset(node->addr, 0x5a, node->size);
#endif
#endif
}
if (unmap && config_fill && config_dss && opt_junk)
memset(node->addr, 0x5a, node->size);
chunk_dealloc(node->addr, node->size, unmap);
@@ -328,7 +235,6 @@ huge_salloc(const void *ptr)
return (ret);
}
#ifdef JEMALLOC_PROF
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
@@ -365,7 +271,6 @@ huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
malloc_mutex_unlock(&huge_mtx);
}
#endif
bool
huge_boot(void)
@@ -376,11 +281,32 @@ huge_boot(void)
return (true);
extent_tree_ad_new(&huge);
#ifdef JEMALLOC_STATS
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
#endif
if (config_stats) {
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
}
return (false);
}
void
huge_prefork(void)
{
malloc_mutex_prefork(&huge_mtx);
}
void
huge_postfork_parent(void)
{
malloc_mutex_postfork_parent(&huge_mtx);
}
void
huge_postfork_child(void)
{
malloc_mutex_postfork_child(&huge_mtx);
}
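
A recurring pattern in this diff is the replacement of compile-time #ifdef JEMALLOC_STATS / #ifdef JEMALLOC_FILL / swap-and-dss guards with checks on config_stats, config_fill and config_dss. In jemalloc 3.0.0 these are constant booleans derived from the same build-time defines, so disabled branches are still eliminated by the compiler while the guarded code is parsed and type-checked in every configuration. Below is a minimal standalone sketch of that pattern; the demo_* names are illustrative only and not part of jemalloc's API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Compile-time constant derived from the build-time define.  Branches
 * guarded by it are removed by the optimizer when JEMALLOC_STATS is not
 * defined, but the code inside them still has to compile.
 */
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;

/* Hypothetical counters standing in for huge_nmalloc / huge_allocated. */
static uint64_t demo_nmalloc;
static size_t demo_allocated;

/* Hypothetical helper showing the runtime-looking, compile-time-resolved check. */
static void
demo_account_alloc(size_t csize)
{
	if (config_stats) {
		demo_nmalloc++;
		demo_allocated += csize;
	}
}

int
main(void)
{
	demo_account_alloc(4096);
	printf("nmalloc=%llu allocated=%zu\n",
	    (unsigned long long)demo_nmalloc, demo_allocated);
	return 0;
}

Compared with the old preprocessor guards, this keeps every configuration compiling against the same code, which is why the diff above can delete so many #ifdef blocks without changing the behaviour of enabled builds.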