Update to jemalloc 2.2.5

jbergstroem
2011-11-23 21:36:25 +01:00
committed by antirez
parent 7558b1fe4d
commit a9c2f0f28a
24 changed files with 537 additions and 204 deletions

View File

@@ -50,7 +50,7 @@ extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t arena_maxclass; /* Max size class for arenas. */
 void *chunk_alloc(size_t size, bool base, bool *zero);
-void chunk_dealloc(void *chunk, size_t size);
+void chunk_dealloc(void *chunk, size_t size, bool unmap);
 bool chunk_boot(void);
 #endif /* JEMALLOC_H_EXTERNS */
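
A note on the signature change above: the new unmap flag appears to let callers release a chunk's metadata without munmap()ing its pages, which 2.2.5 needs when mremap(2) has already recycled the pages during huge_ralloc(). A self-contained toy mirroring the shape of the API (not jemalloc's implementation):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for chunk_dealloc(): with unmap == false only the
 * allocator's bookkeeping would be updated; the pages are left alone. */
static void chunk_dealloc_demo(void *chunk, size_t size, bool unmap)
{
	printf("drop metadata for %zu bytes at %p\n", size, chunk);
	if (unmap)
		printf("munmap() the pages\n");
	else
		printf("pages already recycled elsewhere; skip munmap()\n");
}

int main(void)
{
	static char chunk[4096];

	chunk_dealloc_demo(chunk, sizeof(chunk), true);  /* ordinary free path */
	chunk_dealloc_demo(chunk, sizeof(chunk), false); /* e.g. after mremap(2) */
	return 0;
}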

View File

@@ -26,7 +26,7 @@ uint64_t hash(const void *key, size_t len, uint64_t seed);
 JEMALLOC_INLINE uint64_t
 hash(const void *key, size_t len, uint64_t seed)
 {
-	const uint64_t m = 0xc6a4a7935bd1e995;
+	const uint64_t m = 0xc6a4a7935bd1e995LLU;
 	const int r = 47;
 	uint64_t h = seed ^ (len * m);
 	const uint64_t *data = (const uint64_t *)key;
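
The only change in this hunk is the LLU suffix on MurmurHash2's multiplier. For a hex constant this wide, C99 compilers already pick an unsigned 64-bit type, but older or stricter compilers may warn or truncate; the suffix pins the type to unsigned long long explicitly. A tiny standalone check (my example, not jemalloc code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The suffix makes the 64-bit width explicit even under pre-C99
	 * compilers that would otherwise warn about or truncate it. */
	const uint64_t m = 0xc6a4a7935bd1e995LLU;

	printf("m = %llx\n", (unsigned long long)m);
	return 0;
}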

View File

@@ -33,6 +33,8 @@
 #define JEMALLOC_MANGLE
 #include "../jemalloc@install_suffix@.h"
+#include "jemalloc/internal/private_namespace.h"
 #if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
 #include <libkern/OSAtomic.h>
 #endif
@@ -633,7 +635,11 @@ ipalloc(size_t usize, size_t alignment, bool zero)
 	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
 		ret = arena_malloc(usize, zero);
 	else {
-		size_t run_size = 0;
+		size_t run_size
+#ifdef JEMALLOC_CC_SILENCE
+		    = 0
+#endif
+		    ;
 		/*
 		 * Ideally we would only ever call sa2u() once per aligned
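
The run_size change is jemalloc's JEMALLOC_CC_SILENCE idiom: the variable is zero-initialized only when the build asks to silence spurious "may be used uninitialized" warnings, so ordinary builds keep the compiler's ability to flag a genuinely missing assignment. A minimal compilable version of the same pattern (the #define here stands in for what configure would set):

#include <stdio.h>

#define JEMALLOC_CC_SILENCE /* pretend configure enabled the workaround */

int main(void)
{
	int run_size
#ifdef JEMALLOC_CC_SILENCE
	    = 0
#endif
	    ;

	run_size = 7; /* every real code path assigns before use */
	printf("%d\n", run_size);
	return 0;
}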

View File

@@ -0,0 +1,195 @@
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_new JEMALLOC_N(arena_new)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_salloc_demote JEMALLOC_N(arena_salloc_demote)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_node_alloc JEMALLOC_N(base_node_alloc)
#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
#define bitmap_init JEMALLOC_N(bitmap_init)
#define bitmap_set JEMALLOC_N(bitmap_set)
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
#define bitmap_size JEMALLOC_N(bitmap_size)
#define bitmap_unset JEMALLOC_N(bitmap_unset)
#define bt_init JEMALLOC_N(bt_init)
#define buferror JEMALLOC_N(buferror)
#define choose_arena JEMALLOC_N(choose_arena)
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
#define chunk_alloc JEMALLOC_N(chunk_alloc)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_alloc_mmap_noreserve JEMALLOC_N(chunk_alloc_mmap_noreserve)
#define chunk_alloc_swap JEMALLOC_N(chunk_alloc_swap)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
#define chunk_dealloc_dss JEMALLOC_N(chunk_dealloc_dss)
#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
#define chunk_dealloc_swap JEMALLOC_N(chunk_dealloc_swap)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_in_swap JEMALLOC_N(chunk_in_swap)
#define chunk_mmap_boot JEMALLOC_N(chunk_mmap_boot)
#define chunk_swap_boot JEMALLOC_N(chunk_swap_boot)
#define chunk_swap_enable JEMALLOC_N(chunk_swap_enable)
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
#define ckh_insert JEMALLOC_N(ckh_insert)
#define ckh_isearch JEMALLOC_N(ckh_isearch)
#define ckh_iter JEMALLOC_N(ckh_iter)
#define ckh_new JEMALLOC_N(ckh_new)
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
#define ckh_remove JEMALLOC_N(ckh_remove)
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
#define create_zone JEMALLOC_N(create_zone)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
#define hash JEMALLOC_N(hash)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
#define huge_ralloc JEMALLOC_N(huge_ralloc)
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm)
#define icalloc JEMALLOC_N(icalloc)
#define idalloc JEMALLOC_N(idalloc)
#define imalloc JEMALLOC_N(imalloc)
#define ipalloc JEMALLOC_N(ipalloc)
#define iralloc JEMALLOC_N(iralloc)
#define isalloc JEMALLOC_N(isalloc)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define jemalloc_darwin_init JEMALLOC_N(jemalloc_darwin_init)
#define jemalloc_postfork JEMALLOC_N(jemalloc_postfork)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_destroy JEMALLOC_N(malloc_mutex_destroy)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_trylock JEMALLOC_N(malloc_mutex_trylock)
#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
#define malloc_printf JEMALLOC_N(malloc_printf)
#define malloc_write JEMALLOC_N(malloc_write)
#define mb_write JEMALLOC_N(mb_write)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
#define prof_free JEMALLOC_N(prof_free)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define pthread_create JEMALLOC_N(pthread_create)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
#define rtree_set JEMALLOC_N(rtree_set)
#define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u)
#define stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index)
#define stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index)
#define stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_print JEMALLOC_N(stats_print)
#define szone2ozone JEMALLOC_N(szone2ozone)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_boot JEMALLOC_N(tcache_boot)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_destroy JEMALLOC_N(tcache_destroy)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define thread_allocated_get JEMALLOC_N(thread_allocated_get)
#define thread_allocated_get_hard JEMALLOC_N(thread_allocated_get_hard)
#define u2s JEMALLOC_N(u2s)
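
Every entry in this new header funnels a library-private symbol through JEMALLOC_N(), which pastes on the configured private-namespace prefix so that a statically linked jemalloc cannot collide with identically named symbols in the application (the comment added to jemalloc_defs.h.in below spells out the rationale). A toy version of the idiom, assuming a je_ prefix (the real prefix comes from --with-private-namespace):

#include <stdio.h>

#define JEMALLOC_N(n) je_##n
#define arena_boot JEMALLOC_N(arena_boot)

/* Because of the #define above, this actually defines je_arena_boot(). */
static void arena_boot(void)
{
	printf("je_arena_boot called\n");
}

int main(void)
{
	arena_boot(); /* expands to je_arena_boot() */
	return 0;
}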

View File

@@ -227,9 +227,60 @@ bool prof_boot2(void);
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
+#define PROF_ALLOC_PREP(nignore, size, ret) do { \
+	prof_tdata_t *prof_tdata; \
+	prof_bt_t bt; \
+	\
+	assert(size == s2u(size)); \
+	\
+	prof_tdata = PROF_TCACHE_GET(); \
+	if (prof_tdata == NULL) { \
+		prof_tdata = prof_tdata_init(); \
+		if (prof_tdata == NULL) { \
+			ret = NULL; \
+			break; \
+		} \
+	} \
+	\
+	if (opt_prof_active == false) { \
+		/* Sampling is currently inactive, so avoid sampling. */\
+		ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
+	} else if (opt_lg_prof_sample == 0) { \
+		/* Don't bother with sampling logic, since sampling */\
+		/* interval is 1. */\
+		bt_init(&bt, prof_tdata->vec); \
+		prof_backtrace(&bt, nignore, prof_bt_max); \
+		ret = prof_lookup(&bt); \
+	} else { \
+		if (prof_tdata->threshold == 0) { \
+			/* Initialize. Seed the prng differently for */\
+			/* each thread. */\
+			prof_tdata->prn_state = \
+			    (uint64_t)(uintptr_t)&size; \
+			prof_sample_threshold_update(prof_tdata); \
+		} \
+		\
+		/* Determine whether to capture a backtrace based on */\
+		/* whether size is enough for prof_accum to reach */\
+		/* prof_tdata->threshold. However, delay updating */\
+		/* these variables until prof_{m,re}alloc(), because */\
+		/* we don't know for sure that the allocation will */\
+		/* succeed. */\
+		/* */\
+		/* Use subtraction rather than addition to avoid */\
+		/* potential integer overflow. */\
+		if (size >= prof_tdata->threshold - \
+		    prof_tdata->accum) { \
+			bt_init(&bt, prof_tdata->vec); \
+			prof_backtrace(&bt, nignore, prof_bt_max); \
+			ret = prof_lookup(&bt); \
+		} else \
+			ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
+	} \
+} while (0)
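
The old inline prof_alloc_prep() (removed further down) had to guess how many stack frames to skip via a compile-time NIGNORE that depended on whether inlining was enabled. Rewriting it as the PROF_ALLOC_PREP() macro above lets every call site pass its own nignore count, and the do { ... } while (0) wrapper lets break play the role of the old return (NULL). A self-contained demo of that early-exit idiom, with hypothetical names:

#include <stdio.h>

/* do { ... } while (0) turns the body into one statement, and "break"
 * exits the body early, mimicking a function's "return". */
#define PREP(ok, ret) do { \
	if (!(ok)) { \
		(ret) = -1; /* early exit, like "ret = NULL; break;" above */ \
		break; \
	} \
	(ret) = 42; \
} while (0)

int main(void)
{
	int r;

	PREP(0, r);
	printf("%d\n", r); /* prints -1 */
	PREP(1, r);
	printf("%d\n", r); /* prints 42 */
	return 0;
}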
 #ifndef JEMALLOC_ENABLE_INLINE
 void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
-prof_thr_cnt_t *prof_alloc_prep(size_t size);
 prof_ctx_t *prof_ctx_get(const void *ptr);
 void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
 bool prof_sample_accum_update(size_t size);
@@ -272,71 +323,6 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
 	    + (uint64_t)1U;
 }
 
-JEMALLOC_INLINE prof_thr_cnt_t *
-prof_alloc_prep(size_t size)
-{
-#ifdef JEMALLOC_ENABLE_INLINE
-   /* This function does not have its own stack frame, because it is inlined. */
-#  define NIGNORE 1
-#else
-#  define NIGNORE 2
-#endif
-	prof_thr_cnt_t *ret;
-	prof_tdata_t *prof_tdata;
-	prof_bt_t bt;
-
-	assert(size == s2u(size));
-
-	prof_tdata = PROF_TCACHE_GET();
-	if (prof_tdata == NULL) {
-		prof_tdata = prof_tdata_init();
-		if (prof_tdata == NULL)
-			return (NULL);
-	}
-
-	if (opt_prof_active == false) {
-		/* Sampling is currently inactive, so avoid sampling. */
-		ret = (prof_thr_cnt_t *)(uintptr_t)1U;
-	} else if (opt_lg_prof_sample == 0) {
-		/*
-		 * Don't bother with sampling logic, since sampling interval is
-		 * 1.
-		 */
-		bt_init(&bt, prof_tdata->vec);
-		prof_backtrace(&bt, NIGNORE, prof_bt_max);
-		ret = prof_lookup(&bt);
-	} else {
-		if (prof_tdata->threshold == 0) {
-			/*
-			 * Initialize. Seed the prng differently for each
-			 * thread.
-			 */
-			prof_tdata->prn_state = (uint64_t)(uintptr_t)&size;
-			prof_sample_threshold_update(prof_tdata);
-		}
-
-		/*
-		 * Determine whether to capture a backtrace based on whether
-		 * size is enough for prof_accum to reach
-		 * prof_tdata->threshold. However, delay updating these
-		 * variables until prof_{m,re}alloc(), because we don't know
-		 * for sure that the allocation will succeed.
-		 *
-		 * Use subtraction rather than addition to avoid potential
-		 * integer overflow.
-		 */
-		if (size >= prof_tdata->threshold - prof_tdata->accum) {
-			bt_init(&bt, prof_tdata->vec);
-			prof_backtrace(&bt, NIGNORE, prof_bt_max);
-			ret = prof_lookup(&bt);
-		} else
-			ret = (prof_thr_cnt_t *)(uintptr_t)1U;
-	}
-
-	return (ret);
-#undef NIGNORE
-}
 
 JEMALLOC_INLINE prof_ctx_t *
 prof_ctx_get(const void *ptr)
 {
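
Both the new macro and the removed function test size >= prof_tdata->threshold - prof_tdata->accum rather than accum + size >= threshold. Between updates accum stays below threshold, so the subtraction can never wrap, while the sum can exceed the uint64_t range and wrap to a small value that compares the wrong way. A small standalone illustration (values are mine):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t threshold = UINT64_MAX - 10;
	uint64_t accum = 100; /* invariant: accum < threshold */
	uint64_t size = UINT64_MAX - 50;

	/* The sum wraps around to 49, so the "obvious" test is wrong... */
	printf("wrapped: %d\n", accum + size >= threshold); /* prints 0 */
	/* ...while the subtraction form cannot wrap. */
	printf("safe:    %d\n", size >= threshold - accum); /* prints 1 */
	return 0;
}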
@@ -415,7 +401,7 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
 			 * always possible to tell in advance how large an
 			 * object's usable size will be, so there should never
 			 * be a difference between the size passed to
-			 * prof_alloc_prep() and prof_malloc().
+			 * PROF_ALLOC_PREP() and prof_malloc().
 			 */
 			assert((uintptr_t)cnt == (uintptr_t)1U);
 		}
@@ -459,7 +445,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
 		if (prof_sample_accum_update(size)) {
 			/*
 			 * Don't sample. The size passed to
-			 * prof_alloc_prep() was larger than what
+			 * PROF_ALLOC_PREP() was larger than what
 			 * actually got allocated, so a backtrace was
 			 * captured for this allocation, even though
 			 * its actual size was insufficient to cross

View File

@@ -18,6 +18,15 @@
 #undef JEMALLOC_P
 #endif
 
+/*
+ * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
+ * For shared libraries, symbol visibility mechanisms prevent these symbols
+ * from being exported, but for static libraries, naming collisions are a real
+ * possibility.
+ */
+#undef JEMALLOC_PRIVATE_NAMESPACE
+#undef JEMALLOC_N
+
 /*
  * Hyper-threaded CPUs may need a special instruction inside spin loops in
  * order to yield to another virtual CPU.
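
The comment above introduces jemalloc's CPU_SPINWAIT knob; on x86 it is typically the pause instruction, which tells a hyper-threaded core to hand execution resources to its sibling while a thread busy-waits. A minimal, x86-and-GCC-only sketch of a spinlock using that hint (illustrative only, built on C11 atomics rather than jemalloc's mutex code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;

static void spin_lock(void)
{
	while (atomic_flag_test_and_set(&lock)) {
		/* Spin-loop hint: yield pipeline resources to the other
		 * hardware thread instead of hammering the flag. */
		__asm__ volatile ("pause");
	}
}

static void spin_unlock(void)
{
	atomic_flag_clear(&lock);
}

int main(void)
{
	spin_lock();
	puts("in critical section");
	spin_unlock();
	return 0;
}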