Jemalloc updated to 3.6.0.

Not a single bug reported in about 3 months, and our previous version
(3.2.0) was too old.
antirez
2014-06-20 14:59:18 +02:00
parent fe596d67e3
commit fceef8e0dd
148 changed files with 16910 additions and 5144 deletions

View File

@@ -158,6 +158,7 @@ struct arena_chunk_map_s {
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t;
/* Arena chunk header. */
struct arena_chunk_s {
@@ -174,11 +175,12 @@ struct arena_chunk_s {
size_t nruns_avail;
/*
* Number of available run adjacencies. Clean and dirty available runs
* are not coalesced, which causes virtual memory fragmentation. The
* ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking
* this fragmentation.
* */
* Number of available run adjacencies that purging could coalesce.
* Clean and dirty available runs are not coalesced, which causes
* virtual memory fragmentation. The ratio of
* (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this
* fragmentation.
*/
size_t nruns_adjac;
/*
@@ -400,12 +402,20 @@ extern arena_bin_info_t arena_bin_info[NBINS];
#define nlclasses (chunk_npages - map_bias)
void arena_purge_all(arena_t *arena);
void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
size_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
uint8_t);
extern arena_redzone_corruption_t *arena_redzone_corruption;
typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
@@ -416,10 +426,18 @@ void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_t *mapelm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#endif
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
@@ -442,6 +460,7 @@ void arena_postfork_child(arena_t *arena);
#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
@@ -452,6 +471,7 @@ size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
@@ -464,12 +484,15 @@ void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
size_t runind, size_t binind, size_t flags);
void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_ctx_t *arena_prof_ctx_get(const void *ptr);
void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
void arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
@@ -478,7 +501,7 @@ void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_INLINE arena_chunk_map_t *
JEMALLOC_ALWAYS_INLINE arena_chunk_map_t *
arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
{
@@ -488,21 +511,28 @@ arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
return (&chunk->map[pageind-map_bias]);
}
JEMALLOC_INLINE size_t *
JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{
return (&arena_mapp_get(chunk, pageind)->bits);
}
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(size_t *mapbitsp)
{
return (*mapbitsp);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
return (*arena_mapbitsp_get(chunk, pageind));
return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -512,7 +542,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & ~PAGE_MASK);
}
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -523,7 +553,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & ~PAGE_MASK);
}
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -534,7 +564,7 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits >> LG_PAGE);
}
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -546,7 +576,7 @@ arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
return (binind);
}
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -555,7 +585,7 @@ arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & CHUNK_MAP_DIRTY);
}
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -564,7 +594,7 @@ arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & CHUNK_MAP_UNZEROED);
}
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -573,7 +603,7 @@ arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & CHUNK_MAP_LARGE);
}
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -582,86 +612,138 @@ arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & CHUNK_MAP_ALLOCATED);
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
{
*mapbitsp = mapbits;
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
*mapbitsp = size | (*mapbitsp & PAGE_MASK);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
size_t unzeroed;
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_DIRTY) == flags);
unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
| unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
*mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
CHUNK_MAP_BININD_SHIFT);
arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
(binind << CHUNK_MAP_BININD_SHIFT));
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
size_t binind, size_t flags)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
size_t unzeroed;
assert(binind < BININD_INVALID);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(pageind - runind >= map_bias);
assert((flags & CHUNK_MAP_DIRTY) == flags);
unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
*mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
flags | unzeroed | CHUNK_MAP_ALLOCATED;
unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
*mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
unzeroed);
}
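Worth pausing on the pattern that all of the setters above now follow: instead of dereferencing the map-bits pointer directly, each one fetches the pointer once with arena_mapbitsp_get(), loads through arena_mapbitsp_read(), and stores through arena_mapbitsp_write(), so every map-bits load and store in the allocator funnels through two tiny functions. A minimal sketch of the idiom, using a hypothetical dirty-flag setter that is not part of this diff:

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_example_dirty_set(arena_chunk_t *chunk, size_t pageind)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);	/* locate */
	size_t mapbits = arena_mapbitsp_read(mapbitsp);		/* load */

	/* Modify a local copy, then store via the single write point. */
	arena_mapbitsp_write(mapbitsp, mapbits | CHUNK_MAP_DIRTY);
}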
JEMALLOC_INLINE size_t
JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
assert(prof_interval != 0);
arena->prof_accumbytes += accumbytes;
if (arena->prof_accumbytes >= prof_interval) {
arena->prof_accumbytes -= prof_interval;
return (true);
}
return (false);
}
JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
if (prof_interval == 0)
return (false);
return (arena_prof_accum_impl(arena, accumbytes));
}
JEMALLOC_INLINE bool
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
if (prof_interval == 0)
return (false);
{
bool ret;
malloc_mutex_lock(&arena->lock);
ret = arena_prof_accum_impl(arena, accumbytes);
malloc_mutex_unlock(&arena->lock);
return (ret);
}
}
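The arena_prof_accum*() trio above replaces a void accumulator: each variant now returns true exactly when the running byte count crosses prof_interval. The caller is expected to act on that signal; in jemalloc the reaction is an interval-triggered heap profile dump. A hedged sketch of the calling pattern (prof_idump() is assumed from elsewhere in the codebase, not defined in this diff):

	/* After successfully allocating usize bytes from arena: */
	if (config_prof && arena_prof_accum(arena, usize))
		prof_idump();	/* prof_interval crossed: dump a profile */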
JEMALLOC_ALWAYS_INLINE size_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
size_t binind;
@@ -822,10 +904,10 @@ arena_prof_ctx_get(const void *ptr)
}
JEMALLOC_INLINE void
arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
size_t pageind, mapbits;
size_t pageind;
cassert(config_prof);
assert(ptr != NULL);
@@ -833,10 +915,17 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (usize > SMALL_MAXCLASS || (prof_promote &&
((uintptr_t)ctx != (uintptr_t)1U || arena_mapbits_large_get(chunk,
pageind) != 0))) {
assert(arena_mapbits_large_get(chunk, pageind) != 0);
arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
} else {
assert(arena_mapbits_large_get(chunk, pageind) == 0);
if (prof_promote == false) {
size_t mapbits = arena_mapbits_get(chunk, pageind);
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
LG_PAGE));
@@ -848,15 +937,14 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
bin_info = &arena_bin_info[binind];
regind = arena_run_regind(run, bin_info, ptr);
*((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
+ (regind * sizeof(prof_ctx_t *)))) = ctx;
} else
assert((uintptr_t)ctx == (uintptr_t)1U);
} else
arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
*((prof_ctx_t **)((uintptr_t)run +
bin_info->ctx0_offset + (regind * sizeof(prof_ctx_t
*)))) = ctx;
}
}
}
JEMALLOC_INLINE void *
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
{
tcache_t *tcache;
@@ -887,7 +975,7 @@ arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
}
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
size_t ret;
@@ -933,7 +1021,7 @@ arena_salloc(const void *ptr, bool demote)
return (ret);
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
size_t pageind, mapbits;

View File

@@ -7,7 +7,7 @@ typedef enum {
dss_prec_secondary = 2,
dss_prec_limit = 3
} dss_prec_t ;
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
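dss_prec_t expresses how chunk allocation should prefer sbrk(2)-backed memory (the data storage segment) relative to mmap(2). Applications steer it at runtime through mallctl; a hedged usage sketch, assuming the "arena.<i>.dss" mallctl documented for this jemalloc generation and the je_ symbol prefix that Redis configures (neither appears in this diff):

#include <jemalloc/jemalloc.h>

/* Ask arena 0 to prefer the DSS over mmap for new chunks. */
static int
prefer_dss(void)
{
	const char *dss = "primary";

	return (je_mallctl("arena.0.dss", NULL, NULL, (void *)&dss,
	    sizeof(dss)));
}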

View File

@@ -5,7 +5,7 @@ typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;
/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, unsigned, size_t *, size_t *);
typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);
/* Maintain counters used to get an idea of performance. */
@@ -17,7 +17,7 @@ typedef bool ckh_keycomp_t (const void *, const void *);
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
* one bucket per L1 cache line.
*/
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
@@ -75,11 +75,9 @@ bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
void ckh_string_hash(const void *key, unsigned minbits, size_t *hash1,
size_t *hash2);
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
size_t *hash2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool ckh_pointer_keycomp(const void *k1, const void *k2);
#endif /* JEMALLOC_H_EXTERNS */
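Note the shape change in ckh_hash_t: the old callbacks produced two hashes through out-pointers with a minbits hint, while the new ones fill a size_t[2] array, matching the 128-bit hash() API below. A minimal conforming callback, sketched after what ckh_string_hash itself does (the seed value here is illustrative):

#include <string.h>

static void
example_string_hash(const void *key, size_t r_hash[2])
{
	/* Derive both size_t hash words from one 128-bit hash() call. */
	hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}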

View File

@@ -1,3 +1,8 @@
/*
* The following hash function is based on MurmurHash3, placed into the public
* domain by Austin Appleby. See http://code.google.com/p/smhasher/ for
* details.
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
@@ -14,56 +19,316 @@
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t hash(const void *key, size_t len, uint64_t seed);
uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
void hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2]);
void hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2]);
void hash(const void *key, size_t len, const uint32_t seed,
size_t r_hash[2]);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/*
* The following hash function is based on MurmurHash64A(), placed into the
* public domain by Austin Appleby. See http://murmurhash.googlepages.com/ for
* details.
*/
JEMALLOC_INLINE uint64_t
hash(const void *key, size_t len, uint64_t seed)
/******************************************************************************/
/* Internal implementation. */
JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
const uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
const int r = 47;
uint64_t h = seed ^ (len * m);
const uint64_t *data = (const uint64_t *)key;
const uint64_t *end = data + (len/8);
const unsigned char *data2;
assert(((uintptr_t)key & 0x7) == 0);
return (x << r) | (x >> (32 - r));
}
while(data != end) {
uint64_t k = *data++;
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
return (x << r) | (x >> (64 - r));
}
k *= m;
k ^= k >> r;
k *= m;
JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
h ^= k;
h *= m;
}
return (p[i]);
}
data2 = (const unsigned char *)data;
switch(len & 7) {
case 7: h ^= ((uint64_t)(data2[6])) << 48;
case 6: h ^= ((uint64_t)(data2[5])) << 40;
case 5: h ^= ((uint64_t)(data2[4])) << 32;
case 4: h ^= ((uint64_t)(data2[3])) << 24;
case 3: h ^= ((uint64_t)(data2[2])) << 16;
case 2: h ^= ((uint64_t)(data2[1])) << 8;
case 1: h ^= ((uint64_t)(data2[0]));
h *= m;
}
JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
h ^= h >> r;
h *= m;
h ^= h >> r;
return (p[i]);
}
JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return (h);
}
JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{
k ^= k >> 33;
k *= QU(0xff51afd7ed558ccdLLU);
k ^= k >> 33;
k *= QU(0xc4ceb9fe1a85ec53LLU);
k ^= k >> 33;
return (k);
}
JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
uint32_t h1 = seed;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i);
k1 *= c1;
k1 = hash_rotl_32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = hash_rotl_32(h1, 13);
h1 = h1*5 + 0xe6546b64;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
uint32_t k1 = 0;
switch (len & 3) {
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len;
h1 = hash_fmix_32(h1);
return (h1);
}
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2])
{
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
uint32_t h1 = seed;
uint32_t h2 = seed;
uint32_t h3 = seed;
uint32_t h4 = seed;
const uint32_t c1 = 0x239b961b;
const uint32_t c2 = 0xab0e9789;
const uint32_t c3 = 0x38b34ae5;
const uint32_t c4 = 0xa1e38b93;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_32(h1, 19); h1 += h2;
h1 = h1*5 + 0x561ccd1b;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
h2 = hash_rotl_32(h2, 17); h2 += h3;
h2 = h2*5 + 0x0bcaa747;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
h3 = hash_rotl_32(h3, 15); h3 += h4;
h3 = h3*5 + 0x96cd1c35;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
h4 = hash_rotl_32(h4, 13); h4 += h1;
h4 = h4*5 + 0x32ac3b17;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
uint32_t k1 = 0;
uint32_t k2 = 0;
uint32_t k3 = 0;
uint32_t k4 = 0;
switch (len & 15) {
case 15: k4 ^= tail[14] << 16;
case 14: k4 ^= tail[13] << 8;
case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
case 12: k3 ^= tail[11] << 24;
case 11: k3 ^= tail[10] << 16;
case 10: k3 ^= tail[ 9] << 8;
case 9: k3 ^= tail[ 8] << 0;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
case 8: k2 ^= tail[ 7] << 24;
case 7: k2 ^= tail[ 6] << 16;
case 6: k2 ^= tail[ 5] << 8;
case 5: k2 ^= tail[ 4] << 0;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
case 4: k1 ^= tail[ 3] << 24;
case 3: k1 ^= tail[ 2] << 16;
case 2: k1 ^= tail[ 1] << 8;
case 1: k1 ^= tail[ 0] << 0;
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
h1 = hash_fmix_32(h1);
h2 = hash_fmix_32(h2);
h3 = hash_fmix_32(h3);
h4 = hash_fmix_32(h4);
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
r_out[0] = (((uint64_t) h2) << 32) | h1;
r_out[1] = (((uint64_t) h4) << 32) | h3;
}
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2])
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
const uint64_t c1 = QU(0x87c37b91114253d5LLU);
const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
/* body */
{
const uint64_t *blocks = (const uint64_t *) (data);
int i;
for (i = 0; i < nblocks; i++) {
uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_64(h1, 27); h1 += h2;
h1 = h1*5 + 0x52dce729;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
h2 = hash_rotl_64(h2, 31); h2 += h1;
h2 = h2*5 + 0x38495ab5;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch (len & 15) {
case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = hash_fmix_64(h1);
h2 = hash_fmix_64(h2);
h1 += h2;
h2 += h1;
r_out[0] = h1;
r_out[1] = h2;
}
/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
hash_x64_128(key, len, seed, (uint64_t *)r_hash);
#else
uint64_t hashes[2];
hash_x86_128(key, len, seed, hashes);
r_hash[0] = (size_t)hashes[0];
r_hash[1] = (size_t)hashes[1];
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
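The net effect for callers: the old uint64_t hash(key, len, seed) is gone, and the replacement always yields 128 bits, truncated into two size_t words on 32-bit targets. A hedged usage sketch:

	size_t h[2];

	/* 128 bits of hash for an 11-byte key; the seed is arbitrary. */
	hash("hello world", 11, 0x12345678U, h);
	/* On LP64 little-endian builds this is hash_x64_128() verbatim;
	 * otherwise the two 64-bit halves of hash_x86_128() are narrowed
	 * to size_t. */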

View File

@@ -17,14 +17,20 @@ extern size_t huge_allocated;
/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;
void *huge_malloc(size_t size, bool zero);
void *huge_palloc(size_t size, size_t alignment, bool zero);
void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
void *huge_palloc(size_t size, size_t alignment, bool zero,
dss_prec_t dss_prec);
bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_dalloc);
size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
dss_prec_t huge_dss_prec_get(arena_t *arena);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool huge_boot(void);
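The JEMALLOC_JET blocks (here and in arena.h above) are the other recurring theme of this update: selected internal functions become replaceable function pointers so the test suite can intercept them. A hedged sketch of how a test might use the huge_dalloc_junk hook (the assertion helper is hypothetical):

#ifdef JEMALLOC_JET
static huge_dalloc_junk_t *huge_dalloc_junk_orig;

static void
huge_dalloc_junk_intercept(void *ptr, size_t usize)
{
	huge_dalloc_junk_orig(ptr, usize);	/* keep default behavior */
	assert_junk_filled(ptr, usize);		/* hypothetical test check */
}

static void
install_hook(void)
{
	huge_dalloc_junk_orig = huge_dalloc_junk;
	huge_dalloc_junk = huge_dalloc_junk_intercept;
}
#endif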

View File

@@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
@@ -54,8 +54,7 @@ typedef intptr_t ssize_t;
#endif
#include <fcntl.h>
#define JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"
#include "jemalloc_internal_defs.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
@@ -66,13 +65,18 @@ typedef intptr_t ssize_t;
#include <valgrind/memcheck.h>
#endif
#include "jemalloc/internal/private_namespace.h"
#ifdef JEMALLOC_CC_SILENCE
#define UNUSED JEMALLOC_ATTR(unused)
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "../jemalloc@install_suffix@.h"
# undef JEMALLOC_NO_RENAME
#else
#define UNUSED
# define JEMALLOC_N(n) @private_namespace@##n
# include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
@@ -221,28 +225,13 @@ static const bool config_ivsalloc =
* JEMALLOC_H_INLINES : Inline functions.
*/
/******************************************************************************/
#define JEMALLOC_H_TYPES
#define JEMALLOC_H_TYPES
#include "jemalloc/internal/jemalloc_internal_macros.h"
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
#define ZU(z) ((size_t)z)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifdef JEMALLOC_DEBUG
/* Disable inlining to make debugging easier. */
# define JEMALLOC_INLINE
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# define JEMALLOC_INLINE static inline
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)
@@ -270,6 +259,9 @@ static const bool config_ivsalloc =
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __aarch64__
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
@@ -279,7 +271,7 @@ static const bool config_ivsalloc =
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# ifdef __s390x__
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# ifdef __SH4__
@@ -359,7 +351,11 @@ static const bool config_ivsalloc =
# include <malloc.h>
# define alloca _alloca
# else
# include <alloca.h>
# ifdef JEMALLOC_HAS_ALLOCA_H
# include <alloca.h>
# else
# include <stdlib.h>
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * count)
@@ -428,15 +424,18 @@ static const bool config_ivsalloc =
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
do {} while (0)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
do {} while (0)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
old_rzsize, zero)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
old_rzsize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
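The Valgrind stubs gaining do {} while (0) bodies is the standard C idiom for statement-like macros: it consumes the trailing semicolon and keeps a no-op macro from degenerating into an empty statement, which can trip -Wempty-body and reads ambiguously around if/else. A small illustration with hypothetical macro names:

#define LOG_EMPTY(msg)			/* expands to nothing */
#define LOG_SAFE(msg) do {} while (0)	/* expands to one statement */

static void handle(void) { /* ... */ }

static void
example(int cond)
{
	if (cond)
		LOG_EMPTY("hi");	/* body collapses to a bare ';' */
	else
		handle();

	if (cond)
		LOG_SAFE("hi");		/* a real statement; warning-free */
	else
		handle();
}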
#include "jemalloc/internal/util.h"
@@ -463,7 +462,7 @@ static const bool config_ivsalloc =
#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
@@ -492,14 +491,14 @@ typedef struct {
uint64_t deallocated;
} thread_allocated_t;
/*
* The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
* The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
* argument.
*/
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
extern bool opt_junk;
@@ -559,7 +558,7 @@ void jemalloc_postfork_child(void);
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
@@ -591,13 +590,14 @@ arena_t *choose_arena(arena_t *arena);
* for allocations.
*/
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
arenas_cleanup)
/*
* Compute usable size that would result from allocating an object with the
* specified size.
*/
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{
@@ -612,7 +612,7 @@ s2u(size_t size)
* Compute usable size that would result from allocating an object with the
* specified size and alignment.
*/
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
size_t usize;
@@ -733,32 +733,36 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
void *imallocx(size_t size, bool try_tcache, arena_t *arena);
void *imalloct(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
void *icallocx(size_t size, bool try_tcache, arena_t *arena);
void *icalloct(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idallocx(void *ptr, bool try_tcache);
void idalloct(void *ptr, bool try_tcache);
void idalloc(void *ptr);
void iqallocx(void *ptr, bool try_tcache);
void iqalloct(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena);
void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move);
bool zero);
bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imallocx(size_t size, bool try_tcache, arena_t *arena)
JEMALLOC_ALWAYS_INLINE void *
imalloct(size_t size, bool try_tcache, arena_t *arena)
{
assert(size != 0);
@@ -766,35 +770,35 @@ imallocx(size_t size, bool try_tcache, arena_t *arena)
if (size <= arena_maxclass)
return (arena_malloc(arena, size, false, try_tcache));
else
return (huge_malloc(size, false));
return (huge_malloc(size, false, huge_dss_prec_get(arena)));
}
JEMALLOC_INLINE void *
JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{
return (imallocx(size, true, NULL));
return (imalloct(size, true, NULL));
}
JEMALLOC_INLINE void *
icallocx(size_t size, bool try_tcache, arena_t *arena)
JEMALLOC_ALWAYS_INLINE void *
icalloct(size_t size, bool try_tcache, arena_t *arena)
{
if (size <= arena_maxclass)
return (arena_malloc(arena, size, true, try_tcache));
else
return (huge_malloc(size, true));
return (huge_malloc(size, true, huge_dss_prec_get(arena)));
}
JEMALLOC_INLINE void *
JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{
return (icallocx(size, true, NULL));
return (icalloct(size, true, NULL));
}
JEMALLOC_INLINE void *
ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
JEMALLOC_ALWAYS_INLINE void *
ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{
void *ret;
@@ -809,20 +813,20 @@ ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
ret = arena_palloc(choose_arena(arena), usize,
alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero);
ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
else
ret = huge_palloc(usize, alignment, zero);
ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
}
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
return (ret);
}
JEMALLOC_INLINE void *
JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
return (ipallocx(usize, alignment, zero, true, NULL));
return (ipalloct(usize, alignment, zero, true, NULL));
}
/*
@@ -830,7 +834,7 @@ ipalloc(size_t usize, size_t alignment, bool zero)
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
size_t ret;
@@ -849,12 +853,12 @@ isalloc(const void *ptr, bool demote)
return (ret);
}
JEMALLOC_INLINE size_t
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
return (0);
return (isalloc(ptr, demote));
@@ -882,8 +886,8 @@ p2rz(const void *ptr)
return (u2rz(usize));
}
JEMALLOC_INLINE void
idallocx(void *ptr, bool try_tcache)
JEMALLOC_ALWAYS_INLINE void
idalloct(void *ptr, bool try_tcache)
{
arena_chunk_t *chunk;
@@ -896,35 +900,67 @@ idallocx(void *ptr, bool try_tcache)
huge_dalloc(ptr, true);
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{
idallocx(ptr, true);
idalloct(ptr, true);
}
JEMALLOC_INLINE void
iqallocx(void *ptr, bool try_tcache)
JEMALLOC_ALWAYS_INLINE void
iqalloct(void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
idallocx(ptr, try_tcache);
idalloct(ptr, try_tcache);
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{
iqallocx(ptr, true);
iqalloct(ptr, true);
}
JEMALLOC_INLINE void *
irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena)
{
void *p;
size_t usize, copysize;
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
if (p == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
iqalloct(ptr, try_tcache_dalloc);
return (p);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
void *ret;
size_t oldsize;
assert(ptr != NULL);
@@ -934,72 +970,54 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
size_t usize, copysize;
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
if (no_move)
return (NULL);
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
arena);
if (ret == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller
* has no expectation that the extra bytes will be reliably
* preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
iqallocx(ptr, try_tcache_dalloc);
return (ret);
return (iralloct_realign(ptr, oldsize, size, extra, alignment,
zero, try_tcache_alloc, try_tcache_dalloc, arena));
}
if (no_move) {
if (size <= arena_maxclass) {
return (arena_ralloc_no_move(ptr, oldsize, size,
extra, zero));
} else {
return (huge_ralloc_no_move(ptr, oldsize, size,
extra));
}
if (size + extra <= arena_maxclass) {
return (arena_ralloc(arena, ptr, oldsize, size, extra,
alignment, zero, try_tcache_alloc,
try_tcache_dalloc));
} else {
if (size + extra <= arena_maxclass) {
return (arena_ralloc(arena, ptr, oldsize, size, extra,
alignment, zero, try_tcache_alloc,
try_tcache_dalloc));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero, try_tcache_dalloc));
}
return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
}
}
JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move)
JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
NULL));
return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
}
JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
size_t oldsize;
assert(ptr != NULL);
assert(size != 0);
oldsize = isalloc(ptr, config_prof);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/* Existing object alignment is inadequate. */
return (true);
}
if (size <= arena_maxclass)
return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
else
return (huge_ralloc_no_move(ptr, oldsize, size, extra));
}
malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif
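Also worth calling out from this file: the no_move flag disappears from the realloc path. iralloct() now always returns a (possibly moved) pointer, and the new ixalloc() covers the old no_move case by attempting only an in-place resize and reporting failure as a bool. A hedged sketch of the split from a caller's point of view (this is the shape the public xallocx/rallocx entry points build on):

static void *
grow(void *p, size_t newsize)
{
	if (ixalloc(p, newsize, 0, 0, false) == false)
		return (p);	/* resized in place; pointer unchanged */
	/*
	 * In-place resize failed: fall back to a moving realloc. On
	 * failure this returns NULL and p remains valid.
	 */
	return (iralloc(p, newsize, 0, 0, false));
}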

View File

@@ -0,0 +1,205 @@
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#undef JEMALLOC_PRIVATE_NAMESPACE
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
#undef JEMALLOC_OSATOMIC
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
#undef JEMALLOC_MALLOC_THREAD_CLEANUP
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#undef JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#undef JEMALLOC_MUTEX_INIT_CB
/* Defined if sbrk() is supported. */
#undef JEMALLOC_HAVE_SBRK
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
#undef JEMALLOC_CC_SILENCE
/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
#undef JEMALLOC_CODE_COVERAGE
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#undef JEMALLOC_DEBUG
/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
#undef JEMALLOC_PROF
/* Use libunwind for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBUNWIND
/* Use libgcc for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBGCC
/* Use gcc intrinsics for profile backtracing if defined. */
#undef JEMALLOC_PROF_GCC
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#undef JEMALLOC_TCACHE
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
/* Support memory filling (junk/zero/quarantine/redzone). */
#undef JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
/* Support Valgrind. */
#undef JEMALLOC_VALGRIND
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#undef STATIC_PAGE_SHIFT
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
#undef JEMALLOC_MUNMAP
/*
* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
* disabled by default because it is Linux-specific and it will cause virtual
* memory map holes, much like munmap(2) does.
*/
#undef JEMALLOC_MREMAP
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
#undef JEMALLOC_IVSALLOC
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched.
* madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
* unused, such that they will be discarded rather
* than swapped out.
*/
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE
/*
* Define if operating system has alloca.h header.
*/
#undef JEMALLOC_HAS_ALLOCA_H
/* C99 restrict keyword supported. */
#undef JEMALLOC_HAS_RESTRICT
/* For use by hash code. */
#undef JEMALLOC_BIG_ENDIAN
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
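Every #undef in this new template is a knob that ./configure rewrites when it generates the real jemalloc_internal_defs.h for a build. As a hedged illustration, a generated header on a typical x86-64 Linux box might contain values along these lines (illustrative, not taken from this commit):

/* Excerpt of a generated jemalloc_internal_defs.h (illustrative). */
#define JEMALLOC_PRIVATE_NAMESPACE je_
#define CPU_SPINWAIT __asm__ volatile("pause")
#define JEMALLOC_HAVE_SBRK
#define JEMALLOC_TLS
#define JEMALLOC_PURGE_MADVISE_DONTNEED
#define JEMALLOC_HAS_ALLOCA_H
#define STATIC_PAGE_SHIFT 12	/* 4 KiB pages */
#define LG_SIZEOF_INT 2
#define LG_SIZEOF_LONG 3
#define LG_SIZEOF_INTMAX_T 3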

View File

@@ -0,0 +1,51 @@
/*
* JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
* functions that are static inline functions if inlining is enabled, and
* single-definition library-private functions if inlining is disabled.
*
* JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
* which case the denoted functions are always static, regardless of whether
* inlining is enabled.
*/
#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
/* Disable inlining to make debugging/profiling easier. */
# define JEMALLOC_ALWAYS_INLINE
# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
# define JEMALLOC_INLINE_C static
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
# define JEMALLOC_ALWAYS_INLINE_C \
static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
# define JEMALLOC_INLINE_C static inline
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
#ifdef JEMALLOC_CC_SILENCE
# define UNUSED JEMALLOC_ATTR(unused)
#else
# define UNUSED
#endif
#define ZU(z) ((size_t)z)
#define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifndef JEMALLOC_HAS_RESTRICT
# define restrict
#endif
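Concretely, the two inlining modes mean this: in a normal build, a function declared with JEMALLOC_ALWAYS_INLINE in a header compiles to static inline with the always_inline attribute at every inclusion site, while a JEMALLOC_DEBUG or coverage build strips the keywords so the same text produces exactly one out-of-line definition that a debugger can break on. A hedged sketch with a hypothetical helper:

JEMALLOC_ALWAYS_INLINE size_t
example_add_one(size_t x)
{
	/*
	 * Normal build: static inline, forced inline, duplicated per TU.
	 * Debug/coverage build: a plain function, compiled exactly once.
	 */
	return (x + 1);
}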

View File

@@ -1,367 +0,0 @@
#define a0calloc JEMALLOC_N(a0calloc)
#define a0free JEMALLOC_N(a0free)
#define a0malloc JEMALLOC_N(a0malloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
#define arena_prefork JEMALLOC_N(arena_prefork)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
#define arenas_tls JEMALLOC_N(arenas_tls)
#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_add_z JEMALLOC_N(atomic_add_z)
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_calloc JEMALLOC_N(base_calloc)
#define base_node_alloc JEMALLOC_N(base_node_alloc)
#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
#define base_prefork JEMALLOC_N(base_prefork)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
#define bitmap_init JEMALLOC_N(bitmap_init)
#define bitmap_set JEMALLOC_N(bitmap_set)
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
#define bitmap_size JEMALLOC_N(bitmap_size)
#define bitmap_unset JEMALLOC_N(bitmap_unset)
#define bt_init JEMALLOC_N(bt_init)
#define buferror JEMALLOC_N(buferror)
#define choose_arena JEMALLOC_N(choose_arena)
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
#define chunk_alloc JEMALLOC_N(chunk_alloc)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
#define chunk_prefork JEMALLOC_N(chunk_prefork)
#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
#define ckh_insert JEMALLOC_N(ckh_insert)
#define ckh_isearch JEMALLOC_N(ckh_isearch)
#define ckh_iter JEMALLOC_N(ckh_iter)
#define ckh_new JEMALLOC_N(ckh_new)
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
#define ckh_remove JEMALLOC_N(ckh_remove)
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
#define ctl_prefork JEMALLOC_N(ctl_prefork)
#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_mtx JEMALLOC_N(huge_mtx)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
#define huge_prefork JEMALLOC_N(huge_prefork)
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
#define huge_ralloc JEMALLOC_N(huge_ralloc)
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm)
#define icalloc JEMALLOC_N(icalloc)
#define icallocx JEMALLOC_N(icallocx)
#define idalloc JEMALLOC_N(idalloc)
#define idallocx JEMALLOC_N(idallocx)
#define imalloc JEMALLOC_N(imalloc)
#define imallocx JEMALLOC_N(imallocx)
#define ipalloc JEMALLOC_N(ipalloc)
#define ipallocx JEMALLOC_N(ipallocx)
#define iqalloc JEMALLOC_N(iqalloc)
#define iqallocx JEMALLOC_N(iqallocx)
#define iralloc JEMALLOC_N(iralloc)
#define irallocx JEMALLOC_N(irallocx)
#define isalloc JEMALLOC_N(isalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
#define malloc_printf JEMALLOC_N(malloc_printf)
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
#define map_bias JEMALLOC_N(map_bias)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
#define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_total JEMALLOC_N(narenas_total)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
#define opt_narenas JEMALLOC_N(opt_narenas)
#define opt_prof JEMALLOC_N(opt_prof)
#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
#define opt_prof_active JEMALLOC_N(opt_prof_active)
#define opt_prof_final JEMALLOC_N(opt_prof_final)
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
#define opt_quarantine JEMALLOC_N(opt_quarantine)
#define opt_redzone JEMALLOC_N(opt_redzone)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_tcache JEMALLOC_N(opt_tcache)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_valgrind JEMALLOC_N(opt_valgrind)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_purge JEMALLOC_N(pages_purge)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
#define prof_free JEMALLOC_N(prof_free)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_promote JEMALLOC_N(prof_promote)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_boot JEMALLOC_N(quarantine_boot)
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
#define register_zone JEMALLOC_N(register_zone)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
#define rtree_prefork JEMALLOC_N(rtree_prefork)
#define rtree_set JEMALLOC_N(rtree_set)
#define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u)
#define set_errno JEMALLOC_N(set_errno)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_chunks JEMALLOC_N(stats_chunks)
#define stats_print JEMALLOC_N(stats_print)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
#define tcache_booted JEMALLOC_N(tcache_booted)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_destroy JEMALLOC_N(tcache_destroy)
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
#define u2rz JEMALLOC_N(u2rz)

View File

@ -0,0 +1,5 @@
#!/bin/sh
for symbol in `cat $1` ; do
echo "#define ${symbol} JEMALLOC_N(${symbol})"
done
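
The net effect is that every internal symbol is rewritten into the private namespace. A minimal sketch of the expansion, assuming the conventional token-pasting definition of JEMALLOC_N and the default je_ prefix (both defined elsewhere in the tree; this illustration is not the literal upstream definition):

/* Hypothetical illustration only; the real macro pastes
 * JEMALLOC_PRIVATE_NAMESPACE onto the symbol name. */
#define JEMALLOC_N(n) je_##n
#define bitmap_init JEMALLOC_N(bitmap_init) /* bitmap_init -> je_bitmap_init */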

View File

@ -0,0 +1,413 @@
a0calloc
a0free
a0malloc
arena_alloc_junk_small
arena_bin_index
arena_bin_info
arena_boot
arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_locked
arena_dalloc_junk_large
arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_locked
arena_dalloc_small
arena_dss_prec_get
arena_dss_prec_set
arena_malloc
arena_malloc_large
arena_malloc_small
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_dirty_get
arena_mapbits_get
arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
arena_mapbits_unzeroed_set
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_mapp_get
arena_maxclass
arena_new
arena_palloc
arena_postfork_child
arena_postfork_parent
arena_prefork
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
arena_prof_ctx_get
arena_prof_ctx_set
arena_prof_promoted
arena_ptr_small_binind_get
arena_purge_all
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_redzone_corruption
arena_run_regind
arena_salloc
arena_stats_merge
arena_tcache_fill_small
arenas
arenas_booted
arenas_cleanup
arenas_extend
arenas_initialized
arenas_lock
arenas_tls
arenas_tsd
arenas_tsd_boot
arenas_tsd_cleanup_wrapper
arenas_tsd_get
arenas_tsd_get_wrapper
arenas_tsd_init_head
arenas_tsd_set
atomic_add_u
atomic_add_uint32
atomic_add_uint64
atomic_add_z
atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
base_alloc
base_boot
base_calloc
base_node_alloc
base_node_dealloc
base_postfork_child
base_postfork_parent
base_prefork
bitmap_full
bitmap_get
bitmap_info_init
bitmap_info_ngroups
bitmap_init
bitmap_set
bitmap_sfu
bitmap_size
bitmap_unset
bt_init
buferror
choose_arena
choose_arena_hard
chunk_alloc
chunk_alloc_dss
chunk_alloc_mmap
chunk_boot
chunk_dealloc
chunk_dealloc_mmap
chunk_dss_boot
chunk_dss_postfork_child
chunk_dss_postfork_parent
chunk_dss_prec_get
chunk_dss_prec_set
chunk_dss_prefork
chunk_in_dss
chunk_npages
chunk_postfork_child
chunk_postfork_parent
chunk_prefork
chunk_unmap
chunks_mtx
chunks_rtree
chunksize
chunksize_mask
ckh_bucket_search
ckh_count
ckh_delete
ckh_evict_reloc_insert
ckh_insert
ckh_isearch
ckh_iter
ckh_new
ckh_pointer_hash
ckh_pointer_keycomp
ckh_rebuild
ckh_remove
ckh_search
ckh_string_hash
ckh_string_keycomp
ckh_try_bucket_insert
ckh_try_insert
ctl_boot
ctl_bymib
ctl_byname
ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
dss_prec_names
extent_tree_ad_first
extent_tree_ad_insert
extent_tree_ad_iter
extent_tree_ad_iter_recurse
extent_tree_ad_iter_start
extent_tree_ad_last
extent_tree_ad_new
extent_tree_ad_next
extent_tree_ad_nsearch
extent_tree_ad_prev
extent_tree_ad_psearch
extent_tree_ad_remove
extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szad_first
extent_tree_szad_insert
extent_tree_szad_iter
extent_tree_szad_iter_recurse
extent_tree_szad_iter_start
extent_tree_szad_last
extent_tree_szad_new
extent_tree_szad_next
extent_tree_szad_nsearch
extent_tree_szad_prev
extent_tree_szad_psearch
extent_tree_szad_remove
extent_tree_szad_reverse_iter
extent_tree_szad_reverse_iter_recurse
extent_tree_szad_reverse_iter_start
extent_tree_szad_search
get_errno
hash
hash_fmix_32
hash_fmix_64
hash_get_block_32
hash_get_block_64
hash_rotl_32
hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
huge_allocated
huge_boot
huge_dalloc
huge_dalloc_junk
huge_dss_prec_get
huge_malloc
huge_mtx
huge_ndalloc
huge_nmalloc
huge_palloc
huge_postfork_child
huge_postfork_parent
huge_prefork
huge_prof_ctx_get
huge_prof_ctx_set
huge_ralloc
huge_ralloc_no_move
huge_salloc
iallocm
icalloc
icalloct
idalloc
idalloct
imalloc
imalloct
ipalloc
ipalloct
iqalloc
iqalloct
iralloc
iralloct
iralloct_realign
isalloc
isthreaded
ivsalloc
ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
malloc_cprintf
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
malloc_mutex_postfork_parent
malloc_mutex_prefork
malloc_mutex_unlock
malloc_printf
malloc_snprintf
malloc_strtoumax
malloc_tsd_boot
malloc_tsd_cleanup_register
malloc_tsd_dalloc
malloc_tsd_malloc
malloc_tsd_no_cleanup
malloc_vcprintf
malloc_vsnprintf
malloc_write
map_bias
mb_write
mutex_boot
narenas_auto
narenas_total
narenas_total_get
ncpus
nhbins
opt_abort
opt_dss
opt_junk
opt_lg_chunk
opt_lg_dirty_mult
opt_lg_prof_interval
opt_lg_prof_sample
opt_lg_tcache_max
opt_narenas
opt_prof
opt_prof_accum
opt_prof_active
opt_prof_final
opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_quarantine
opt_redzone
opt_stats_print
opt_tcache
opt_utrace
opt_valgrind
opt_xmalloc
opt_zero
p2rz
pages_purge
pow2_ceil
prof_backtrace
prof_boot0
prof_boot1
prof_boot2
prof_bt_count
prof_ctx_get
prof_ctx_set
prof_dump_open
prof_free
prof_gdump
prof_idump
prof_interval
prof_lookup
prof_malloc
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork
prof_promote
prof_realloc
prof_sample_accum_update
prof_sample_threshold_update
prof_tdata_booted
prof_tdata_cleanup
prof_tdata_get
prof_tdata_init
prof_tdata_initialized
prof_tdata_tls
prof_tdata_tsd
prof_tdata_tsd_boot
prof_tdata_tsd_cleanup_wrapper
prof_tdata_tsd_get
prof_tdata_tsd_get_wrapper
prof_tdata_tsd_init_head
prof_tdata_tsd_set
quarantine
quarantine_alloc_hook
quarantine_boot
quarantine_booted
quarantine_cleanup
quarantine_init
quarantine_tls
quarantine_tsd
quarantine_tsd_boot
quarantine_tsd_cleanup_wrapper
quarantine_tsd_get
quarantine_tsd_get_wrapper
quarantine_tsd_init_head
quarantine_tsd_set
register_zone
rtree_delete
rtree_get
rtree_get_locked
rtree_new
rtree_postfork_child
rtree_postfork_parent
rtree_prefork
rtree_set
s2u
sa2u
set_errno
small_size2bin
stats_cactive
stats_cactive_add
stats_cactive_get
stats_cactive_sub
stats_chunks
stats_print
tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
tcache_arena_associate
tcache_arena_dissociate
tcache_bin_flush_large
tcache_bin_flush_small
tcache_bin_info
tcache_boot0
tcache_boot1
tcache_booted
tcache_create
tcache_dalloc_large
tcache_dalloc_small
tcache_destroy
tcache_enabled_booted
tcache_enabled_get
tcache_enabled_initialized
tcache_enabled_set
tcache_enabled_tls
tcache_enabled_tsd
tcache_enabled_tsd_boot
tcache_enabled_tsd_cleanup_wrapper
tcache_enabled_tsd_get
tcache_enabled_tsd_get_wrapper
tcache_enabled_tsd_init_head
tcache_enabled_tsd_set
tcache_event
tcache_event_hard
tcache_flush
tcache_get
tcache_initialized
tcache_maxclass
tcache_salloc
tcache_stats_merge
tcache_thread_cleanup
tcache_tls
tcache_tsd
tcache_tsd_boot
tcache_tsd_cleanup_wrapper
tcache_tsd_get
tcache_tsd_get_wrapper
tcache_tsd_init_head
tcache_tsd_set
thread_allocated_booted
thread_allocated_initialized
thread_allocated_tls
thread_allocated_tsd
thread_allocated_tsd_boot
thread_allocated_tsd_cleanup_wrapper
thread_allocated_tsd_get
thread_allocated_tsd_get_wrapper
thread_allocated_tsd_init_head
thread_allocated_tsd_set
tsd_init_check_recursion
tsd_init_finish
u2rz

View File

@ -0,0 +1,5 @@
#!/bin/sh
for symbol in `cat $1` ; do
echo "#undef ${symbol}"
done

View File

@ -25,7 +25,7 @@
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
#define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
\
@ -35,7 +35,7 @@
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
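
The profiler seeds this generator with Knuth's 64-bit LCG constants (see the prof_sample_threshold_update hunk later in this commit). A usage sketch, assuming the macro body elided from this hunk advances state as state = state * a + c and leaves the top lg_range bits in r:

{
    uint64_t state = 42; /* arbitrary seed; any nonzero value works */
    uint64_t r;

    /* Draw a 53-bit pseudo-random value, as the profiler does. */
    prng64(r, 53, state, UINT64_C(6364136223846793005),
        UINT64_C(1442695040888963407));
}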

View File

@ -8,7 +8,11 @@ typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#define PROF_PREFIX_DEFAULT "jeprof"
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
@ -129,6 +133,7 @@ struct prof_ctx_s {
* limbo due to one of:
* - Initializing per thread counters associated with this ctx.
* - Preparing to destroy this ctx.
* - Dumping a heap profile that includes this ctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* ctx.
*/
@ -145,7 +150,11 @@ struct prof_ctx_s {
* this context.
*/
ql_head(prof_thr_cnt_t) cnts_ql;
/* Linkage for list of contexts to be dumped. */
ql_elm(prof_ctx_t) dump_link;
};
typedef ql_head(prof_ctx_t) prof_ctx_list_t;
struct prof_tdata_s {
/*
@ -195,7 +204,12 @@ extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern char opt_prof_prefix[PATH_MAX + 1];
extern char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
1];
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
@ -215,6 +229,11 @@ extern bool prof_promote;
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_bt_count(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
@ -237,7 +256,7 @@ void prof_postfork_child(void);
\
assert(size == s2u(size)); \
\
prof_tdata = prof_tdata_get(); \
prof_tdata = prof_tdata_get(true); \
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
if (prof_tdata != NULL) \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
@ -286,14 +305,14 @@ void prof_postfork_child(void);
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
prof_tdata_t *prof_tdata_get(void);
prof_tdata_t *prof_tdata_get(bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
void prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt);
void prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
size_t old_size, prof_ctx_t *old_ctx);
void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
size_t old_usize, prof_ctx_t *old_ctx);
void prof_free(const void *ptr, size_t size);
#endif
@ -304,17 +323,15 @@ malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
prof_tdata_cleanup)
JEMALLOC_INLINE prof_tdata_t *
prof_tdata_get(void)
prof_tdata_get(bool create)
{
prof_tdata_t *prof_tdata;
cassert(config_prof);
prof_tdata = *prof_tdata_tsd_get();
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) {
if (prof_tdata == NULL)
prof_tdata = prof_tdata_init();
}
if (create && prof_tdata == NULL)
prof_tdata = prof_tdata_init();
return (prof_tdata);
}
@ -322,6 +339,20 @@ prof_tdata_get(void)
JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
/*
* The body of this function is compiled out unless heap profiling is
* enabled, so that it is possible to compile jemalloc with floating
* point support completely disabled. Avoiding floating point code is
* important on memory-constrained systems, but it also enables a
* workaround for versions of glibc that don't properly save/restore
* floating point registers during dynamic lazy symbol loading (which
* internally calls into whatever malloc implementation happens to be
* integrated into the application). Note that some compilers (e.g.
* gcc 4.8) may use floating point registers for fast memory moves, so
* jemalloc must be compiled with such optimizations disabled (e.g.
* -mno-sse) in order for the workaround to be complete.
*/
#ifdef JEMALLOC_PROF
uint64_t r;
double u;
@ -343,7 +374,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
* Luc Devroye
* Springer-Verlag, New York, 1986
* pp 500
* (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
* (http://luc.devroye.org/rnbookindex.html)
*/
prng64(r, 53, prof_tdata->prng_state,
UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
@ -351,6 +382,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
prof_tdata->threshold = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+ (uint64_t)1U;
#endif
}
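
In effect the threshold is an inverse-CDF draw from a geometric distribution: with $p = 2^{-\mathrm{lg\_prof\_sample}}$ and $u$ uniform on $(0,1)$, the computation above is

$$\mathrm{threshold} = \left\lfloor \frac{\ln u}{\ln(1-p)} \right\rfloor + 1, \qquad \mathbb{E}[\mathrm{threshold}] \approx \frac{1}{p} = 2^{\mathrm{lg\_prof\_sample}},$$

i.e. about 512 KiB between samples at the default lg_prof_sample of 19.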
JEMALLOC_INLINE prof_ctx_t *
@ -373,7 +405,7 @@ prof_ctx_get(const void *ptr)
}
JEMALLOC_INLINE void
prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
@ -383,7 +415,7 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
arena_prof_ctx_set(ptr, ctx);
arena_prof_ctx_set(ptr, usize, ctx);
} else
huge_prof_ctx_set(ptr, ctx);
}
@ -397,7 +429,7 @@ prof_sample_accum_update(size_t size)
/* Sampling logic is unnecessary if the interval is 1. */
assert(opt_lg_prof_sample != 0);
prof_tdata = *prof_tdata_tsd_get();
prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (true);
@ -418,20 +450,20 @@ prof_sample_accum_update(size_t size)
}
JEMALLOC_INLINE void
prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
{
cassert(config_prof);
assert(ptr != NULL);
assert(size == isalloc(ptr, true));
assert(usize == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
if (prof_sample_accum_update(size)) {
if (prof_sample_accum_update(usize)) {
/*
* Don't sample. For malloc()-like allocation, it is
* always possible to tell in advance how large an
* object's usable size will be, so there should never
* be a difference between the size passed to
* be a difference between the usize passed to
* PROF_ALLOC_PREP() and prof_malloc().
*/
assert((uintptr_t)cnt == (uintptr_t)1U);
@ -439,17 +471,17 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
prof_ctx_set(ptr, cnt->ctx);
prof_ctx_set(ptr, usize, cnt->ctx);
cnt->epoch++;
/*********/
mb_write();
/*********/
cnt->cnts.curobjs++;
cnt->cnts.curbytes += size;
cnt->cnts.curbytes += usize;
if (opt_prof_accum) {
cnt->cnts.accumobjs++;
cnt->cnts.accumbytes += size;
cnt->cnts.accumbytes += usize;
}
/*********/
mb_write();
@ -459,12 +491,12 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
mb_write();
/*********/
} else
prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
}
JEMALLOC_INLINE void
prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
size_t old_size, prof_ctx_t *old_ctx)
prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
size_t old_usize, prof_ctx_t *old_ctx)
{
prof_thr_cnt_t *told_cnt;
@ -472,15 +504,15 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
if (ptr != NULL) {
assert(size == isalloc(ptr, true));
assert(usize == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
if (prof_sample_accum_update(size)) {
if (prof_sample_accum_update(usize)) {
/*
* Don't sample. The size passed to
* Don't sample. The usize passed to
* PROF_ALLOC_PREP() was larger than what
* actually got allocated, so a backtrace was
* captured for this allocation, even though
* its actual size was insufficient to cross
* its actual usize was insufficient to cross
* the sample threshold.
*/
cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
@ -497,7 +529,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
*/
malloc_mutex_lock(old_ctx->lock);
old_ctx->cnt_merged.curobjs--;
old_ctx->cnt_merged.curbytes -= old_size;
old_ctx->cnt_merged.curbytes -= old_usize;
malloc_mutex_unlock(old_ctx->lock);
told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
}
@ -507,23 +539,23 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
if ((uintptr_t)told_cnt > (uintptr_t)1U)
told_cnt->epoch++;
if ((uintptr_t)cnt > (uintptr_t)1U) {
prof_ctx_set(ptr, cnt->ctx);
prof_ctx_set(ptr, usize, cnt->ctx);
cnt->epoch++;
} else if (ptr != NULL)
prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
/*********/
mb_write();
/*********/
if ((uintptr_t)told_cnt > (uintptr_t)1U) {
told_cnt->cnts.curobjs--;
told_cnt->cnts.curbytes -= old_size;
told_cnt->cnts.curbytes -= old_usize;
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
cnt->cnts.curobjs++;
cnt->cnts.curbytes += size;
cnt->cnts.curbytes += usize;
if (opt_prof_accum) {
cnt->cnts.accumobjs++;
cnt->cnts.accumbytes += size;
cnt->cnts.accumbytes += usize;
}
}
/*********/

View File

@ -0,0 +1,6 @@
#!/bin/sh
for nm in `cat $1` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
echo "#define je_${n} JEMALLOC_N(${n})"
done

View File

@ -0,0 +1,6 @@
#!/bin/sh
for nm in `cat $1` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
echo "#undef je_${n}"
done

View File

@ -1,61 +1,61 @@
/*
* List definitions.
*/
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
#define ql_head_initializer(a_head) {NULL}
#define ql_elm(a_type) qr(a_type)
/* List functions. */
#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
#define ql_first(a_head) ((a_head)->qlh_first)
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
@ -66,18 +66,18 @@ struct { \
} \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
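
A minimal sketch of how these macros compose (widget_t and its link field are illustrative names, not jemalloc types):

typedef struct widget_s widget_t;
struct widget_s {
    int val;
    ql_elm(widget_t) link; /* embedded linkage; expands to qr(widget_t) */
};

static ql_head(widget_t) widgets = ql_head_initializer(widgets);

static void
widget_append(widget_t *w)
{
    ql_elm_new(w, link); /* self-link the element before inserting */
    ql_tail_insert(&widgets, w, link);
}

Traversal then reads ql_foreach(w, &widgets, link) { ... }, visiting elements head to tail.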

View File

@ -1,28 +1,28 @@
/* Ring definitions. */
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) \
do \
{ \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
@ -31,7 +31,7 @@ struct { \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
@ -42,10 +42,10 @@ struct { \
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
@ -54,13 +54,13 @@ struct { \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \

View File

@ -1,6 +1,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;
/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
@ -8,17 +11,57 @@
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct quarantine_obj_s {
void *ptr;
size_t usize;
};
struct quarantine_s {
size_t curbytes;
size_t curobjs;
size_t first;
#define LG_MAXOBJS_INIT 10
size_t lg_maxobjs;
quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};
#endif /* JEMALLOC_H_STRUCTS */
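
The "dynamically sized ring buffer" comment implies power-of-two index wrapping over objs. A hypothetical enqueue sketch (quarantine_push is an illustrative name; the real logic lives in quarantine.c):

static void
quarantine_push(quarantine_t *q, void *ptr, size_t usize)
{
    size_t mask = (ZU(1) << q->lg_maxobjs) - 1;
    quarantine_obj_t *obj = &q->objs[(q->first + q->curobjs) & mask];

    obj->ptr = ptr;
    obj->usize = usize;
    q->curobjs++;
    q->curbytes += usize;
}

Draining would read objs[first] and advance first under the same mask.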
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
quarantine_t *quarantine_init(size_t lg_maxobjs);
void quarantine(void *ptr);
void quarantine_cleanup(void *arg);
bool quarantine_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *)
void quarantine_alloc_hook(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
malloc_tsd_externs(quarantine, quarantine_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
quarantine_cleanup)
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
quarantine_t *quarantine;
assert(config_fill && opt_quarantine);
quarantine = *quarantine_tsd_get();
if (quarantine == NULL)
quarantine_init(LG_MAXOBJS_INIT);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -22,10 +22,6 @@
#ifndef RB_H_
#define RB_H_
#if 0
__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 204493 2010-02-28 22:57:13Z jasone $");
#endif
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \

View File

@ -14,17 +14,18 @@ typedef struct rtree_s rtree_t;
* Size of each radix tree node (must be a power of 2). This impacts tree
* depth.
*/
#if (LG_SIZEOF_PTR == 2)
# define RTREE_NODESIZE (1U << 14)
#else
# define RTREE_NODESIZE CACHELINE
#endif
#define RTREE_NODESIZE (1U << 16)
typedef void *(rtree_alloc_t)(size_t);
typedef void (rtree_dalloc_t)(void *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_s {
rtree_alloc_t *alloc;
rtree_dalloc_t *dalloc;
malloc_mutex_t mutex;
void **root;
unsigned height;
@ -35,7 +36,8 @@ struct rtree_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rtree_t *rtree_new(unsigned bits);
rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
void rtree_delete(rtree_t *rtree);
void rtree_prefork(rtree_t *rtree);
void rtree_postfork_parent(rtree_t *rtree);
void rtree_postfork_child(rtree_t *rtree);
@ -45,20 +47,20 @@ void rtree_postfork_child(rtree_t *rtree);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
#ifndef JEMALLOC_DEBUG
void *rtree_get_locked(rtree_t *rtree, uintptr_t key);
#ifdef JEMALLOC_DEBUG
uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
void *rtree_get(rtree_t *rtree, uintptr_t key);
bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
JEMALLOC_INLINE void * \
JEMALLOC_INLINE uint8_t \
f(rtree_t *rtree, uintptr_t key) \
{ \
void *ret; \
uint8_t ret; \
uintptr_t subkey; \
unsigned i, lshift, height, bits; \
void **node, **child; \
@ -68,12 +70,12 @@ f(rtree_t *rtree, uintptr_t key) \
i < height - 1; \
i++, lshift += bits, node = child) { \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
3)) - bits); \
child = (void**)node[subkey]; \
if (child == NULL) { \
RTREE_UNLOCK(&rtree->mutex); \
return (NULL); \
return (0); \
} \
} \
\
@ -84,7 +86,10 @@ f(rtree_t *rtree, uintptr_t key) \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
bits); \
ret = node[subkey]; \
{ \
uint8_t *leaf = (uint8_t *)node; \
ret = leaf[subkey]; \
} \
RTREE_UNLOCK(&rtree->mutex); \
\
RTREE_GET_VALIDATE \
@ -123,7 +128,7 @@ RTREE_GET_GENERATE(rtree_get)
#undef RTREE_GET_VALIDATE
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, void *val)
rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
{
uintptr_t subkey;
unsigned i, lshift, height, bits;
@ -138,14 +143,14 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val)
bits);
child = (void**)node[subkey];
if (child == NULL) {
child = (void**)base_alloc(sizeof(void *) <<
rtree->level2bits[i+1]);
size_t size = ((i + 1 < height - 1) ? sizeof(void *)
: (sizeof(uint8_t))) << rtree->level2bits[i+1];
child = (void**)rtree->alloc(size);
if (child == NULL) {
malloc_mutex_unlock(&rtree->mutex);
return (true);
}
memset(child, 0, sizeof(void *) <<
rtree->level2bits[i+1]);
memset(child, 0, size);
node[subkey] = child;
}
}
@ -153,7 +158,10 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val)
/* node is a leaf, so it contains values rather than node pointers. */
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
node[subkey] = val;
{
uint8_t *leaf = (uint8_t *)node;
leaf[subkey] = val;
}
malloc_mutex_unlock(&rtree->mutex);
return (false);
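
The repeated shift pair above is plain bit-field extraction. A sketch, assuming 64-bit pointers (LG_SIZEOF_PTR == 3, so ZU(1) << (LG_SIZEOF_PTR+3) == 64); rtree_subkey_example is an illustrative helper, not a jemalloc function:

static inline uintptr_t
rtree_subkey_example(uintptr_t key, unsigned lshift, unsigned bits)
{
    /* Drop the lshift bits already consumed by outer levels, then keep
     * the next `bits` most-significant bits as the index at this level. */
    return ((key << lshift) >> (64 - bits));
}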

View File

@ -140,11 +140,11 @@ void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, tcache, tcache_t *, NULL,
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_INLINE, tcache_enabled, tcache_enabled_t,
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
tcache_enabled_default, malloc_tsd_no_cleanup)
JEMALLOC_INLINE void
@ -206,7 +206,7 @@ tcache_enabled_set(bool enabled)
}
}
JEMALLOC_INLINE tcache_t *
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
tcache_t *tcache;
@ -258,7 +258,7 @@ tcache_get(bool create)
return (tcache);
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{
@ -271,7 +271,7 @@ tcache_event(tcache_t *tcache)
tcache_event_hard(tcache);
}
JEMALLOC_INLINE void *
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
void *ret;
@ -287,7 +287,7 @@ tcache_alloc_easy(tcache_bin_t *tbin)
return (ret);
}
JEMALLOC_INLINE void *
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
void *ret;
@ -297,6 +297,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
binind = SMALL_SIZE2BIN(size);
assert(binind < NBINS);
tbin = &tcache->tbins[binind];
size = arena_bin_info[binind].reg_size;
ret = tcache_alloc_easy(tbin);
if (ret == NULL) {
ret = tcache_alloc_small_hard(tcache, tbin, binind);
@ -313,6 +314,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
} else if (opt_zero)
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
@ -330,7 +332,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
return (ret);
}
JEMALLOC_INLINE void *
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
void *ret;
@ -367,6 +369,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
else if (opt_zero)
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
@ -382,7 +385,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
return (ret);
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
tcache_bin_t *tbin;
@ -406,7 +409,7 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
tcache_event(tcache);
}
JEMALLOC_INLINE void
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
size_t binind;

View File

@ -6,6 +6,12 @@
typedef bool (*malloc_tsd_cleanup_t)(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
typedef struct tsd_init_block_s tsd_init_block_t;
typedef struct tsd_init_head_s tsd_init_head_t;
#endif
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
* are four macros that support (at least) three use cases: file-private,
@ -75,12 +81,13 @@ extern __thread a_type a_name##_tls; \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##_tsd; \
extern bool a_name##_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##_tsd; \
extern tsd_init_head_t a_name##_tsd_init_head; \
extern bool a_name##_booted;
#endif
@ -105,6 +112,10 @@ a_attr bool a_name##_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##_tsd; \
a_attr tsd_init_head_t a_name##_tsd_init_head = { \
ql_head_initializer(blocks), \
MALLOC_MUTEX_INITIALIZER \
}; \
a_attr bool a_name##_booted = false;
#endif
@ -333,8 +344,14 @@ a_name##_tsd_get_wrapper(void) \
pthread_getspecific(a_name##_tsd); \
\
if (wrapper == NULL) { \
tsd_init_block_t block; \
wrapper = tsd_init_check_recursion( \
&a_name##_tsd_init_head, &block); \
if (wrapper) \
return (wrapper); \
wrapper = (a_name##_tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
block.data = wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
@ -350,6 +367,7 @@ a_name##_tsd_get_wrapper(void) \
" TSD for "#a_name"\n"); \
abort(); \
} \
tsd_init_finish(&a_name##_tsd_init_head, &block); \
} \
return (wrapper); \
} \
@ -379,6 +397,19 @@ a_name##_tsd_set(a_type *val) \
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
struct tsd_init_block_s {
ql_elm(tsd_init_block_t) link;
pthread_t thread;
void *data;
};
struct tsd_init_head_s {
ql_head(tsd_init_block_t) blocks;
malloc_mutex_t lock;
};
#endif
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
@ -388,6 +419,12 @@ void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *);
void malloc_tsd_cleanup_register(bool (*f)(void));
void malloc_tsd_boot(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *tsd_init_check_recursion(tsd_init_head_t *head,
tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
#endif
#endif /* JEMALLOC_H_EXTERNS */
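
These hooks exist because pthread-keyed TSD can recurse during bootstrap: allocating the wrapper re-enters malloc, which re-enters the TSD getter. A sketch of what the check plausibly does with the structures declared above (the actual implementation is in tsd.c):

void *
tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
{
    pthread_t self = pthread_self();
    tsd_init_block_t *iter;

    /* If this thread is already mid-initialization, return its
     * in-progress data rather than recursing forever. */
    malloc_mutex_lock(&head->lock);
    ql_foreach(iter, &head->blocks, link) {
        if (pthread_equal(iter->thread, self)) {
            malloc_mutex_unlock(&head->lock);
            return (iter->data);
        }
    }
    ql_elm_new(block, link);
    block->thread = self;
    ql_tail_insert(&head->blocks, block, link);
    malloc_mutex_unlock(&head->lock);
    return (NULL);
}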
/******************************************************************************/

View File

@ -14,7 +14,7 @@
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
#define JEMALLOC_CONCAT(...) __VA_ARGS__
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/*
* Silence compiler warnings due to uninitialized values. This is used
@ -42,12 +42,6 @@
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
if ((c) == false) \
assert(false); \
} while (0)
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
@ -69,10 +63,18 @@
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (config_debug && !(e)) \
not_implemented(); \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
if ((c) == false) \
not_reached(); \
} while (0)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
@ -82,8 +84,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int buferror(char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base);
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr,
char **restrict endptr, int base);
void malloc_write(const char *s);
/*
@ -107,7 +110,6 @@ void malloc_printf(const char *format, ...)
#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
void malloc_write(const char *s);
void set_errno(int errnum);
int get_errno(void);
#endif

View File

@ -1,157 +0,0 @@
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "@jemalloc_version@"
#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
#include "jemalloc_defs@install_suffix@.h"
#ifdef JEMALLOC_EXPERIMENTAL
#define ALLOCM_LG_ALIGN(la) (la)
#if LG_SIZEOF_PTR == 2
#define ALLOCM_ALIGN(a) (ffs(a)-1)
#else
#define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
#endif
#define ALLOCM_ZERO ((int)0x40)
#define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
#define ALLOCM_SUCCESS 0
#define ALLOCM_ERR_OOM 1
#define ALLOCM_ERR_NOT_MOVED 2
#endif
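
A hypothetical call combining these flags with the allocm() declared below: 4096 bytes, 64-byte aligned (ALLOCM_ALIGN(64) encodes lg 64 == 6 via ffs) and zero-filled, with rsize receiving the usable size. Per the note that follows, application code omits the je_ prefix:

{
    void *p;
    size_t rsize;

    if (allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO) !=
        ALLOCM_SUCCESS) {
        /* out of memory */
    }
}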
/*
* The je_ prefix on the following public symbol declarations is an artifact of
* namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see below).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size,
size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#endif
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
#ifndef JEMALLOC_NO_DEMANGLE
#define JEMALLOC_NO_DEMANGLE
#endif
#define malloc_conf je_malloc_conf
#define malloc_message je_malloc_message
#define malloc je_malloc
#define calloc je_calloc
#define posix_memalign je_posix_memalign
#define aligned_alloc je_aligned_alloc
#define realloc je_realloc
#define free je_free
#define malloc_usable_size je_malloc_usable_size
#define malloc_stats_print je_malloc_stats_print
#define mallctl je_mallctl
#define mallctlnametomib je_mallctlnametomib
#define mallctlbymib je_mallctlbymib
#define memalign je_memalign
#define valloc je_valloc
#ifdef JEMALLOC_EXPERIMENTAL
#define allocm je_allocm
#define rallocm je_rallocm
#define sallocm je_sallocm
#define dallocm je_dallocm
#define nallocm je_nallocm
#endif
#endif
/*
* The je_* macros can be used as stable alternative names for the public
* jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant
* for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_malloc_usable_size
#undef je_malloc_stats_print
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_memalign
#undef je_valloc
#ifdef JEMALLOC_EXPERIMENTAL
#undef je_allocm
#undef je_rallocm
#undef je_sallocm
#undef je_dallocm
#undef je_nallocm
#endif
#endif
#ifdef __cplusplus
};
#endif
#endif /* JEMALLOC_H_ */

deps/jemalloc/include/jemalloc/jemalloc.sh vendored Executable file
View File

@ -0,0 +1,28 @@
#!/bin/sh
objroot=$1
cat <<EOF
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
EOF
for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
jemalloc_protos.h jemalloc_mangle.h ; do
cat "${objroot}include/jemalloc/${hdr}" \
| grep -v 'Generated from .* by configure\.' \
| sed -e 's/^#define /#define /g' \
| sed -e 's/ $//g'
echo
done
cat <<EOF
#ifdef __cplusplus
};
#endif
#endif /* JEMALLOC_H_ */
EOF

View File

@ -1,222 +1,12 @@
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_malloc_usable_size
#undef je_malloc_stats_print
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_memalign
#undef je_valloc
#undef je_allocm
#undef je_rallocm
#undef je_sallocm
#undef je_dallocm
#undef je_nallocm
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#undef JEMALLOC_PRIVATE_NAMESPACE
#undef JEMALLOC_N
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
#undef JEMALLOC_OSATOMIC
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
#undef JEMALLOC_MALLOC_THREAD_CLEANUP
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#undef JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#undef JEMALLOC_MUTEX_INIT_CB
/* Defined if __attribute__((...)) syntax is supported. */
#undef JEMALLOC_HAVE_ATTR
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
/* Defined if sbrk() is supported. */
#undef JEMALLOC_HAVE_SBRK
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences spurious compiler warnings. */
#undef JEMALLOC_CC_SILENCE
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#undef JEMALLOC_DEBUG
/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
#undef JEMALLOC_PROF
/* Use libunwind for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBUNWIND
/* Use libgcc for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBGCC
/* Use gcc intrinsics for profile backtracing if defined. */
#undef JEMALLOC_PROF_GCC
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#undef JEMALLOC_TCACHE
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
/* Support memory filling (junk/zero/quarantine/redzone). */
#undef JEMALLOC_FILL
/* Support the experimental API. */
#undef JEMALLOC_EXPERIMENTAL
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
/* Support Valgrind. */
#undef JEMALLOC_VALGRIND
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#undef STATIC_PAGE_SHIFT
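/* E.g., with 4 KiB pages (assumed value; configure probes the real one): */
#if 0
#define STATIC_PAGE_SHIFT 12
#define PAGE ((size_t)1 << STATIC_PAGE_SHIFT)	/* 4096 bytes */
#endif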
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
#undef JEMALLOC_MUNMAP
/*
* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
* disabled by default because it is Linux-specific and it will cause virtual
* memory map holes, much like munmap(2) does.
*/
#undef JEMALLOC_MREMAP
/* TLS is used to map arenas and thread caches to threads. */
#undef JEMALLOC_TLS
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
#undef JEMALLOC_IVSALLOC
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#undef JEMALLOC_OVERRIDE_MEMALIGN
#undef JEMALLOC_OVERRIDE_VALLOC
@ -230,33 +20,5 @@
*/
#undef JEMALLOC_USABLE_SIZE_CONST
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched.
* madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
* unused, such that they will be discarded rather
* than swapped out.
*/
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE
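/*
* A sketch of the purge call each flag selects (illustration only, assuming
* POSIX madvise(2) is available):
*/
#if 0
#include <sys/mman.h>
static void
purge(void *addr, size_t length)
{
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
	madvise(addr, length, MADV_DONTNEED);	/* Linux: demand-zero on reuse. */
#else
	madvise(addr, length, MADV_FREE);	/* FreeBSD/Darwin: drop, don't swap. */
#endif
}
#endif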
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
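/* E.g., on a typical LP64 platform (assumed values): */
#if 0
#define LG_SIZEOF_PTR 3		/* sizeof(void *) == 8 */
#define LG_SIZEOF_INT 2		/* sizeof(int) == 4 */
#define LG_SIZEOF_LONG 3	/* sizeof(long) == 8 */
#define LG_SIZEOF_INTMAX_T 3	/* sizeof(intmax_t) == 8 */
#endif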

View File

@ -0,0 +1,61 @@
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "@jemalloc_version@"
#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
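/* Sketch of how these flags combine in a mallocx() call (illustration only): */
#if 0
static void *
aligned_zeroed_alloc(void)
{
	/* 4 KiB allocation, 64-byte aligned and zeroed. */
	return (je_mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO));
}
#endif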
#ifdef JEMALLOC_EXPERIMENTAL
# define ALLOCM_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define ALLOCM_ALIGN(a) (ffs(a)-1)
# else
# define ALLOCM_ALIGN(a) \
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define ALLOCM_ZERO ((int)0x40)
# define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
# define ALLOCM_SUCCESS 0
# define ALLOCM_ERR_OOM 1
# define ALLOCM_ERR_NOT_MOVED 2
#endif
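/* Sketch of the experimental call these flags feed (illustration only): */
#if 0
static int
experimental_alloc(void **ptr, size_t *rsize)
{
	/* Page-aligned, zeroed; *rsize receives the usable size. */
	return (je_allocm(ptr, rsize, 4096, ALLOCM_ALIGN(4096) | ALLOCM_ZERO));
}
#endif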
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif defined(_MSC_VER)
# define JEMALLOC_ATTR(s)
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif

View File

@ -0,0 +1,45 @@
#!/bin/sh
public_symbols_txt=$1
symbol_prefix=$2
cat <<EOF
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
EOF
for nm in `cat ${public_symbols_txt}` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
echo "# define ${n} ${symbol_prefix}${n}"
done
cat <<EOF
#endif
/*
* The ${symbol_prefix}* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
EOF
for nm in `cat ${public_symbols_txt}` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
echo "# undef ${symbol_prefix}${n}"
done
cat <<EOF
#endif
EOF
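# A sketch of the generated header, assuming ${public_symbols_txt} contains
# the line "malloc:je_malloc" and ${symbol_prefix} is "je_" (illustration
# only):
#
#   #ifdef JEMALLOC_MANGLE
#   # define malloc je_malloc
#   #endif
#   #ifndef JEMALLOC_NO_DEMANGLE
#   # undef je_malloc
#   #endif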

View File

@ -0,0 +1,58 @@
/*
* The @je_@ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
*/
extern JEMALLOC_EXPORT const char *@je_@malloc_conf;
extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT void *@je_@malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *@je_@calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int @je_@posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *@je_@aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *@je_@realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void @je_@free(void *ptr);
JEMALLOC_EXPORT void *@je_@mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *@je_@rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t @je_@xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t @je_@sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void @je_@dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t @je_@nallocx(size_t size, int flags);
JEMALLOC_EXPORT int @je_@mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int @je_@mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int @je_@mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void @je_@malloc_stats_print(void (*write_cb)(void *,
const char *), void *cbopaque, const char *opts);
JEMALLOC_EXPORT size_t @je_@malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * @je_@valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int @je_@allocm(void **ptr, size_t *rsize, size_t size,
int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@rallocm(void **ptr, size_t *rsize, size_t size,
size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@sallocm(const void *ptr, size_t *rsize, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@dallocm(void *ptr, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int @je_@nallocm(size_t *rsize, size_t size, int flags);
#endif
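/* Sketch of a typical mallctl() read through this API (illustration only): */
#if 0
static size_t
stats_allocated(void)
{
	size_t allocated, len = sizeof(allocated);

	if (je_mallctl("stats.allocated", &allocated, &len, NULL, 0) != 0)
		return (0);
	return (allocated);
}
#endif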

View File

@ -0,0 +1,22 @@
#!/bin/sh
public_symbols_txt=$1
cat <<EOF
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
EOF
for nm in `cat ${public_symbols_txt}` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
echo "# define je_${n} ${m}"
done
cat <<EOF
#endif
EOF
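# A sketch of the generated header for a ${public_symbols_txt} line
# "malloc:jemalloc_malloc", as a prefixed build would produce (illustration
# only):
#
#   #ifndef JEMALLOC_NO_RENAME
#   # define je_malloc jemalloc_malloc
#   #endif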