--- version 1.19, 2008/06/23 10:46:25
+++ version 1.43, 2017/11/16 13:54:00
@@ -143 +143 @@ __RCSID("$NetBSD$");
 #ifdef __FreeBSD__
 #include <machine/atomic.h>
 #include <machine/cpufunc.h>
-#endif
 #include <machine/vmparam.h>
+#endif
 
 #include <errno.h>
 #include <limits.h>
@@ -163 +163 @@ __RCSID("$NetBSD$");
 # include <reentrant.h>
 # include "extern.h"
 
-#define STRERROR_R(a, b, c) __strerror_r(a, b, c);
+#define STRERROR_R(a, b, c) strerror_r_ss(a, b, c);
-/*
- * A non-localized version of strerror that avoids bringing in
- * stdio and the locale code.  All the malloc messages are in English,
- * so why bother?
- */
-static int
-__strerror_r(int e, char *s, size_t l)
-{
-    int rval;
-    size_t slen;
-
-    if (e >= 0 && e < sys_nerr) {
-        slen = strlcpy(s, sys_errlist[e], l);
-        rval = 0;
-    } else {
-        slen = snprintf_ss(s, l, "Unknown error %u", e);
-        rval = EINVAL;
-    }
-    return slen >= l ? ERANGE : rval;
-}
 #endif
 
 #ifdef __FreeBSD__
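Note: the hand-rolled __strerror_r() is dropped in favor of strerror_r_ss(), libc's internal signal-safe strerror_r() (the same "_ss" family as the snprintf_ss() call in the deleted code). A minimal sketch of the intended use, assuming the STRERROR_BUF definition from the next hunk:

    char buf[STRERROR_BUF];
    STRERROR_R(ENOMEM, buf, sizeof(buf));  /* format errno without stdio/locale */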
@@ -216 +196 @@
 #define STRERROR_BUF 64
 
 /* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
 
+/*
+ * If you touch the TINY_MIN_2POW definition for any architecture, please
+ * make sure to adjust the corresponding definition for JEMALLOC_TINY_MIN_2POW
+ * in the gcc 4.8 tree in dist/gcc/tree-ssa-ccp.c and verify that a native
+ * gcc is still buildable!
+ */
 #ifdef __i386__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 2
@@ -225 +213 @@
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 3
 #endif
+#ifdef __aarch64__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 3
+# define NO_TLS
+#endif
 #ifdef __alpha__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 3
+# define TINY_MIN_2POW 3
 # define NO_TLS
 #endif
 #ifdef __sparc64__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 3
+# define TINY_MIN_2POW 3
 # define NO_TLS
 #endif
 #ifdef __amd64__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 3
+# define TINY_MIN_2POW 3
 #endif
 #ifdef __arm__
 # define QUANTUM_2POW_MIN 3
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
+# ifdef __ARM_EABI__
+#  define TINY_MIN_2POW 3
+# endif
 # define NO_TLS
 #endif
 #ifdef __powerpc__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
+# define TINY_MIN_2POW 3
 #endif
 #if defined(__sparc__) && !defined(__sparc64__)
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
 #endif
+#ifdef __or1k__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 2
+# define USE_BRK
+#endif
 #ifdef __vax__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 2
@@ -270 +275 @@
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
 #endif
-#ifdef __mips__
+#if defined(__mips__) || defined(__riscv__)
-# define QUANTUM_2POW_MIN 4
+# ifdef _LP64
+#  define SIZEOF_PTR_2POW 3
+#  define TINY_MIN_2POW 3
+# else
 # define SIZEOF_PTR_2POW 2
-# define USE_BRK
+# endif
+# define QUANTUM_2POW_MIN 4
+# define USE_BRK
 #endif
 #ifdef __hppa__
 # define QUANTUM_2POW_MIN 4
+# define TINY_MIN_2POW 4
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
 #endif
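Note: QUANTUM_2POW_MIN fixes the minimum allocation alignment (2^4 = 16 bytes on most of the 64-bit ports above), and TINY_MIN_2POW the smallest "tiny" size class (2^3 = 8 bytes where defined). A sketch of the standard power-of-two round-up this setup enables (illustrative, not quoted from the file):

    size_t quantum = (size_t)1 << QUANTUM_2POW_MIN;          /* e.g. 16 */
    size_t rounded = (size + quantum - 1) & ~(quantum - 1);  /* next multiple */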
@@ -288 +299 @@
 # define SIZEOF_INT_2POW 2
 #endif
 
+/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
+#if (!defined(PIC) && !defined(NO_TLS))
+# define NO_TLS
+#endif
+
 /*
  * Size and alignment of memory chunks that are allocated by the OS's virtual
  * memory system.
@@ -308 +314 @@
 #define CACHELINE ((size_t)(1 << CACHELINE_2POW))
 
 /* Smallest size class to support. */
-#define TINY_MIN_2POW 1
+#ifndef TINY_MIN_2POW
+#define TINY_MIN_2POW 2
+#endif
 
 /*
  * Maximum size class that is a multiple of the quantum, but not (necessarily)
@@ -319 +327 @@
 #define SMALL_MAX_DEFAULT (1 << SMALL_MAX_2POW_DEFAULT)
 
 /*
- * Maximum desired run header overhead.  Runs are sized as small as possible
- * such that this setting is still honored, without violating other constraints.
- * The goal is to make runs as small as possible without exceeding a per run
- * external fragmentation threshold.
+ * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
+ * as small as possible such that this setting is still honored, without
+ * violating other constraints.  The goal is to make runs as small as possible
+ * without exceeding a per run external fragmentation threshold.
  *
- * Note that it is possible to set this low enough that it cannot be honored
- * for some/all object sizes, since there is one bit of header overhead per
- * object (plus a constant).  In such cases, this constraint is relaxed.
+ * We use binary fixed point math for overhead computations, where the binary
+ * point is implicitly RUN_BFP bits to the left.
  *
- * RUN_MAX_OVRHD_RELAX specifies the maximum number of bits per region of
- * overhead for which RUN_MAX_OVRHD is relaxed.
+ * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
+ * honored for some/all object sizes, since there is one bit of header overhead
+ * per object (plus a constant).  This constraint is relaxed (ignored) for runs
+ * that are so small that the per-region overhead is greater than:
+ *
+ *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
  */
-#define RUN_MAX_OVRHD 0.015
-#define RUN_MAX_OVRHD_RELAX 1.5
+#define RUN_BFP 12
+/*                        \/  Implicit binary fixed point. */
+#define RUN_MAX_OVRHD 0x0000003dU
+#define RUN_MAX_OVRHD_RELAX 0x00001800U
 
 /* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
 #define RUN_MAX_SMALL_2POW 15
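Note: the old floating-point constants translate directly into the new RUN_BFP = 12 fixed-point representation: RUN_MAX_OVRHD = 0x3d = 61, and 61/2^12 ≈ 0.0149 (the old 0.015); RUN_MAX_OVRHD_RELAX = 0x1800 = 6144, and 6144/2^12 = 1.5 exactly (the old 1.5). The switch keeps floating point out of malloc without changing the thresholds.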
@@ -360 +373 @@
 /* Set to true once the allocator has been initialized. */
 static bool malloc_initialized = false;
 
+#ifdef _REENTRANT
 /* Used to avoid initialization races. */
 static mutex_t init_lock = MUTEX_INITIALIZER;
 #endif
+#endif
 
 /******************************************************************************/
 /*
@@ -672 +687 @@
  * Chunks.
  */
 
+#ifdef _REENTRANT
 /* Protects chunk-related data structures. */
 static malloc_mutex_t chunks_mtx;
+#endif
 
 /* Tree of chunks that are stand-alone huge allocations. */
 static chunk_tree_t huge;
@@ -724 +741 @@
 static void *base_next_addr;
 static void *base_past_addr; /* Addr immediately past base_pages. */
 static chunk_node_t *base_chunk_nodes; /* LIFO cache of chunk nodes. */
+#ifdef _REENTRANT
 static malloc_mutex_t base_mtx;
+#endif
 #ifdef MALLOC_STATS
 static size_t base_mapped;
 #endif
@@ -741 +760 @@
 static arena_t **arenas;
 static unsigned narenas;
 static unsigned next_arena;
+#ifdef _REENTRANT
 static malloc_mutex_t arenas_mtx; /* Protects arenas initialization. */
+#endif
 
-#ifndef NO_TLS
 /*
  * Map of pthread_self() --> arenas[???], used for selecting an arena to use
  * for allocations.
  */
-static __thread arena_t *arenas_map;
-#define get_arenas_map() (arenas_map)
-#define set_arenas_map(x) (arenas_map = x)
+#ifndef NO_TLS
+static __thread arena_t **arenas_map;
+#else
+static arena_t **arenas_map;
+#endif
+
+#if !defined(NO_TLS) || !defined(_REENTRANT)
+# define get_arenas_map() (arenas_map)
+# define set_arenas_map(x) (arenas_map = x)
 #else
-static thread_key_t arenas_map_key;
-#define get_arenas_map() thr_getspecific(arenas_map_key)
-#define set_arenas_map(x) thr_setspecific(arenas_map_key, x)
+static thread_key_t arenas_map_key = -1;
+
+static inline arena_t **
+get_arenas_map(void)
+{
+    if (!__isthreaded)
+        return arenas_map;
+
+    if (arenas_map_key == -1) {
+        (void)thr_keycreate(&arenas_map_key, NULL);
+        if (arenas_map != NULL) {
+            thr_setspecific(arenas_map_key, arenas_map);
+            arenas_map = NULL;
+        }
+    }
+
+    return thr_getspecific(arenas_map_key);
+}
+
+static __inline void
+set_arenas_map(arena_t **a)
+{
+    if (!__isthreaded) {
+        arenas_map = a;
+        return;
+    }
+
+    if (arenas_map_key == -1) {
+        (void)thr_keycreate(&arenas_map_key, NULL);
+        if (arenas_map != NULL) {
+            _DIAGASSERT(arenas_map == a);
+            arenas_map = NULL;
+        }
+    }
+
+    thr_setspecific(arenas_map_key, a);
+}
 #endif
 
 #ifdef MALLOC_STATS
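Note: the lazy thr_keycreate() above solves a bootstrap problem: malloc can run before the threads library is up, while __isthreaded is still false and thread-specific data is unusable. Early values land in the static arenas_map cell; the first call after threading starts creates the key and migrates the cached value into thread-specific storage, so no arena assignment is lost across the transition.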
@@ -811 +872 @@
 #ifdef MALLOC_STATS
 static void malloc_printf(const char *format, ...);
 #endif
-static char *umax2s(uintmax_t x, char *s);
+static char *size_t2s(size_t x, char *s);
 static bool base_pages_alloc(size_t minsize);
 static void *base_alloc(size_t size);
 static chunk_node_t *base_chunk_node_alloc(void);
@@ -973 +1034 @@ malloc_printf()
 
 /*
  * We don't want to depend on vsnprintf() for production builds, since that can
- * cause unnecessary bloat for static binaries.  umax2s() provides minimal
+ * cause unnecessary bloat for static binaries.  size_t2s() provides minimal
  * integer printing functionality, so that malloc_printf() use can be limited to
  * MALLOC_STATS code.
  */
 #define UMAX2S_BUFSIZE 21
 static char *
-umax2s(uintmax_t x, char *s)
+size_t2s(size_t x, char *s)
 {
     unsigned i;
 
     /* Make sure UMAX2S_BUFSIZE is large enough. */
     /* LINTED */
-    assert(sizeof(uintmax_t) <= 8);
+    assert(sizeof(size_t) <= 8);
 
     i = UMAX2S_BUFSIZE - 1;
     s[i] = '\0';
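Note: the assert guarantees at most a 64-bit value, i.e. at most 20 decimal digits, which is why UMAX2S_BUFSIZE is 21 (digits plus NUL). The rest of the routine is unchanged by this diff and so not shown; a converter of this shape typically builds the string backward from the end of the buffer, roughly (illustrative sketch):

    do {
        i--;
        s[i] = "0123456789"[x % 10];  /* emit least-significant digit */
        x /= 10;
    } while (x > 0);
    return (&s[i]);                   /* string starts mid-buffer */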
@@ -1029 +1090 @@ base_pages_alloc()
  */
 incr = (intptr_t)chunksize
     - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
-if (incr < minsize)
+assert(incr >= 0);
+if ((size_t)incr < minsize)
     incr += csize;
 
 brk_prev = sbrk(incr);
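Note: the old test compared the signed incr against the unsigned minsize, so the usual arithmetic conversions would have turned a negative incr into a huge size_t and silently skipped the adjustment. The new code asserts incr is non-negative first, which makes the explicit (size_t) cast safe. The same class of signed/unsigned bug is fixed in chunk_alloc() in the next hunk, where `incr == size` compared across signedness.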
@@ -1364 +1426 @@ chunk_alloc()
  */
 incr = (intptr_t)size
     - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
-if (incr == size) {
+if (incr == (intptr_t)size) {
     ret = brk_cur;
 } else {
     ret = (void *)((intptr_t)brk_cur + incr);
@@ -1580 +1642 @@ arena_chunk_comp()
 assert(a != NULL);
 assert(b != NULL);
 
+if (a->max_frun_npages < b->max_frun_npages)
+    return -1;
+if (a->max_frun_npages > b->max_frun_npages)
+    return 1;
+
 if ((uintptr_t)a < (uintptr_t)b)
     return (-1);
 else if (a == b)
@@ -1831 +1898 @@ arena_chunk_alloc()
 
 chunk->arena = arena;
 
-/* LINTED */
-RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
-
 /*
  * Claim that no pages are in use, since the header is merely
  * overhead.
@@ -1853 +1917 @@ arena_chunk_alloc()
     chunk->map[chunk_npages - 1].npages = chunk_npages -
         arena_chunk_header_npages;
     chunk->map[chunk_npages - 1].pos = POS_FREE;
 
+    RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
 }
 
 return (chunk);
@@ -1889 +1955 @@
 static arena_run_t *
 arena_run_alloc(arena_t *arena, size_t size)
 {
-    arena_chunk_t *chunk;
+    arena_chunk_t *chunk, *chunk_tmp;
     arena_run_t *run;
-    unsigned need_npages, limit_pages, compl_need_npages;
+    unsigned need_npages;
 
     assert(size <= (chunksize - (arena_chunk_header_npages <<
         pagesize_2pow)));
     assert((size & pagesize_mask) == 0);
 
     /*
-     * Search through arena's chunks in address order for a free run that is
-     * large enough.  Look for the first fit.
+     * Search through the arena chunk tree for a large enough free run.
+     * Tree order ensures that any exact fit is picked immediately or
+     * otherwise the lowest address of the next size.
      */
     need_npages = (unsigned)(size >> pagesize_2pow);
-    limit_pages = chunk_npages - arena_chunk_header_npages;
-    compl_need_npages = limit_pages - need_npages;
     /* LINTED */
-    RB_FOREACH(chunk, arena_chunk_tree_s, &arena->chunks) {
+    for (;;) {
+        chunk_tmp = RB_ROOT(&arena->chunks);
+        chunk = NULL;
+        while (chunk_tmp) {
+            if (chunk_tmp->max_frun_npages == need_npages) {
+                chunk = chunk_tmp;
+                break;
+            }
+            if (chunk_tmp->max_frun_npages < need_npages) {
+                chunk_tmp = RB_RIGHT(chunk_tmp, link);
+                continue;
+            }
+            chunk = chunk_tmp;
+            chunk_tmp = RB_LEFT(chunk, link);
+        }
+        if (chunk == NULL)
+            break;
         /*
-         * Avoid searching this chunk if there are not enough
-         * contiguous free pages for there to possibly be a large
-         * enough free run.
+         * At this point, the chunk must have a cached run size large
+         * enough to fit the allocation.
          */
-        if (chunk->pages_used <= compl_need_npages &&
-            need_npages <= chunk->max_frun_npages) {
+        assert(need_npages <= chunk->max_frun_npages);
+        {
             arena_chunk_map_t *mapelm;
             unsigned i;
             unsigned max_frun_npages = 0;
@@ -1950 +2030 @@ arena_run_alloc()
              * chunk->min_frun_ind was already reset above (if
              * necessary).
              */
+            RB_REMOVE(arena_chunk_tree_s, &arena->chunks, chunk);
             chunk->max_frun_npages = max_frun_npages;
+            RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
         }
     }
 
@@ -2033 +2115 @@ arena_run_dalloc()
     assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
 }
 
-if (chunk->map[run_ind].npages > chunk->max_frun_npages)
+if (chunk->map[run_ind].npages > chunk->max_frun_npages) {
+    RB_REMOVE(arena_chunk_tree_s, &arena->chunks, chunk);
     chunk->max_frun_npages = chunk->map[run_ind].npages;
+    RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
+}
 if (run_ind < chunk->min_frun_ind)
     chunk->min_frun_ind = run_ind;
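Note: this remove/update/reinsert dance here and in arena_run_alloc() above is the required discipline for BSD <sys/tree.h> red-black trees once the comparator keys on max_frun_npages: a node's position depends on its key, so mutating the key while the chunk is linked would corrupt the tree order. The pattern in general form (hypothetical tree and field names):

    RB_REMOVE(tree_s, &tree, node);   /* unlink while the key is still valid */
    node->key = new_value;            /* now safe to mutate */
    RB_INSERT(tree_s, &tree, node);   /* relink at the new position */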
|
|
@@ -2142 +2227 @@ arena_bin_run_size_calc()
 size_t try_run_size, good_run_size;
 unsigned good_nregs, good_mask_nelms, good_reg0_offset;
 unsigned try_nregs, try_mask_nelms, try_reg0_offset;
-float max_ovrhd = RUN_MAX_OVRHD;
 
 assert(min_run_size >= pagesize);
 assert(min_run_size <= arena_maxclass);
@@ -2160 +2244 @@ arena_bin_run_size_calc()
  */
 try_run_size = min_run_size;
 try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) /
-    bin->reg_size) + 1); /* Counter-act the first line of the loop. */
+    bin->reg_size) + 1); /* Counter-act try_nregs-- in loop. */
 do {
     try_nregs--;
     try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
@@ -2194 +2278 @@ arena_bin_run_size_calc()
     } while (sizeof(arena_run_t) + (sizeof(unsigned) *
         (try_mask_nelms - 1)) > try_reg0_offset);
 } while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL
-    && max_ovrhd > RUN_MAX_OVRHD_RELAX / ((float)(bin->reg_size << 3))
-    && ((float)(try_reg0_offset)) / ((float)(try_run_size)) >
-    max_ovrhd);
+    && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
+    && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
 
 assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
     <= good_reg0_offset);
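Note: the two loop conditions are term-for-term translations into RUN_BFP fixed point. The overhead test try_reg0_offset/try_run_size > 0.015 (with max_ovrhd as initialized above) becomes (try_reg0_offset << 12) > 61 * try_run_size, i.e. a ratio above 61/4096 ≈ 0.0149; the relaxation test 0.015 > 1.5/(reg_size * 8) becomes 61 * (reg_size << 3) > 6144, which flips at the same reg_size. Only integer multiplies and shifts remain on this path.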
@@ -2855 +2938 @@ huge_ralloc()
         /* size_t wrap-around */
         return (NULL);
     }
+
+    /*
+     * Remove the old region from the tree now.  If mremap()
+     * returns the region to the system, another thread may
+     * map it for the same huge allocation and insert it into
+     * the tree before we acquire the mutex lock again.
+     */
+    malloc_mutex_lock(&chunks_mtx);
+    key.chunk = __DECONST(void *, ptr);
+    /* LINTED */
+    node = RB_FIND(chunk_tree_s, &huge, &key);
+    assert(node != NULL);
+    assert(node->chunk == ptr);
+    assert(node->size == oldcsize);
+    RB_REMOVE(chunk_tree_s, &huge, node);
+    malloc_mutex_unlock(&chunks_mtx);
+
     newptr = mremap(ptr, oldcsize, NULL, newcsize,
         MAP_ALIGNED(chunksize_2pow));
-    if (newptr != MAP_FAILED) {
+    if (newptr == MAP_FAILED) {
+        /* We still own the old region. */
+        malloc_mutex_lock(&chunks_mtx);
+        RB_INSERT(chunk_tree_s, &huge, node);
+        malloc_mutex_unlock(&chunks_mtx);
+    } else {
         assert(CHUNK_ADDR2BASE(newptr) == newptr);
 
-        /* update tree */
+        /* Insert new or resized old region. */
         malloc_mutex_lock(&chunks_mtx);
-        key.chunk = __DECONST(void *, ptr);
-        /* LINTED */
-        node = RB_FIND(chunk_tree_s, &huge, &key);
-        assert(node != NULL);
-        assert(node->chunk == ptr);
-        assert(node->size == oldcsize);
         node->size = newcsize;
-        if (ptr != newptr) {
-            RB_REMOVE(chunk_tree_s, &huge, node);
-            node->chunk = newptr;
-            RB_INSERT(chunk_tree_s, &huge, node);
-        }
+        node->chunk = newptr;
+        RB_INSERT(chunk_tree_s, &huge, node);
 #ifdef MALLOC_STATS
         huge_nralloc++;
         huge_allocated += newcsize - oldcsize;
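Note: the reordering closes a time-of-check/time-of-use window around mremap(). mremap() can hand the old address range back to the kernel, and a concurrent huge allocation could receive that same range and insert its own node while this thread's node still described it. Unlinking the node before the (unlocked) mremap() call, and re-inserting it afterward on both the success and failure paths, keeps the tree describing only regions the caller actually owns at that moment.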
@@ -3201 +3297 @@ malloc_print_stats()
     opt_xmalloc ? "X" : "x",
     opt_zero ? "Z\n" : "z\n");
 
-_malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
-_malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
-_malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
+_malloc_message("CPUs: ", size_t2s(ncpus, s), "\n", "");
+_malloc_message("Max arenas: ", size_t2s(narenas, s), "\n", "");
+_malloc_message("Pointer size: ", size_t2s(sizeof(void *), s),
     "\n", "");
-_malloc_message("Quantum size: ", umax2s(quantum, s), "\n", "");
-_malloc_message("Max small size: ", umax2s(small_max, s), "\n",
+_malloc_message("Quantum size: ", size_t2s(quantum, s), "\n", "");
+_malloc_message("Max small size: ", size_t2s(small_max, s), "\n",
     "");
 
-_malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
-_malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");
+_malloc_message("Chunk size: ", size_t2s(chunksize, s), "", "");
+_malloc_message(" (2^", size_t2s((size_t)opt_chunk_2pow, s),
+    ")\n", "");
 
 #ifdef MALLOC_STATS
 {
@@ -3308 +3405 @@ malloc_init_hard()
 ssize_t linklen;
 char buf[PATH_MAX + 1];
 const char *opts = "";
+int serrno;
 
 malloc_mutex_lock(&init_lock);
 if (malloc_initialized) {
@@ -3319 +3417 @@ malloc_init_hard()
     return (false);
 }
 
+serrno = errno;
 /* Get number of CPUs. */
 {
     int mib[2];
@@ -3430 +3529 @@ malloc_init_hard()
     opt_chunk_2pow--;
     break;
 case 'K':
-    if (opt_chunk_2pow + 1 <
-        (int)(sizeof(size_t) << 3))
+    /*
+     * There must be fewer pages in a chunk than
+     * can be recorded by the pos field of
+     * arena_chunk_map_t, in order to make POS_FREE
+     * special.
+     */
+    if (opt_chunk_2pow - pagesize_2pow
+        < (sizeof(uint32_t) << 3) - 1)
         opt_chunk_2pow++;
     break;
 case 'n':
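Note: a worked bound, assuming 4 KiB pages (pagesize_2pow = 12): (sizeof(uint32_t) << 3) - 1 = 31, so 'K' keeps incrementing only while log2(pages per chunk) = opt_chunk_2pow - pagesize_2pow stays below 31. That caps a chunk at 2^(12+31) = 2^43 bytes, ensuring every page index fits in the 32-bit pos field with a value left over for the POS_FREE sentinel. The old test only guarded against overflowing size_t itself.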
@@ -3504 +3597 @@ malloc_init_hard()
         }
     }
 }
+errno = serrno;
 
 /* Take care to call atexit() only once. */
 if (opt_print_stats) {
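Note: errno is captured into serrno before the CPU-count sysctl and option probing (which may fail benignly, e.g. when no /etc/malloc.conf link exists) and restored here, so that a successful malloc() never leaves a stray errno behind from one-time initialization.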
@@ -3602 +3696 @@ malloc_init_hard()
     opt_narenas_lshift += 2;
 }
 
-#ifdef NO_TLS
-/* Initialize arena key. */
-(void)thr_keycreate(&arenas_map_key, NULL);
-#endif
-
 /* Determine how many arenas to use. */
 narenas = ncpus;
 if (opt_narenas_lshift > 0) {
@@ -3901 +3990 @@ _malloc_prefork()
     if (arenas[i] != NULL)
         malloc_mutex_lock(&arenas[i]->mtx);
 }
-malloc_mutex_unlock(&arenas_mtx);
 
 malloc_mutex_lock(&base_mtx);
 
@@ -3919 +4007 @@ _malloc_postfork()
 
 malloc_mutex_unlock(&base_mtx);
 
-malloc_mutex_lock(&arenas_mtx);
 for (i = 0; i < narenas; i++) {
     if (arenas[i] != NULL)
         malloc_mutex_unlock(&arenas[i]->mtx);
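Note: _malloc_prefork() now keeps arenas_mtx held across fork() instead of dropping it once the per-arena locks are taken; otherwise another thread could be mid-way through creating an arena when the child snapshots the address space, leaving the child with a half-initialized arenas[] array. The hooks follow the usual atfork contract; illustrative wiring only (the actual registration lives elsewhere in libc):

    pthread_atfork(_malloc_prefork, _malloc_postfork, _malloc_postfork);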