--- version 1.3, 2007/10/07 21:45:18
+++ version 1.40, 2016/04/12 18:07:08
@@ -143 +143 @@ __RCSID("$NetBSD$");
 #ifdef __FreeBSD__
 #include <machine/atomic.h>
 #include <machine/cpufunc.h>
-#endif
 #include <machine/vmparam.h>
+#endif
 
 #include <errno.h>
 #include <limits.h>
@@ -161 +161 @@ __RCSID("$NetBSD$");
 
 #ifdef __NetBSD__
 # include <reentrant.h>
-void _malloc_prefork(void);
-void _malloc_postfork(void);
-ssize_t _write(int, const void *, size_t);
-const char *_getprogname(void);
+# include "extern.h"
+
+#define STRERROR_R(a, b, c) __strerror_r(a, b, c);
+/*
+ * A non localized version of strerror, that avoids bringing in
+ * stdio and the locale code. All the malloc messages are in English
+ * so why bother?
+ */
+static int
+__strerror_r(int e, char *s, size_t l)
+{
+	int rval;
+	size_t slen;
+
+	if (e >= 0 && e < sys_nerr) {
+		slen = strlcpy(s, sys_errlist[e], l);
+		rval = 0;
+	} else {
+		slen = snprintf_ss(s, l, "Unknown error %u", e);
+		rval = EINVAL;
+	}
+	return slen >= l ? ERANGE : rval;
+}
 #endif
 
 #ifdef __FreeBSD__
+#define STRERROR_R(a, b, c) strerror_r(a, b, c);
 #include "un-namespace.h"
 #endif
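A note on the NetBSD branch above: snprintf_ss() is NetBSD libc's signal-safe snprintf variant, and indexing sys_errlist[] directly instead of calling strerror() is what keeps stdio and the locale tables out of the error path; returning ERANGE on truncation mirrors POSIX strerror_r() semantics.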
@@ -196 +216 @@ const char *_getprogname(void);
 #define STRERROR_BUF 64
 
 /* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
 
+/*
+ * If you touch the TINY_MIN_2POW definition for any architecture, please
+ * make sure to adjust the corresponding definition for JEMALLOC_TINY_MIN_2POW
+ * in the gcc 4.8 tree in dist/gcc/tree-ssa-ccp.c and verify that a native
+ * gcc is still buildable!
+ */
+
 #ifdef __i386__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 2
@@ -205 +233 @@ const char *_getprogname(void);
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 3
 #endif
+#ifdef __aarch64__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 3
+# define NO_TLS
+#endif
 #ifdef __alpha__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 3
+# define TINY_MIN_2POW 3
 # define NO_TLS
 #endif
 #ifdef __sparc64__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 3
+# define TINY_MIN_2POW 3
 # define NO_TLS
 #endif
 #ifdef __amd64__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 3
+# define TINY_MIN_2POW 3
 #endif
 #ifdef __arm__
 # define QUANTUM_2POW_MIN 3
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
+# ifdef __ARM_EABI__
+#  define TINY_MIN_2POW 3
+# endif
 # define NO_TLS
 #endif
 #ifdef __powerpc__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
+# define TINY_MIN_2POW 3
 #endif
 #if defined(__sparc__) && !defined(__sparc64__)
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
 #endif
-#ifdef __vax__
+#ifdef __or1k__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
 #endif
-#ifdef __sh__
+#ifdef __vax__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
 #endif
-#ifdef __m68k__
+#ifdef __sh__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
 #endif
-#ifdef __mips__
+#ifdef __m68k__
 # define QUANTUM_2POW_MIN 4
 # define SIZEOF_PTR_2POW 2
 # define USE_BRK
 #endif
+#if defined(__mips__) || defined(__riscv__)
+# ifdef _LP64
+#  define SIZEOF_PTR_2POW 3
+#  define TINY_MIN_2POW 3
+# else
+#  define SIZEOF_PTR_2POW 2
+# endif
+# define QUANTUM_2POW_MIN 4
+# define USE_BRK
+#endif
+#ifdef __hppa__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 2
+# define USE_BRK
+#endif
 
 #define SIZEOF_PTR (1 << SIZEOF_PTR_2POW)
 
@@ -263 +318 @@ const char *_getprogname(void);
 # define SIZEOF_INT_2POW 2
 #endif
 
-/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
-#if (!defined(PIC) && !defined(NO_TLS))
-# define NO_TLS
-#endif
-
 /*
  * Size and alignment of memory chunks that are allocated by the OS's virtual
  * memory system.
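Taken together, the per-architecture knobs above encode three quantities: the minimum allocation alignment (2^QUANTUM_2POW_MIN), the pointer width (2^SIZEOF_PTR_2POW), and optionally the smallest tiny size class (2^TINY_MIN_2POW). A minimal stand-alone sketch of the invariants they are expected to satisfy; the values are copied from the amd64 branch above, and the asserts are illustrative, not the allocator's own:

#include <assert.h>
#include <stddef.h>

#define QUANTUM_2POW_MIN 4	/* quantum = 16-byte minimum alignment */
#define SIZEOF_PTR_2POW  3	/* pointers are 2^3 = 8 bytes */
#define TINY_MIN_2POW    3	/* smallest "tiny" class is 8 bytes */

int
main(void)
{
	size_t quantum = (size_t)1 << QUANTUM_2POW_MIN;
	size_t ptr_size = (size_t)1 << SIZEOF_PTR_2POW;

	assert(ptr_size == sizeof(void *));	/* SIZEOF_PTR matches reality (LP64) */
	assert(quantum >= ptr_size);		/* every class can hold a pointer */
	assert(((size_t)1 << TINY_MIN_2POW) <= quantum); /* tiny classes sit below quantum */
	return 0;
}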
@@ -283 +333 @@ const char *_getprogname(void);
 #define CACHELINE ((size_t)(1 << CACHELINE_2POW))
 
 /* Smallest size class to support. */
-#define TINY_MIN_2POW 1
+#ifndef TINY_MIN_2POW
+#define TINY_MIN_2POW 2
+#endif
 
 /*
  * Maximum size class that is a multiple of the quantum, but not (necessarily)
@@ -294 +346 @@ const char *_getprogname(void);
 #define SMALL_MAX_DEFAULT (1 << SMALL_MAX_2POW_DEFAULT)
 
 /*
- * Maximum desired run header overhead. Runs are sized as small as possible
- * such that this setting is still honored, without violating other constraints.
- * The goal is to make runs as small as possible without exceeding a per run
- * external fragmentation threshold.
+ * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
+ * as small as possible such that this setting is still honored, without
+ * violating other constraints. The goal is to make runs as small as possible
+ * without exceeding a per run external fragmentation threshold.
  *
- * Note that it is possible to set this low enough that it cannot be honored
- * for some/all object sizes, since there is one bit of header overhead per
- * object (plus a constant). In such cases, this constraint is relaxed.
+ * We use binary fixed point math for overhead computations, where the binary
+ * point is implicitly RUN_BFP bits to the left.
  *
- * RUN_MAX_OVRHD_RELAX specifies the maximum number of bits per region of
- * overhead for which RUN_MAX_OVRHD is relaxed.
+ * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
+ * honored for some/all object sizes, since there is one bit of header overhead
+ * per object (plus a constant). This constraint is relaxed (ignored) for runs
+ * that are so small that the per-region overhead is greater than:
+ *
+ *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
  */
-#define RUN_MAX_OVRHD 0.015
-#define RUN_MAX_OVRHD_RELAX 1.5
+#define RUN_BFP 12
+/*              \/   Implicit binary fixed point. */
+#define RUN_MAX_OVRHD 0x0000003dU
+#define RUN_MAX_OVRHD_RELAX 0x00001800U
 
 /* Put a cap on small object run size. This overrides RUN_MAX_OVRHD. */
 #define RUN_MAX_SMALL_2POW 15
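To decode the hex constants: with RUN_BFP = 12 fractional bits, 0x3d/4096 is roughly 0.0149 and 0x1800/4096 is exactly 1.5, i.e. the same 0.015 and 1.5 the old floating-point definitions expressed. A small self-contained sketch of the comparison arena_bin_run_size_calc() performs with them later in this diff (the run and region numbers below are made up for illustration):

#include <stdio.h>

#define RUN_BFP             12
#define RUN_MAX_OVRHD       0x0000003dU	/* ~1.5%, as 61/2^12 */
#define RUN_MAX_OVRHD_RELAX 0x00001800U	/* 1.5, as 6144/2^12 */

int
main(void)
{
	unsigned run_size = 8192, reg0_offset = 160, reg_size = 512;

	/* Relaxation only applies while reg_size is large enough. */
	int relax = RUN_MAX_OVRHD * (reg_size << 3) > RUN_MAX_OVRHD_RELAX;
	/* Fixed-point form of reg0_offset/run_size > RUN_MAX_OVRHD/2^RUN_BFP. */
	int over = ((unsigned long)reg0_offset << RUN_BFP) >
	    (unsigned long)RUN_MAX_OVRHD * run_size;

	printf("relax=%d over-threshold=%d\n", relax, over);
	return 0;
}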
@@ -335 +392 @@ static malloc_mutex_t init_lock = {_SPIN
 /* Set to true once the allocator has been initialized. */
 static bool malloc_initialized = false;
 
+#ifdef _REENTRANT
 /* Used to avoid initialization races. */
 static mutex_t init_lock = MUTEX_INITIALIZER;
 #endif
+#endif
 
 /******************************************************************************/
 /*
@@ -620 +679 @@ static unsigned ncpus;
 /* VM page size. */
 static size_t pagesize;
 static size_t pagesize_mask;
-static size_t pagesize_2pow;
+static int pagesize_2pow;
 
 /* Various bin-related settings. */
 static size_t bin_maxclass; /* Max size class for bins. */
@@ -637 +696 @@ static size_t quantum_mask; /* (quantum
 /* Various chunk-related settings. */
 static size_t chunksize;
 static size_t chunksize_mask; /* (chunksize - 1). */
+static int chunksize_2pow;
 static unsigned chunk_npages;
 static unsigned arena_chunk_header_npages;
 static size_t arena_maxclass; /* Max size class for arenas. */
@@ -646 +706 @@ static size_t arena_maxclass; /* Max si
  * Chunks.
  */
 
+#ifdef _REENTRANT
 /* Protects chunk-related data structures. */
 static malloc_mutex_t chunks_mtx;
+#endif
 
 /* Tree of chunks that are stand-alone huge allocations. */
 static chunk_tree_t huge;
@@ -674 +736 @@ static void *brk_max;
 /* Huge allocation statistics. */
 static uint64_t huge_nmalloc;
 static uint64_t huge_ndalloc;
+static uint64_t huge_nralloc;
 static size_t huge_allocated;
 #endif
 
@@ -697 +760 @@ static void *base_pages;
 static void *base_next_addr;
 static void *base_past_addr; /* Addr immediately past base_pages. */
 static chunk_node_t *base_chunk_nodes; /* LIFO cache of chunk nodes. */
+#ifdef _REENTRANT
 static malloc_mutex_t base_mtx;
+#endif
 #ifdef MALLOC_STATS
 static size_t base_mapped;
 #endif
@@ -714 +779 @@ static size_t base_mapped;
 static arena_t **arenas;
 static unsigned narenas;
 static unsigned next_arena;
+#ifdef _REENTRANT
 static malloc_mutex_t arenas_mtx; /* Protects arenas initialization. */
+#endif
 
-#ifndef NO_TLS
 /*
  * Map of pthread_self() --> arenas[???], used for selecting an arena to use
  * for allocations.
  */
-static __thread arena_t *arenas_map;
-#define get_arenas_map() (arenas_map)
-#define set_arenas_map(x) (arenas_map = x)
+#ifndef NO_TLS
+static __thread arena_t **arenas_map;
+#else
+static arena_t **arenas_map;
+#endif
+
+#if !defined(NO_TLS) || !defined(_REENTRANT)
+# define get_arenas_map() (arenas_map)
+# define set_arenas_map(x) (arenas_map = x)
 #else
-static thread_key_t arenas_map_key;
-#define get_arenas_map() thr_getspecific(arenas_map_key)
-#define set_arenas_map(x) thr_setspecific(arenas_map_key, x)
+static thread_key_t arenas_map_key = -1;
+
+static inline arena_t **
+get_arenas_map(void)
+{
+	if (!__isthreaded)
+		return arenas_map;
+
+	if (arenas_map_key == -1) {
+		(void)thr_keycreate(&arenas_map_key, NULL);
+		if (arenas_map != NULL) {
+			thr_setspecific(arenas_map_key, arenas_map);
+			arenas_map = NULL;
+		}
+	}
+
+	return thr_getspecific(arenas_map_key);
+}
+
+static __inline void
+set_arenas_map(arena_t **a)
+{
+	if (!__isthreaded) {
+		arenas_map = a;
+		return;
+	}
+
+	if (arenas_map_key == -1) {
+		(void)thr_keycreate(&arenas_map_key, NULL);
+		if (arenas_map != NULL) {
+			_DIAGASSERT(arenas_map == a);
+			arenas_map = NULL;
+		}
+	}
+
+	thr_setspecific(arenas_map_key, a);
+}
 #endif
 
 #ifdef MALLOC_STATS
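A design note on the thread-key fallback just above: the key is created lazily, on the first call after the process has actually gone multi-threaded (__isthreaded), because malloc runs long before the thread library is fully initialized; whatever value was cached in the plain static arenas_map while single-threaded is migrated into the thread-specific slot at that point. This sidesteps the same bootstrapping problem the old revision's comment in choose_arena() described for TLS in statically linked programs.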
@@ -750 +857 @@ static bool opt_junk = false;
 #endif
 static bool opt_hint = false;
 static bool opt_print_stats = false;
-static size_t opt_quantum_2pow = QUANTUM_2POW_MIN;
-static size_t opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
-static size_t opt_chunk_2pow = CHUNK_2POW_DEFAULT;
+static int opt_quantum_2pow = QUANTUM_2POW_MIN;
+static int opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
+static int opt_chunk_2pow = CHUNK_2POW_DEFAULT;
 static bool opt_utrace = false;
 static bool opt_sysv = false;
 static bool opt_xmalloc = false;
@@ -784 +891 @@ static void wrtmessage(const char *p1, c
 #ifdef MALLOC_STATS
 static void malloc_printf(const char *format, ...);
 #endif
-static char *umax2s(uintmax_t x, char *s);
+static char *size_t2s(size_t x, char *s);
 static bool base_pages_alloc(size_t minsize);
 static void *base_alloc(size_t size);
 static chunk_node_t *base_chunk_node_alloc(void);
@@ -793 +900 @@ static void base_chunk_node_dealloc(chun
 static void stats_print(arena_t *arena);
 #endif
 static void *pages_map(void *addr, size_t size);
+static void *pages_map_align(void *addr, size_t size, int align);
 static void pages_unmap(void *addr, size_t size);
 static void *chunk_alloc(size_t size);
 static void chunk_dealloc(void *chunk, size_t size);
-static arena_t *choose_arena_hard(void);
 static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size);
 static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
 static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
 
 wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
 {
 
-	_write(STDERR_FILENO, p1, strlen(p1));
-	_write(STDERR_FILENO, p2, strlen(p2));
-	_write(STDERR_FILENO, p3, strlen(p3));
-	_write(STDERR_FILENO, p4, strlen(p4));
+	write(STDERR_FILENO, p1, strlen(p1));
+	write(STDERR_FILENO, p2, strlen(p2));
+	write(STDERR_FILENO, p3, strlen(p3));
+	write(STDERR_FILENO, p4, strlen(p4));
 }
 
 void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
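Two things worth calling out here: choose_arena_hard() loses its forward declaration because, as the rewritten definitions later in this diff show, it is now defined before its only caller; and the _write() to write() switch drops the reserved-namespace wrapper the old build declared above. The four-string _malloc_message()/wrtmessage() convention itself is unchanged: callers pass up to four pre-formatted pieces, e.g. _malloc_message(getprogname(), ": (malloc) Error in munmap(): ", buf, "\n"), so the error path never needs printf-style formatting or allocation.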
@@ -946 +1053 @@ malloc_printf(const char *format, ...)
 
 /*
  * We don't want to depend on vsnprintf() for production builds, since that can
- * cause unnecessary bloat for static binaries. umax2s() provides minimal
+ * cause unnecessary bloat for static binaries. size_t2s() provides minimal
  * integer printing functionality, so that malloc_printf() use can be limited to
  * MALLOC_STATS code.
  */
 #define UMAX2S_BUFSIZE 21
 static char *
-umax2s(uintmax_t x, char *s)
+size_t2s(size_t x, char *s)
 {
 	unsigned i;
 
 	/* Make sure UMAX2S_BUFSIZE is large enough. */
-	assert(sizeof(uintmax_t) <= 8);
+	/* LINTED */
+	assert(sizeof(size_t) <= 8);
 
 	i = UMAX2S_BUFSIZE - 1;
 	s[i] = '\0';
@@ -1001 +1109 @@ base_pages_alloc(size_t minsize)
 	 */
 		incr = (intptr_t)chunksize
 		    - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
-		if (incr < minsize)
+		assert(incr >= 0);
+		if ((size_t)incr < minsize)
 			incr += csize;
 
 		brk_prev = sbrk(incr);
@@ -1176 +1285 @@ stats_print(arena_t *arena)
  * Begin chunk management functions.
  */
 
+#ifndef lint
 static inline int
 chunk_comp(chunk_node_t *a, chunk_node_t *b)
 {
@@ -1192 +1302 @@ chunk_comp(chunk_node_t *a, chunk_node_t
 }
 
 /* Generate red-black tree code for chunks. */
-#ifndef lint
 RB_GENERATE_STATIC(chunk_tree_s, chunk_node_s, link, chunk_comp);
 #endif
 
 static void *
-pages_map(void *addr, size_t size)
+pages_map_align(void *addr, size_t size, int align)
 {
 	void *ret;
 
@@ -1205 +1314 @@ pages_map(void *addr, size_t size)
 	 * We don't use MAP_FIXED here, because it can cause the *replacement*
 	 * of existing mappings, and we only want to create new mappings.
 	 */
-	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-	    -1, 0);
+	ret = mmap(addr, size, PROT_READ | PROT_WRITE,
+	    MAP_PRIVATE | MAP_ANON | MAP_ALIGNED(align), -1, 0);
 	assert(ret != NULL);
 
 	if (ret == MAP_FAILED)
@@ -1218 +1327 @@ pages_map(void *addr, size_t size)
 		if (munmap(ret, size) == -1) {
 			char buf[STRERROR_BUF];
 
-			strerror_r(errno, buf, sizeof(buf));
-			_malloc_message(_getprogname(),
+			STRERROR_R(errno, buf, sizeof(buf));
+			_malloc_message(getprogname(),
 			    ": (malloc) Error in munmap(): ", buf, "\n");
 			if (opt_abort)
 				abort();
@@ -1232 +1341 @@ pages_map(void *addr, size_t size)
 	return (ret);
 }
 
+static void *
+pages_map(void *addr, size_t size)
+{
+
+	return pages_map_align(addr, size, 0);
+}
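For readers unfamiliar with it, MAP_ALIGNED(n) is a NetBSD extension to mmap(2) that requests 2^n-byte alignment for the new mapping; an alignment argument of 0 imposes no constraint, which is why the pages_map() wrapper above can simply pass 0. A minimal stand-alone illustration, outside the allocator (NetBSD-specific; the 20 is an arbitrary example value):

#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	size_t len = (size_t)1 << 20;	/* 1 MiB, requested 1 MiB-aligned */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON | MAP_ALIGNED(20), -1, 0);

	if (p == MAP_FAILED)
		return 1;
	printf("aligned: %s\n",
	    ((uintptr_t)p & (len - 1)) == 0 ? "yes" : "no");
	munmap(p, len);
	return 0;
}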
 
 static void
 pages_unmap(void *addr, size_t size)
 {
@@ -1239 +1355 @@ pages_unmap(void *addr, size_t size)
 	if (munmap(addr, size) == -1) {
 		char buf[STRERROR_BUF];
 
-		strerror_r(errno, buf, sizeof(buf));
-		_malloc_message(_getprogname(),
+		STRERROR_R(errno, buf, sizeof(buf));
+		_malloc_message(getprogname(),
 		    ": (malloc) Error in munmap(): ", buf, "\n");
 		if (opt_abort)
 			abort();
@@ -1299 +1415 @@ chunk_alloc(size_t size)
 	 * anywhere. Beware of size_t wrap-around.
 	 */
 	if (size + chunksize > size) {
-		if ((ret = pages_map(NULL, size + chunksize)) != NULL) {
-			size_t offset = CHUNK_ADDR2OFFSET(ret);
-
-			/*
-			 * Success. Clean up unneeded leading/trailing space.
-			 */
-			if (offset != 0) {
-				/* Leading space. */
-				pages_unmap(ret, chunksize - offset);
-
-				ret = (void *)((uintptr_t)ret + (chunksize -
-				    offset));
-
-				/* Trailing space. */
-				pages_unmap((void *)((uintptr_t)ret + size),
-				    offset);
-			} else {
-				/* Trailing space only. */
-				pages_unmap((void *)((uintptr_t)ret + size),
-				    chunksize);
-			}
+		if ((ret = pages_map_align(NULL, size, chunksize_2pow))
+		    != NULL) {
 			goto RETURN;
 		}
 	}
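The deleted block is the classic portable recipe for getting an aligned mapping out of a plain mmap(): over-map by one chunk (size + chunksize), then munmap() the leading slop before the first chunk boundary and the trailing slop after the requested size. With MAP_ALIGNED the kernel hands back chunk-aligned memory directly, so the new revision trades two extra munmap() calls and a transiently larger mapping for a single constrained mmap().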
@@ -1348 +1445 @@ chunk_alloc(size_t size)
 	 */
 			incr = (intptr_t)size
 			    - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
-			if (incr == size) {
+			if (incr == (intptr_t)size) {
 				ret = brk_cur;
 			} else {
 				ret = (void *)((intptr_t)brk_cur + incr);
@@ -1506 +1603 @@ chunk_dealloc(void *chunk, size_t size)
 */
 
 /*
- * Choose an arena based on a per-thread value (fast-path code, calls slow-path
- * code if necessary).
+ * Choose an arena based on a per-thread and (optimistically) per-CPU value.
+ *
+ * We maintain at least one block of arenas. Usually there are more.
+ * The blocks are $ncpu arenas in size. Whole blocks are 'hashed'
+ * amongst threads. To accomplish this, next_arena advances only in
+ * ncpu steps.
 */
-static inline arena_t *
-choose_arena(void)
-{
-	arena_t *ret;
-
-	/*
-	 * We can only use TLS if this is a PIC library, since for the static
-	 * library version, libc's malloc is used by TLS allocation, which
-	 * introduces a bootstrapping issue.
-	 */
-	if (__isthreaded == false) {
-		/*
-		 * Avoid the overhead of TLS for single-threaded operation. If the
-		 * app switches to threaded mode, the initial thread may end up
-		 * being assigned to some other arena, but this one-time switch
-		 * shouldn't cause significant issues.
-		 */
-		return (arenas[0]);
-	}
-
-	ret = get_arenas_map();
-	if (ret == NULL)
-		ret = choose_arena_hard();
-
-	assert(ret != NULL);
-	return (ret);
-}
-
-/*
- * Choose an arena based on a per-thread value (slow-path code only, called
- * only by choose_arena()).
- */
-static arena_t *
-choose_arena_hard(void)
-{
-	arena_t *ret;
-
-	assert(__isthreaded);
-
-	/* Assign one of the arenas to this thread, in a round-robin fashion. */
-	malloc_mutex_lock(&arenas_mtx);
-	ret = arenas[next_arena];
-	if (ret == NULL)
-		ret = arenas_extend(next_arena);
-	if (ret == NULL) {
-		/*
-		 * Make sure that this function never returns NULL, so that
-		 * choose_arena() doesn't have to check for a NULL return
-		 * value.
-		 */
-		ret = arenas[0];
-	}
-	next_arena = (next_arena + 1) % narenas;
-	malloc_mutex_unlock(&arenas_mtx);
-	set_arenas_map(ret);
-
-	return (ret);
-}
+static __noinline arena_t *
+choose_arena_hard(void)
+{
+	unsigned i, curcpu;
+	arena_t **map;
+
+	/* Initialize the current block of arenas and advance to next. */
+	malloc_mutex_lock(&arenas_mtx);
+	assert(next_arena % ncpus == 0);
+	assert(narenas % ncpus == 0);
+	map = &arenas[next_arena];
+	set_arenas_map(map);
+	for (i = 0; i < ncpus; i++) {
+		if (arenas[next_arena] == NULL)
+			arenas_extend(next_arena);
+		next_arena = (next_arena + 1) % narenas;
+	}
+	malloc_mutex_unlock(&arenas_mtx);
+
+	/*
+	 * If we were unable to allocate an arena above, then default to
+	 * the first arena, which is always present.
+	 */
+	curcpu = thr_curcpu();
+	if (map[curcpu] != NULL)
+		return map[curcpu];
+	return arenas[0];
+}
+
+static inline arena_t *
+choose_arena(void)
+{
+	unsigned curcpu;
+	arena_t **map;
+
+	map = get_arenas_map();
+	curcpu = thr_curcpu();
+	if (__predict_true(map != NULL && map[curcpu] != NULL))
+		return map[curcpu];
+
+	return choose_arena_hard();
+}
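The "blocks hashed amongst threads" scheme is easier to see in isolation. Here is a toy sketch of the same indexing arithmetic; NCPUS, NARENAS, and the loop are illustrative stand-ins rather than the allocator's API — in the real code the block is handed out under arenas_mtx and the per-CPU slot comes from thr_curcpu():

#include <stdio.h>

#define NCPUS   4
#define NARENAS 16	/* kept a multiple of NCPUS, as the asserts above demand */

int
main(void)
{
	unsigned next_arena = 0;

	for (int thread = 0; thread < 3; thread++) {
		/* Each new thread is assigned a whole block of NCPUS arenas... */
		unsigned block = next_arena;
		next_arena = (next_arena + NCPUS) % NARENAS;

		/* ...and indexes within the block by the CPU it happens to run on. */
		for (unsigned cpu = 0; cpu < NCPUS; cpu++)
			printf("thread %d on cpu %u -> arena %u\n",
			    thread, cpu, block + cpu);
	}
	return 0;
}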
 
+#ifndef lint
 static inline int
 arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
 {
@@ -1575 +1661 @@ arena_chunk_comp(arena_chunk_t *a, arena
 	assert(a != NULL);
 	assert(b != NULL);
 
+	if (a->max_frun_npages < b->max_frun_npages)
+		return -1;
+	if (a->max_frun_npages > b->max_frun_npages)
+		return 1;
+
 	if ((uintptr_t)a < (uintptr_t)b)
 		return (-1);
 	else if (a == b)
@@ -1584 +1675 @@ arena_chunk_comp(arena_chunk_t *a, arena
 }
 
 /* Generate red-black tree code for arena chunks. */
-#ifndef lint
 RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp);
 #endif
 
+#ifndef lint
 static inline int
 arena_run_comp(arena_run_t *a, arena_run_t *b)
 {
@@ -1604 +1695 @@ arena_run_comp(arena_run_t *a, arena_run
 }
 
 /* Generate red-black tree code for arena runs. */
-#ifndef lint
 RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp);
 #endif
 
@@ -1663 +1753 @@ arena_run_reg_alloc(arena_run_t *run, ar
 		}
 	}
 	/* Not reached. */
+	/* LINTED */
 	assert(0);
 	return (NULL);
 }
@@ -1705 +1796 @@ arena_run_reg_dalloc(arena_run_t *run, a
 	};
 	unsigned diff, regind, elm, bit;
 
+	/* LINTED */
 	assert(run->magic == ARENA_RUN_MAGIC);
 	assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
 	    >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
@@ -1741 +1833 @@ arena_run_reg_dalloc(arena_run_t *run, a
 			 * The page size is too large for us to use the lookup
 			 * table. Use real division.
 			 */
-			regind = diff / size;
+			regind = (unsigned)(diff / size);
 		}
 	} else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
 	    << QUANTUM_2POW_MIN) + 2) {
@@ -1754 +1846 @@ arena_run_reg_dalloc(arena_run_t *run, a
 		 * if the user increases small_max via the 'S' runtime
 		 * configuration option.
 		 */
-		regind = diff / size;
+		regind = (unsigned)(diff / size);
 	};
 	assert(diff == regind * size);
 	assert(regind < bin->nregs);
@@ -1780 +1872 @@ arena_run_split(arena_t *arena, arena_ru
 	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
 	    >> pagesize_2pow);
 	total_pages = chunk->map[run_ind].npages;
-	need_pages = (size >> pagesize_2pow);
+	need_pages = (unsigned)(size >> pagesize_2pow);
 	assert(need_pages <= total_pages);
 	rem_pages = total_pages - need_pages;
 
@@ -1825 +1917 @@ arena_chunk_alloc(arena_t *arena)
 
 		chunk->arena = arena;
 
-		/* LINTED */
-		RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
-
 		/*
 		 * Claim that no pages are in use, since the header is merely
 		 * overhead.
@@ -1847 +1936 @@ arena_chunk_alloc(arena_t *arena)
 		chunk->map[chunk_npages - 1].npages = chunk_npages -
 		    arena_chunk_header_npages;
 		chunk->map[chunk_npages - 1].pos = POS_FREE;
+
+		RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
 	}
 
 	return (chunk);
@@ -1883 +1974 @@ arena_chunk_dealloc(arena_t *arena, aren
 static arena_run_t *
 arena_run_alloc(arena_t *arena, size_t size)
 {
-	arena_chunk_t *chunk;
+	arena_chunk_t *chunk, *chunk_tmp;
 	arena_run_t *run;
-	unsigned need_npages, limit_pages, compl_need_npages;
+	unsigned need_npages;
 
 	assert(size <= (chunksize - (arena_chunk_header_npages <<
 	    pagesize_2pow)));
 	assert((size & pagesize_mask) == 0);
 
 	/*
-	 * Search through arena's chunks in address order for a free run that is
-	 * large enough. Look for the first fit.
+	 * Search through the arena chunk tree for a large enough free run.
+	 * Tree order ensures that any exact fit is picked immediately or
+	 * otherwise the lowest address of the next size.
 	 */
-	need_npages = (size >> pagesize_2pow);
-	limit_pages = chunk_npages - arena_chunk_header_npages;
-	compl_need_npages = limit_pages - need_npages;
+	need_npages = (unsigned)(size >> pagesize_2pow);
 	/* LINTED */
-	RB_FOREACH(chunk, arena_chunk_tree_s, &arena->chunks) {
+	for (;;) {
+		chunk_tmp = RB_ROOT(&arena->chunks);
+		chunk = NULL;
+		while (chunk_tmp) {
+			if (chunk_tmp->max_frun_npages == need_npages) {
+				chunk = chunk_tmp;
+				break;
+			}
+			if (chunk_tmp->max_frun_npages < need_npages) {
+				chunk_tmp = RB_RIGHT(chunk_tmp, link);
+				continue;
+			}
+			chunk = chunk_tmp;
+			chunk_tmp = RB_LEFT(chunk, link);
+		}
+		if (chunk == NULL)
+			break;
 		/*
-		 * Avoid searching this chunk if there are not enough
-		 * contiguous free pages for there to possibly be a large
-		 * enough free run.
+		 * At this point, the chunk must have a cached run size large
+		 * enough to fit the allocation.
 		 */
-		if (chunk->pages_used <= compl_need_npages &&
-		    need_npages <= chunk->max_frun_npages) {
+		assert(need_npages <= chunk->max_frun_npages);
+		{
 			arena_chunk_map_t *mapelm;
 			unsigned i;
 			unsigned max_frun_npages = 0;
@@ -1944 +2049 @@ arena_run_alloc(arena_t *arena, size_t s
 			 * chunk->min_frun_ind was already reset above (if
 			 * necessary).
 			 */
+			RB_REMOVE(arena_chunk_tree_s, &arena->chunks, chunk);
 			chunk->max_frun_npages = max_frun_npages;
+			RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
 		}
 	}
 
|
|
Line 1973 arena_run_dalloc(arena_t *arena, arena_r |
|
Line 2080 arena_run_dalloc(arena_t *arena, arena_r |
|
>> pagesize_2pow); |
>> pagesize_2pow); |
assert(run_ind >= arena_chunk_header_npages); |
assert(run_ind >= arena_chunk_header_npages); |
assert(run_ind < (chunksize >> pagesize_2pow)); |
assert(run_ind < (chunksize >> pagesize_2pow)); |
run_pages = (size >> pagesize_2pow); |
run_pages = (unsigned)(size >> pagesize_2pow); |
assert(run_pages == chunk->map[run_ind].npages); |
assert(run_pages == chunk->map[run_ind].npages); |
|
|
/* Subtract pages from count of pages used in chunk. */ |
/* Subtract pages from count of pages used in chunk. */ |
Line 2027 arena_run_dalloc(arena_t *arena, arena_r |
|
Line 2134 arena_run_dalloc(arena_t *arena, arena_r |
|
assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE); |
assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE); |
} |
} |
|
|
if (chunk->map[run_ind].npages > chunk->max_frun_npages) |
if (chunk->map[run_ind].npages > chunk->max_frun_npages) { |
|
RB_REMOVE(arena_chunk_tree_s, &arena->chunks, chunk); |
chunk->max_frun_npages = chunk->map[run_ind].npages; |
chunk->max_frun_npages = chunk->map[run_ind].npages; |
|
RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk); |
|
} |
if (run_ind < chunk->min_frun_ind) |
if (run_ind < chunk->min_frun_ind) |
chunk->min_frun_ind = run_ind; |
chunk->min_frun_ind = run_ind; |
|
|
Line 2136 arena_bin_run_size_calc(arena_bin_t *bin |
|
Line 2246 arena_bin_run_size_calc(arena_bin_t *bin |
|
size_t try_run_size, good_run_size; |
size_t try_run_size, good_run_size; |
unsigned good_nregs, good_mask_nelms, good_reg0_offset; |
unsigned good_nregs, good_mask_nelms, good_reg0_offset; |
unsigned try_nregs, try_mask_nelms, try_reg0_offset; |
unsigned try_nregs, try_mask_nelms, try_reg0_offset; |
float max_ovrhd = RUN_MAX_OVRHD; |
|
|
|
assert(min_run_size >= pagesize); |
assert(min_run_size >= pagesize); |
assert(min_run_size <= arena_maxclass); |
assert(min_run_size <= arena_maxclass); |
Line 2153 arena_bin_run_size_calc(arena_bin_t *bin |
|
Line 2262 arena_bin_run_size_calc(arena_bin_t *bin |
|
* header's mask length and the number of regions. |
* header's mask length and the number of regions. |
*/ |
*/ |
try_run_size = min_run_size; |
try_run_size = min_run_size; |
try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size) |
try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) / |
+ 1; /* Counter-act the first line of the loop. */ |
bin->reg_size) + 1); /* Counter-act try_nregs-- in loop. */ |
do { |
do { |
try_nregs--; |
try_nregs--; |
try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) + |
try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) + |
((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0); |
((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0); |
try_reg0_offset = try_run_size - (try_nregs * bin->reg_size); |
try_reg0_offset = (unsigned)(try_run_size - |
|
(try_nregs * bin->reg_size)); |
} while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1)) |
} while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1)) |
> try_reg0_offset); |
> try_reg0_offset); |
|
|
Line 2175 arena_bin_run_size_calc(arena_bin_t *bin |
|
Line 2285 arena_bin_run_size_calc(arena_bin_t *bin |
|
|
|
/* Try more aggressive settings. */ |
/* Try more aggressive settings. */ |
try_run_size += pagesize; |
try_run_size += pagesize; |
try_nregs = ((try_run_size - sizeof(arena_run_t)) / |
try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) / |
bin->reg_size) + 1; /* Counter-act try_nregs-- in loop. */ |
bin->reg_size) + 1); /* Counter-act try_nregs-- in loop. */ |
do { |
do { |
try_nregs--; |
try_nregs--; |
try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) + |
try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) + |
((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ? |
((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ? |
1 : 0); |
1 : 0); |
try_reg0_offset = try_run_size - (try_nregs * |
try_reg0_offset = (unsigned)(try_run_size - (try_nregs * |
bin->reg_size); |
bin->reg_size)); |
} while (sizeof(arena_run_t) + (sizeof(unsigned) * |
} while (sizeof(arena_run_t) + (sizeof(unsigned) * |
(try_mask_nelms - 1)) > try_reg0_offset); |
(try_mask_nelms - 1)) > try_reg0_offset); |
} while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL |
} while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL |
&& max_ovrhd > RUN_MAX_OVRHD_RELAX / ((float)(bin->reg_size << 3)) |
&& RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX |
&& ((float)(try_reg0_offset)) / ((float)(try_run_size)) > |
&& (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size); |
max_ovrhd); |
|
|
|
assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1)) |
assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1)) |
<= good_reg0_offset); |
<= good_reg0_offset); |
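For the record, the two loop conditions are the same test in different arithmetic: reg0_offset/run_size > RUN_MAX_OVRHD/2^RUN_BFP rearranges to (reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * run_size once both sides are scaled by run_size * 2^RUN_BFP, and the relaxation guard rearranges the same way. The rewrite simply keeps floating point out of the allocator entirely.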
@@ -2319 +2428 @@ arena_palloc(arena_t *arena, size_t alig
 	assert((size & pagesize_mask) == 0);
 	assert((alignment & pagesize_mask) == 0);
 
-	npages = size >> pagesize_2pow;
+	npages = (unsigned)(size >> pagesize_2pow);
 
 	malloc_mutex_lock(&arena->mtx);
 	ret = (void *)arena_run_alloc(arena, alloc_size);
@@ -2334 +2443 @@ arena_palloc(arena_t *arena, size_t alig
 	assert((offset & pagesize_mask) == 0);
 	assert(offset < alloc_size);
 	if (offset == 0) {
-		pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
+		pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
 		    pagesize_2pow);
 
 		/* Update the map for the run to be kept. */
@@ -2345 +2454 @@ arena_palloc(arena_t *arena, size_t alig
 
 		/* Trim trailing space. */
 		arena_palloc_trim(arena, chunk, pageind + npages,
-		    (alloc_size - size) >> pagesize_2pow);
+		    (unsigned)((alloc_size - size) >> pagesize_2pow));
 	} else {
 		size_t leadsize, trailsize;
 
 		leadsize = alignment - offset;
 		ret = (void *)((uintptr_t)ret + leadsize);
-		pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
+		pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
 		    pagesize_2pow);
 
 		/* Update the map for the run to be kept. */
@@ -2361 +2470 @@ arena_palloc(arena_t *arena, size_t alig
 		}
 
 		/* Trim leading space. */
-		arena_palloc_trim(arena, chunk, pageind - (leadsize >>
-		    pagesize_2pow), leadsize >> pagesize_2pow);
+		arena_palloc_trim(arena, chunk,
+		    (unsigned)(pageind - (leadsize >> pagesize_2pow)),
+		    (unsigned)(leadsize >> pagesize_2pow));
 
 		trailsize = alloc_size - leadsize - size;
 		if (trailsize != 0) {
 			/* Trim trailing space. */
 			assert(trailsize < alloc_size);
 			arena_palloc_trim(arena, chunk, pageind + npages,
-			    trailsize >> pagesize_2pow);
+			    (unsigned)(trailsize >> pagesize_2pow));
 		}
 	}
 
@@ -2403 +2513 @@ arena_salloc(const void *ptr)
 	 * affects this function, so we don't need to lock.
 	 */
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+	pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
+	    pagesize_2pow);
 	mapelm = &chunk->map[pageind];
 	if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
 	    pagesize_2pow)) {
@@ -2483 +2594 @@ arena_dalloc(arena_t *arena, arena_chunk
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
-	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+	pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
+	    pagesize_2pow);
 	mapelm = &chunk->map[pageind];
 	if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
 	    pagesize_2pow)) {
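A pattern worth naming once rather than per hunk: the many (unsigned) casts this revision sprinkles over page and region arithmetic do not change any in-range value — pointer differences are ptrdiff_t and the size_t expressions involved are wider than unsigned on LP64 — they make the narrowing explicit so that lint and -Wconversion-style checks stay quiet now that the *_2pow variables are plain int.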
@@ -2667 +2779 @@ arenas_extend(unsigned ind)
 	 * by using arenas[0]. In practice, this is an extremely unlikely
 	 * failure.
 	 */
-	_malloc_message(_getprogname(),
+	_malloc_message(getprogname(),
 	    ": (malloc) Error initializing arena\n", "", "");
 	if (opt_abort)
 		abort();
@@ -2827 +2939 @@ huge_ralloc(void *ptr, size_t size, size
 		return (ptr);
 	}
 
+	if (CHUNK_ADDR2BASE(ptr) == ptr
+#ifdef USE_BRK
+	    && ((uintptr_t)ptr < (uintptr_t)brk_base
+	    || (uintptr_t)ptr >= (uintptr_t)brk_max)
+#endif
+	    ) {
+		chunk_node_t *node, key;
+		void *newptr;
+		size_t oldcsize;
+		size_t newcsize;
+
+		newcsize = CHUNK_CEILING(size);
+		oldcsize = CHUNK_CEILING(oldsize);
+		assert(oldcsize != newcsize);
+		if (newcsize == 0) {
+			/* size_t wrap-around */
+			return (NULL);
+		}
+
+		/*
+		 * Remove the old region from the tree now. If mremap()
+		 * returns the region to the system, other thread may
+		 * map it for same huge allocation and insert it to the
+		 * tree before we acquire the mutex lock again.
+		 */
+		malloc_mutex_lock(&chunks_mtx);
+		key.chunk = __DECONST(void *, ptr);
+		/* LINTED */
+		node = RB_FIND(chunk_tree_s, &huge, &key);
+		assert(node != NULL);
+		assert(node->chunk == ptr);
+		assert(node->size == oldcsize);
+		RB_REMOVE(chunk_tree_s, &huge, node);
+		malloc_mutex_unlock(&chunks_mtx);
+
+		newptr = mremap(ptr, oldcsize, NULL, newcsize,
+		    MAP_ALIGNED(chunksize_2pow));
+		if (newptr == MAP_FAILED) {
+			/* We still own the old region. */
+			malloc_mutex_lock(&chunks_mtx);
+			RB_INSERT(chunk_tree_s, &huge, node);
+			malloc_mutex_unlock(&chunks_mtx);
+		} else {
+			assert(CHUNK_ADDR2BASE(newptr) == newptr);
+
+			/* Insert new or resized old region. */
+			malloc_mutex_lock(&chunks_mtx);
+			node->size = newcsize;
+			node->chunk = newptr;
+			RB_INSERT(chunk_tree_s, &huge, node);
+#ifdef MALLOC_STATS
+			huge_nralloc++;
+			huge_allocated += newcsize - oldcsize;
+			if (newcsize > oldcsize) {
+				stats_chunks.curchunks +=
+				    (newcsize - oldcsize) / chunksize;
+				if (stats_chunks.curchunks >
+				    stats_chunks.highchunks)
+					stats_chunks.highchunks =
+					    stats_chunks.curchunks;
+			} else {
+				stats_chunks.curchunks -=
+				    (oldcsize - newcsize) / chunksize;
+			}
+#endif
+			malloc_mutex_unlock(&chunks_mtx);
+
+			if (opt_junk && size < oldsize) {
+				memset((void *)((uintptr_t)newptr + size), 0x5a,
+				    newcsize - size);
+			} else if (opt_zero && size > oldsize) {
+				memset((void *)((uintptr_t)newptr + oldsize), 0,
+				    size - oldsize);
+			}
+			return (newptr);
+		}
+	}
+
 	/*
 	 * If we get here, then size and oldsize are different enough that we
 	 * need to use a different size class. In that case, fall back to
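A note on the call at the heart of this hunk: NetBSD's mremap(2) takes (oldp, oldsize, newp, newsize, flags) — unlike the Linux interface — so passing NULL for newp together with MAP_ALIGNED(chunksize_2pow) asks the kernel to relocate the region anywhere that preserves chunk alignment. On failure the old mapping is left intact, which is why the error path can simply re-insert the node it removed; huge_nralloc, added earlier among the statistics variables, counts these successful huge reallocations.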
@@ -3126 +3316 @@ malloc_print_stats(void)
 		    opt_xmalloc ? "X" : "x",
 		    opt_zero ? "Z\n" : "z\n");
 
-		_malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
-		_malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
-		_malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
+		_malloc_message("CPUs: ", size_t2s(ncpus, s), "\n", "");
+		_malloc_message("Max arenas: ", size_t2s(narenas, s), "\n", "");
+		_malloc_message("Pointer size: ", size_t2s(sizeof(void *), s),
 		    "\n", "");
-		_malloc_message("Quantum size: ", umax2s(quantum, s), "\n", "");
-		_malloc_message("Max small size: ", umax2s(small_max, s), "\n",
+		_malloc_message("Quantum size: ", size_t2s(quantum, s), "\n", "");
+		_malloc_message("Max small size: ", size_t2s(small_max, s), "\n",
 		    "");
 
-		_malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
-		_malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");
+		_malloc_message("Chunk size: ", size_t2s(chunksize, s), "", "");
+		_malloc_message(" (2^", size_t2s((size_t)opt_chunk_2pow, s),
+		    ")\n", "");
 
 #ifdef MALLOC_STATS
 		{
@@ -3188 +3379 @@ malloc_print_stats(void)
 
 			/* Print chunk stats. */
 			malloc_printf(
-			    "huge: nmalloc ndalloc allocated\n");
-			malloc_printf(" %12llu %12llu %12zu\n",
-			    huge_nmalloc, huge_ndalloc, huge_allocated
-			    * chunksize);
+			    "huge: nmalloc ndalloc "
+			    "nralloc allocated\n");
+			malloc_printf(" %12llu %12llu %12llu %12zu\n",
+			    huge_nmalloc, huge_ndalloc, huge_nralloc,
+			    huge_allocated);
 
 			/* Print stats for each arena. */
 			for (i = 0; i < narenas; i++) {
@@ ... @@
 malloc_init_hard(void)
 {
 	unsigned i, j;
-	int linklen;
+	ssize_t linklen;
 	char buf[PATH_MAX + 1];
 	const char *opts = "";
+	int serrno;
 
 	malloc_mutex_lock(&init_lock);
 	if (malloc_initialized) {
@@ -3243 +3436 @@ malloc_init_hard(void)
 		return (false);
 	}
 
+	serrno = errno;
 	/* Get number of CPUs. */
 	{
 		int mib[2];
@@ -3293 +3487 @@ malloc_init_hard(void)
 			}
 			break;
 		case 1:
-			if (issetugid() == 0 && (opts =
-			    getenv("MALLOC_OPTIONS")) != NULL) {
+			if ((opts = getenv("MALLOC_OPTIONS")) != NULL &&
+			    issetugid() == 0) {
 				/*
 				 * Do nothing; opts is already initialized to
 				 * the value of the MALLOC_OPTIONS environment
@@ -3320 +3514 @@ malloc_init_hard(void)
 			break;
 		default:
 			/* NOTREACHED */
+			/* LINTED */
 			assert(false);
 		}
 
@@ -3353 +3548 @@ malloc_init_hard(void)
 				opt_chunk_2pow--;
 				break;
 			case 'K':
-				/*
-				 * There must be fewer pages in a chunk than
-				 * can be recorded by the pos field of
-				 * arena_chunk_map_t, in order to make POS_FREE
-				 * special.
-				 */
-				if (opt_chunk_2pow - pagesize_2pow
-				    < (sizeof(uint32_t) << 3) - 1)
+				if (opt_chunk_2pow + 1 <
+				    (int)(sizeof(size_t) << 3))
 					opt_chunk_2pow++;
 				break;
 			case 'n':
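The two guards on the 'K' (double the chunk size) option protect different invariants: the old test kept the number of pages in a chunk representable in the pos field of arena_chunk_map_t so that POS_FREE remained a distinguishable sentinel, while the new, looser test merely stops opt_chunk_2pow from reaching the bit width of size_t, where the 1LU << opt_chunk_2pow in the chunksize computation below would overflow.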
@@ -3420 +3609 @@ malloc_init_hard(void)
 
 				cbuf[0] = opts[j];
 				cbuf[1] = '\0';
-				_malloc_message(_getprogname(),
+				_malloc_message(getprogname(),
 				    ": (malloc) Unsupported character in "
 				    "malloc options: '", cbuf, "'\n");
 			}
 			}
 		}
 	}
+	errno = serrno;
 
 	/* Take care to call atexit() only once. */
 	if (opt_print_stats) {
@@ -3442 +3632 @@ malloc_init_hard(void)
 	/* Set bin-related variables. */
 	bin_maxclass = (pagesize >> 1);
 	assert(opt_quantum_2pow >= TINY_MIN_2POW);
-	ntbins = opt_quantum_2pow - TINY_MIN_2POW;
+	ntbins = (unsigned)(opt_quantum_2pow - TINY_MIN_2POW);
 	assert(ntbins <= opt_quantum_2pow);
-	nqbins = (small_max >> opt_quantum_2pow);
-	nsbins = pagesize_2pow - opt_small_max_2pow - 1;
+	nqbins = (unsigned)(small_max >> opt_quantum_2pow);
+	nsbins = (unsigned)(pagesize_2pow - opt_small_max_2pow - 1);
 
 	/* Set variables according to the value of opt_quantum_2pow. */
 	quantum = (1 << opt_quantum_2pow);
@@ -3459 +3649 @@ malloc_init_hard(void)
 	/* Set variables according to the value of opt_chunk_2pow. */
 	chunksize = (1LU << opt_chunk_2pow);
 	chunksize_mask = chunksize - 1;
-	chunk_npages = (chunksize >> pagesize_2pow);
+	chunksize_2pow = (unsigned)opt_chunk_2pow;
+	chunk_npages = (unsigned)(chunksize >> pagesize_2pow);
 	{
 		unsigned header_size;
 
-		header_size = sizeof(arena_chunk_t) + (sizeof(arena_chunk_map_t)
-		    * (chunk_npages - 1));
+		header_size = (unsigned)(sizeof(arena_chunk_t) +
+		    (sizeof(arena_chunk_map_t) * (chunk_npages - 1)));
 		arena_chunk_header_npages = (header_size >> pagesize_2pow);
 		if ((header_size & pagesize_mask) != 0)
 			arena_chunk_header_npages++;
@@ -3496 +3687 @@ malloc_init_hard(void)
 #ifdef MALLOC_STATS
 	huge_nmalloc = 0;
 	huge_ndalloc = 0;
+	huge_nralloc = 0;
 	huge_allocated = 0;
 #endif
 	RB_INIT(&old_chunks);
@@ -3523 +3715 @@ malloc_init_hard(void)
 		opt_narenas_lshift += 2;
 	}
 
-#ifdef NO_TLS
-	/* Initialize arena key. */
-	(void)thr_keycreate(&arenas_map_key, NULL);
-#endif
-
 	/* Determine how many arenas to use. */
 	narenas = ncpus;
 	if (opt_narenas_lshift > 0) {
@@ -3538 +3725 @@ malloc_init_hard(void)
 	 * can handle.
 	 */
 		if (narenas * sizeof(arena_t *) > chunksize)
-			narenas = chunksize / sizeof(arena_t *);
+			narenas = (unsigned)(chunksize / sizeof(arena_t *));
 	} else if (opt_narenas_lshift < 0) {
 		if ((narenas << opt_narenas_lshift) < narenas)
 			narenas <<= opt_narenas_lshift;
@@ -3610 +3797 @@ malloc(size_t size)
 RETURN:
 	if (ret == NULL) {
 		if (opt_xmalloc) {
-			_malloc_message(_getprogname(),
+			_malloc_message(getprogname(),
 			    ": (malloc) Error in malloc(): out of memory\n", "",
 			    "");
 			abort();
@@ ... @@
 	return (ret);
 }
 
-/* XXXAD */
-int posix_memalign(void **memptr, size_t alignment, size_t size);
-
 int
 posix_memalign(void **memptr, size_t alignment, size_t size)
 {
@@ -3638 +3822 @@ posix_memalign(void **memptr, size_t ali
 	if (((alignment - 1) & alignment) != 0
 	    || alignment < sizeof(void *)) {
 		if (opt_xmalloc) {
-			_malloc_message(_getprogname(),
+			_malloc_message(getprogname(),
 			    ": (malloc) Error in posix_memalign(): "
 			    "invalid alignment\n", "", "");
 			abort();
@@ -3653 +3837 @@ posix_memalign(void **memptr, size_t ali
 
 	if (result == NULL) {
 		if (opt_xmalloc) {
-			_malloc_message(_getprogname(),
+			_malloc_message(getprogname(),
 			    ": (malloc) Error in posix_memalign(): out of memory\n",
 			    "", "");
 			abort();
@@ -3708 +3892 @@ calloc(size_t num, size_t size)
 RETURN:
 	if (ret == NULL) {
 		if (opt_xmalloc) {
-			_malloc_message(_getprogname(),
+			_malloc_message(getprogname(),
 			    ": (malloc) Error in calloc(): out of memory\n", "",
 			    "");
 			abort();
@@ -3743 +3927 @@ realloc(void *ptr, size_t size)
 
 		if (ret == NULL) {
 			if (opt_xmalloc) {
-				_malloc_message(_getprogname(),
+				_malloc_message(getprogname(),
 				    ": (malloc) Error in realloc(): out of "
 				    "memory\n", "", "");
 				abort();
@@ -3758 +3942 @@ realloc(void *ptr, size_t size)
 
 	if (ret == NULL) {
 		if (opt_xmalloc) {
-			_malloc_message(_getprogname(),
+			_malloc_message(getprogname(),
 			    ": (malloc) Error in realloc(): out of "
 			    "memory\n", "", "");
 			abort();
@@ -3825 +4009 @@ _malloc_prefork(void)
 		if (arenas[i] != NULL)
 			malloc_mutex_lock(&arenas[i]->mtx);
 	}
-	malloc_mutex_unlock(&arenas_mtx);
 
 	malloc_mutex_lock(&base_mtx);
 
@@ -3843 +4026 @@ _malloc_postfork(void)
 
 	malloc_mutex_unlock(&base_mtx);
 
-	malloc_mutex_lock(&arenas_mtx);
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i] != NULL)
 			malloc_mutex_unlock(&arenas[i]->mtx);