
Annotation of src/lib/libc/stdlib/jemalloc.c, Revision 1.34.2.3

1.34.2.3! martin      1: /*     $NetBSD: jemalloc.c,v 1.34.2.2 2016/05/09 19:34:50 snj Exp $    */
1.2       ad          2:
1.1       ad          3: /*-
                      4:  * Copyright (C) 2006,2007 Jason Evans <jasone@FreeBSD.org>.
                      5:  * All rights reserved.
                      6:  *
                      7:  * Redistribution and use in source and binary forms, with or without
                      8:  * modification, are permitted provided that the following conditions
                      9:  * are met:
                     10:  * 1. Redistributions of source code must retain the above copyright
                     11:  *    notice(s), this list of conditions and the following disclaimer as
                     12:  *    the first lines of this file unmodified other than the possible
                     13:  *    addition of one or more copyright notices.
                     14:  * 2. Redistributions in binary form must reproduce the above copyright
                     15:  *    notice(s), this list of conditions and the following disclaimer in
                     16:  *    the documentation and/or other materials provided with the
                     17:  *    distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
                     20:  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     21:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
                     23:  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
                     26:  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
                     27:  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
                     28:  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
                     29:  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
                     30:  *
                     31:  *******************************************************************************
                     32:  *
                     33:  * This allocator implementation is designed to provide scalable performance
                     34:  * for multi-threaded programs on multi-processor systems.  The following
                     35:  * features are included for this purpose:
                     36:  *
                     37:  *   + Multiple arenas are used if there are multiple CPUs, which reduces lock
                     38:  *     contention and cache sloshing.
                     39:  *
                     40:  *   + Cache line sharing between arenas is avoided for internal data
                     41:  *     structures.
                     42:  *
                     43:  *   + Memory is managed in chunks and runs (chunks can be split into runs),
                     44:  *     rather than as individual pages.  This provides a constant-time
                     45:  *     mechanism for associating allocations with particular arenas.
                     46:  *
                     47:  * Allocation requests are rounded up to the nearest size class, and no record
                     48:  * of the original request size is maintained.  Allocations are broken into
                     49:  * categories according to size class.  Assuming runtime defaults, 4 kB pages
                     50:  * and a 16-byte quantum, the size classes in each category are as follows:
                     51:  *
                     52:  *   |=====================================|
                     53:  *   | Category | Subcategory    |    Size |
                     54:  *   |=====================================|
                     55:  *   | Small    | Tiny           |       2 |
                     56:  *   |          |                |       4 |
                     57:  *   |          |                |       8 |
                     58:  *   |          |----------------+---------|
                     59:  *   |          | Quantum-spaced |      16 |
                     60:  *   |          |                |      32 |
                     61:  *   |          |                |      48 |
                     62:  *   |          |                |     ... |
                     63:  *   |          |                |     480 |
                     64:  *   |          |                |     496 |
                     65:  *   |          |                |     512 |
                     66:  *   |          |----------------+---------|
                     67:  *   |          | Sub-page       |    1 kB |
                     68:  *   |          |                |    2 kB |
                     69:  *   |=====================================|
                     70:  *   | Large                     |    4 kB |
                     71:  *   |                           |    8 kB |
                     72:  *   |                           |   12 kB |
                     73:  *   |                           |     ... |
                     74:  *   |                           | 1012 kB |
                     75:  *   |                           | 1016 kB |
                     76:  *   |                           | 1020 kB |
                     77:  *   |=====================================|
                     78:  *   | Huge                      |    1 MB |
                     79:  *   |                           |    2 MB |
                     80:  *   |                           |    3 MB |
                     81:  *   |                           |     ... |
                     82:  *   |=====================================|
                     83:  *
                     84:  * A different mechanism is used for each category:
                     85:  *
                     86:  *   Small : Each size class is segregated into its own set of runs.  Each run
                     87:  *           maintains a bitmap of which regions are free/allocated.
                     88:  *
                     89:  *   Large : Each allocation is backed by a dedicated run.  Metadata are stored
                     90:  *           in the associated arena chunk header maps.
                     91:  *
                     92:  *   Huge : Each allocation is backed by a dedicated contiguous set of chunks.
                     93:  *          Metadata are stored in a separate red-black tree.
                     94:  *
                     95:  *******************************************************************************
                     96:  */
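
/*
 * Editorial sketch, not part of the original source: how a request size
 * maps onto the three categories above, hard-coding the documented
 * defaults (4 kB page, 16-byte quantum, 1 MB chunk).  The allocator
 * itself uses the run-time values bin_maxclass and arena_maxclass
 * computed during initialization.
 */
#if 0
static const char *
size_category_sketch(size_t size)
{

	if (size <= 2 * 1024)		/* bin_maxclass: pagesize/2 */
		return ("small");	/* region within a shared run */
	if (size <= 1020 * 1024)	/* arena_maxclass: chunk - header */
		return ("large");	/* dedicated run */
	return ("huge");		/* dedicated chunk(s) */
}
#endif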
                     97:
1.2       ad         98: /* LINTLIBRARY */
                     99:
                    100: #ifdef __NetBSD__
                    101: #  define xutrace(a, b)                utrace("malloc", (a), (b))
                    102: #  define __DECONST(x, y)      ((x)__UNCONST(y))
                    103: #  define NO_TLS
                    104: #else
                    105: #  define xutrace(a, b)                utrace((a), (b))
                    106: #endif /* __NetBSD__ */
                    107:
1.1       ad        108: /*
                    109:  * MALLOC_PRODUCTION disables assertions and statistics gathering.  It also
                    110:  * defaults the A and J runtime options to off.  These settings are appropriate
                    111:  * for production systems.
                    112:  */
1.2       ad        113: #define MALLOC_PRODUCTION
1.1       ad        114:
                    115: #ifndef MALLOC_PRODUCTION
                    116: #  define MALLOC_DEBUG
                    117: #endif
                    118:
                    119: #include <sys/cdefs.h>
1.2       ad        120: /* __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.147 2007/06/15 22:00:16 jasone Exp $"); */
1.34.2.3! martin    121: __RCSID("$NetBSD: jemalloc.c,v 1.34.2.2 2016/05/09 19:34:50 snj Exp $");
1.1       ad        122:
1.2       ad        123: #ifdef __FreeBSD__
1.1       ad        124: #include "libc_private.h"
                    125: #ifdef MALLOC_DEBUG
                    126: #  define _LOCK_DEBUG
                    127: #endif
                    128: #include "spinlock.h"
1.2       ad        129: #endif
1.1       ad        130: #include "namespace.h"
                    131: #include <sys/mman.h>
                    132: #include <sys/param.h>
1.2       ad        133: #ifdef __FreeBSD__
1.1       ad        134: #include <sys/stddef.h>
1.2       ad        135: #endif
1.1       ad        136: #include <sys/time.h>
                    137: #include <sys/types.h>
                    138: #include <sys/sysctl.h>
                    139: #include <sys/tree.h>
                    140: #include <sys/uio.h>
                    141: #include <sys/ktrace.h> /* Must come after several other sys/ includes. */
                    142:
1.2       ad        143: #ifdef __FreeBSD__
1.1       ad        144: #include <machine/atomic.h>
                    145: #include <machine/cpufunc.h>
1.2       ad        146: #endif
1.1       ad        147: #include <machine/vmparam.h>
                    148:
                    149: #include <errno.h>
                    150: #include <limits.h>
                    151: #include <pthread.h>
                    152: #include <sched.h>
                    153: #include <stdarg.h>
                    154: #include <stdbool.h>
                    155: #include <stdio.h>
                    156: #include <stdint.h>
                    157: #include <stdlib.h>
                    158: #include <string.h>
                    159: #include <strings.h>
                    160: #include <unistd.h>
                    161:
1.2       ad        162: #ifdef __NetBSD__
                    163: #  include <reentrant.h>
1.16      christos  164: #  include "extern.h"
                    165:
                    166: #define STRERROR_R(a, b, c)    __strerror_r(a, b, c);
                    167: /*
                    168:  * A non-localized version of strerror that avoids bringing in
                    169:  * stdio and the locale code.  All the malloc messages are in English,
                    170:  * so why bother?
                    171:  */
                    172: static int
                    173: __strerror_r(int e, char *s, size_t l)
                    174: {
                    175:        int rval;
                    176:        size_t slen;
                    177:
                    178:        if (e >= 0 && e < sys_nerr) {
                    179:                slen = strlcpy(s, sys_errlist[e], l);
                    180:                rval = 0;
                    181:        } else {
                    182:                slen = snprintf_ss(s, l, "Unknown error %u", e);
                    183:                rval = EINVAL;
                    184:        }
                    185:        return slen >= l ? ERANGE : rval;
                    186: }
1.2       ad        187: #endif
                    188:
                    189: #ifdef __FreeBSD__
1.16      christos  190: #define STRERROR_R(a, b, c)    strerror_r(a, b, c);
1.1       ad        191: #include "un-namespace.h"
1.2       ad        192: #endif
1.1       ad        193:
                    194: /* MALLOC_STATS enables statistics calculation. */
                    195: #ifndef MALLOC_PRODUCTION
                    196: #  define MALLOC_STATS
                    197: #endif
                    198:
                    199: #ifdef MALLOC_DEBUG
                    200: #  ifdef NDEBUG
                    201: #    undef NDEBUG
                    202: #  endif
                    203: #else
                    204: #  ifndef NDEBUG
                    205: #    define NDEBUG
                    206: #  endif
                    207: #endif
                    208: #include <assert.h>
                    209:
                    210: #ifdef MALLOC_DEBUG
                    211:    /* Disable inlining to make debugging easier. */
                    212: #  define inline
                    213: #endif
                    214:
                    215: /* Size of stack-allocated buffer passed to strerror_r(). */
                    216: #define        STRERROR_BUF            64
                    217:
                    218: /* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
1.31      martin    219:
                    220: /*
                    221:  * If you touch the TINY_MIN_2POW definition for any architecture, please
                    222:  * make sure to adjust the corresponding definition for JEMALLOC_TINY_MIN_2POW
                    223:  * in the gcc 4.8 tree in dist/gcc/tree-ssa-ccp.c and verify that a native
                    224:  * gcc is still buildable!
                    225:  */
                    226:
1.1       ad        227: #ifdef __i386__
                    228: #  define QUANTUM_2POW_MIN     4
                    229: #  define SIZEOF_PTR_2POW      2
                    230: #  define USE_BRK
                    231: #endif
                    232: #ifdef __ia64__
                    233: #  define QUANTUM_2POW_MIN     4
                    234: #  define SIZEOF_PTR_2POW      3
                    235: #endif
1.34      matt      236: #ifdef __aarch64__
                    237: #  define QUANTUM_2POW_MIN     4
                    238: #  define SIZEOF_PTR_2POW      3
                    239: #  define NO_TLS
                    240: #endif
1.1       ad        241: #ifdef __alpha__
                    242: #  define QUANTUM_2POW_MIN     4
                    243: #  define SIZEOF_PTR_2POW      3
1.30      skrll     244: #  define TINY_MIN_2POW                3
1.1       ad        245: #  define NO_TLS
                    246: #endif
                    247: #ifdef __sparc64__
                    248: #  define QUANTUM_2POW_MIN     4
                    249: #  define SIZEOF_PTR_2POW      3
1.31      martin    250: #  define TINY_MIN_2POW                3
1.1       ad        251: #  define NO_TLS
                    252: #endif
                    253: #ifdef __amd64__
                    254: #  define QUANTUM_2POW_MIN     4
                    255: #  define SIZEOF_PTR_2POW      3
1.31      martin    256: #  define TINY_MIN_2POW                3
1.1       ad        257: #endif
                    258: #ifdef __arm__
                    259: #  define QUANTUM_2POW_MIN     3
                    260: #  define SIZEOF_PTR_2POW      2
                    261: #  define USE_BRK
1.30      skrll     262: #  ifdef __ARM_EABI__
                    263: #    define TINY_MIN_2POW      3
                    264: #  endif
1.1       ad        265: #  define NO_TLS
                    266: #endif
                    267: #ifdef __powerpc__
                    268: #  define QUANTUM_2POW_MIN     4
                    269: #  define SIZEOF_PTR_2POW      2
                    270: #  define USE_BRK
1.32      martin    271: #  define TINY_MIN_2POW                3
1.1       ad        272: #endif
1.3       he        273: #if defined(__sparc__) && !defined(__sparc64__)
1.2       ad        274: #  define QUANTUM_2POW_MIN     4
                    275: #  define SIZEOF_PTR_2POW      2
                    276: #  define USE_BRK
                    277: #endif
                    278: #ifdef __vax__
                    279: #  define QUANTUM_2POW_MIN     4
                    280: #  define SIZEOF_PTR_2POW      2
                    281: #  define USE_BRK
                    282: #endif
                    283: #ifdef __sh__
                    284: #  define QUANTUM_2POW_MIN     4
                    285: #  define SIZEOF_PTR_2POW      2
                    286: #  define USE_BRK
                    287: #endif
                    288: #ifdef __m68k__
                    289: #  define QUANTUM_2POW_MIN     4
                    290: #  define SIZEOF_PTR_2POW      2
                    291: #  define USE_BRK
                    292: #endif
                    293: #ifdef __mips__
                    294: #  define QUANTUM_2POW_MIN     4
                    295: #  define SIZEOF_PTR_2POW      2
                    296: #  define USE_BRK
                    297: #endif
1.4       ad        298: #ifdef __hppa__
                    299: #  define QUANTUM_2POW_MIN     4
                    300: #  define SIZEOF_PTR_2POW      2
                    301: #  define USE_BRK
                    302: #endif
1.1       ad        303:
                    304: #define        SIZEOF_PTR              (1 << SIZEOF_PTR_2POW)
                    305:
                    306: /* sizeof(int) == (1 << SIZEOF_INT_2POW). */
                    307: #ifndef SIZEOF_INT_2POW
                    308: #  define SIZEOF_INT_2POW      2
                    309: #endif
                    310:
                    311: /*
                    312:  * Size and alignment of memory chunks that are allocated by the OS's virtual
                    313:  * memory system.
                    314:  */
                    315: #define        CHUNK_2POW_DEFAULT      20
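
/*
 * Editorial note, not in the original source: 2^20 bytes, i.e. the 1 MB
 * chunk size/alignment assumed by the size-class table at the top of
 * this file.
 */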
                    316:
                    317: /*
                    318:  * Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
                    319:  * so over-estimates are okay (up to a point), but under-estimates will
                    320:  * negatively affect performance.
                    321:  */
                    322: #define        CACHELINE_2POW          6
                    323: #define        CACHELINE               ((size_t)(1 << CACHELINE_2POW))
                    324:
                    325: /* Smallest size class to support. */
1.30      skrll     326: #ifndef TINY_MIN_2POW
                    327: #define        TINY_MIN_2POW           2
                    328: #endif
1.1       ad        329:
                    330: /*
                    331:  * Maximum size class that is a multiple of the quantum, but not (necessarily)
                    332:  * a power of 2.  Above this size, allocations are rounded up to the nearest
                    333:  * power of 2.
                    334:  */
                    335: #define        SMALL_MAX_2POW_DEFAULT  9
                    336: #define        SMALL_MAX_DEFAULT       (1 << SMALL_MAX_2POW_DEFAULT)
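
/*
 * Editorial example, not in the original source: with the default
 * SMALL_MAX of 512, a 600-byte request is no longer rounded up to a
 * multiple of the quantum (16); it is rounded up to the next power of
 * two, 1024, and lands in the first sub-page bin.
 */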
                    337:
                    338: /*
1.22      njoly     339:  * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
                    340:  * as small as possible such that this setting is still honored, without
                    341:  * violating other constraints.  The goal is to make runs as small as possible
                    342:  * without exceeding a per run external fragmentation threshold.
1.1       ad        343:  *
1.22      njoly     344:  * We use binary fixed point math for overhead computations, where the binary
                    345:  * point is implicitly RUN_BFP bits to the left.
1.1       ad        346:  *
1.22      njoly     347:  * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
                    348:  * honored for some/all object sizes, since there is one bit of header overhead
                    349:  * per object (plus a constant).  This constraint is relaxed (ignored) for runs
                    350:  * that are so small that the per-region overhead is greater than:
                    351:  *
                    352:  *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
1.1       ad        353:  */
1.22      njoly     354: #define RUN_BFP                        12
                    355: /*                              \/   Implicit binary fixed point. */
                    356: #define RUN_MAX_OVRHD          0x0000003dU
                    357: #define RUN_MAX_OVRHD_RELAX    0x00001800U
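
/*
 * Worked example, editorial and not in the original source: with
 * RUN_BFP = 12 the binary point sits 12 bits to the left, so
 * RUN_MAX_OVRHD = 0x3d represents 61/4096, i.e. about 1.49% maximum
 * header overhead.  A run whose header occupies hdr bytes of its
 * run_size bytes honors the limit when
 *
 *   (hdr << RUN_BFP) <= RUN_MAX_OVRHD * run_size
 *
 * e.g. a 4096-byte run may spend at most 61 bytes on its header.
 */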
1.1       ad        358:
                    359: /* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
                    360: #define RUN_MAX_SMALL_2POW     15
                    361: #define RUN_MAX_SMALL          (1 << RUN_MAX_SMALL_2POW)
                    362:
                    363: /******************************************************************************/
                    364:
1.2       ad        365: #ifdef __FreeBSD__
1.1       ad        366: /*
                    367:  * Mutexes based on spinlocks.  We can't use normal pthread mutexes, because
                    368:  * they require malloc()ed memory.
                    369:  */
                    370: typedef struct {
                    371:        spinlock_t      lock;
                    372: } malloc_mutex_t;
                    373:
                    374: /* Set to true once the allocator has been initialized. */
                    375: static bool malloc_initialized = false;
                    376:
                    377: /* Used to avoid initialization races. */
                    378: static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
1.2       ad        379: #else
                    380: #define        malloc_mutex_t  mutex_t
                    381:
                    382: /* Set to true once the allocator has been initialized. */
                    383: static bool malloc_initialized = false;
                    384:
1.34.2.1  martin    385: #ifdef _REENTRANT
1.2       ad        386: /* Used to avoid initialization races. */
                    387: static mutex_t init_lock = MUTEX_INITIALIZER;
                    388: #endif
1.34.2.1  martin    389: #endif
1.1       ad        390:
                    391: /******************************************************************************/
                    392: /*
                    393:  * Statistics data structures.
                    394:  */
                    395:
                    396: #ifdef MALLOC_STATS
                    397:
                    398: typedef struct malloc_bin_stats_s malloc_bin_stats_t;
                    399: struct malloc_bin_stats_s {
                    400:        /*
                    401:         * Number of allocation requests that corresponded to the size of this
                    402:         * bin.
                    403:         */
                    404:        uint64_t        nrequests;
                    405:
                    406:        /* Total number of runs created for this bin's size class. */
                    407:        uint64_t        nruns;
                    408:
                    409:        /*
                    410:         * Total number of runs reused by extracting them from the runs tree for
                    411:         * this bin's size class.
                    412:         */
                    413:        uint64_t        reruns;
                    414:
                    415:        /* High-water mark for this bin. */
                    416:        unsigned long   highruns;
                    417:
                    418:        /* Current number of runs in this bin. */
                    419:        unsigned long   curruns;
                    420: };
                    421:
                    422: typedef struct arena_stats_s arena_stats_t;
                    423: struct arena_stats_s {
                    424:        /* Number of bytes currently mapped. */
                    425:        size_t          mapped;
                    426:
                    427:        /* Per-size-category statistics. */
                    428:        size_t          allocated_small;
                    429:        uint64_t        nmalloc_small;
                    430:        uint64_t        ndalloc_small;
                    431:
                    432:        size_t          allocated_large;
                    433:        uint64_t        nmalloc_large;
                    434:        uint64_t        ndalloc_large;
                    435: };
                    436:
                    437: typedef struct chunk_stats_s chunk_stats_t;
                    438: struct chunk_stats_s {
                    439:        /* Number of chunks that were allocated. */
                    440:        uint64_t        nchunks;
                    441:
                    442:        /* High-water mark for number of chunks allocated. */
                    443:        unsigned long   highchunks;
                    444:
                    445:        /*
                    446:         * Current number of chunks allocated.  This value is not needed
                    447:         * for any other purpose; it is tracked only so that highchunks
                    448:         * can be kept up to date.
                    449:         */
                    450:        unsigned long   curchunks;
                    451: };
                    452:
                    453: #endif /* #ifdef MALLOC_STATS */
                    454:
                    455: /******************************************************************************/
                    456: /*
                    457:  * Chunk data structures.
                    458:  */
                    459:
                    460: /* Tree of chunks. */
                    461: typedef struct chunk_node_s chunk_node_t;
                    462: struct chunk_node_s {
                    463:        /* Linkage for the chunk tree. */
                    464:        RB_ENTRY(chunk_node_s) link;
                    465:
                    466:        /*
                    467:         * Pointer to the chunk that this tree node is responsible for.  In some
                    468:         * (but certainly not all) cases, this data structure is placed at the
                    469:         * beginning of the corresponding chunk, so this field may point to this
                    470:         * node.
                    471:         */
                    472:        void    *chunk;
                    473:
                    474:        /* Total chunk size. */
                    475:        size_t  size;
                    476: };
                    477: typedef struct chunk_tree_s chunk_tree_t;
                    478: RB_HEAD(chunk_tree_s, chunk_node_s);
                    479:
                    480: /******************************************************************************/
                    481: /*
                    482:  * Arena data structures.
                    483:  */
                    484:
                    485: typedef struct arena_s arena_t;
                    486: typedef struct arena_bin_s arena_bin_t;
                    487:
                    488: typedef struct arena_chunk_map_s arena_chunk_map_t;
                    489: struct arena_chunk_map_s {
                    490:        /* Number of pages in run. */
                    491:        uint32_t        npages;
                    492:        /*
                    493:         * Position within run.  For a free run, this is POS_FREE for the first
                    494:         * and last pages.  The POS_FREE special value makes it possible to
                    495:         * quickly coalesce free runs.
                    496:         *
                    497:         * This is the limiting factor for chunksize; there can be at most 2^31
                    498:         * pages in a run.
                    499:         */
                    500: #define POS_FREE ((uint32_t)0xffffffffU)
                    501:        uint32_t        pos;
                    502: };
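
/*
 * Editorial note, not in the original source: because only the first
 * and last pages of a free run carry POS_FREE (and a valid npages),
 * freeing pages [i .. i+n-1] can coalesce in O(1): map[i-1] being
 * POS_FREE identifies a preceding free run and gives its length via
 * npages, and likewise map[i+n] for a following free run, with no
 * scanning.
 */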
                    503:
                    504: /* Arena chunk header. */
                    505: typedef struct arena_chunk_s arena_chunk_t;
                    506: struct arena_chunk_s {
                    507:        /* Arena that owns the chunk. */
                    508:        arena_t *arena;
                    509:
                    510:        /* Linkage for the arena's chunk tree. */
                    511:        RB_ENTRY(arena_chunk_s) link;
                    512:
                    513:        /*
                    514:         * Number of pages in use.  This is maintained in order to make
                    515:         * detection of empty chunks fast.
                    516:         */
                    517:        uint32_t pages_used;
                    518:
                    519:        /*
                    520:         * Every time a free run larger than this value is created/coalesced,
                    521:         * this value is increased.  The only way that the value decreases is if
                    522:         * arena_run_alloc() fails to find a free run as large as advertised by
                    523:         * this value.
                    524:         */
                    525:        uint32_t max_frun_npages;
                    526:
                    527:        /*
                    528:         * Every time a free run that starts at an earlier page than this value
                    529:         * is created/coalesced, this value is decreased.  It is reset in a
                    530:         * similar fashion to max_frun_npages.
                    531:         */
                    532:        uint32_t min_frun_ind;
                    533:
                    534:        /*
                    535:         * Map of pages within chunk that keeps track of free/large/small.  For
                    536:         * free runs, only the map entries for the first and last pages are
                    537:         * kept up to date, so that free runs can be quickly coalesced.
                    538:         */
                    539:        arena_chunk_map_t map[1]; /* Dynamically sized. */
                    540: };
                    541: typedef struct arena_chunk_tree_s arena_chunk_tree_t;
                    542: RB_HEAD(arena_chunk_tree_s, arena_chunk_s);
                    543:
                    544: typedef struct arena_run_s arena_run_t;
                    545: struct arena_run_s {
                    546:        /* Linkage for run trees. */
                    547:        RB_ENTRY(arena_run_s) link;
                    548:
                    549: #ifdef MALLOC_DEBUG
                    550:        uint32_t        magic;
                    551: #  define ARENA_RUN_MAGIC 0x384adf93
                    552: #endif
                    553:
                    554:        /* Bin this run is associated with. */
                    555:        arena_bin_t     *bin;
                    556:
                    557:        /* Index of first element that might have a free region. */
                    558:        unsigned        regs_minelm;
                    559:
                    560:        /* Number of free regions in run. */
                    561:        unsigned        nfree;
                    562:
                    563:        /* Bitmask of in-use regions (0: in use, 1: free). */
                    564:        unsigned        regs_mask[1]; /* Dynamically sized. */
                    565: };
                    566: typedef struct arena_run_tree_s arena_run_tree_t;
                    567: RB_HEAD(arena_run_tree_s, arena_run_s);
                    568:
                    569: struct arena_bin_s {
                    570:        /*
                    571:         * Current run being used to service allocations of this bin's size
                    572:         * class.
                    573:         */
                    574:        arena_run_t     *runcur;
                    575:
                    576:        /*
                    577:         * Tree of non-full runs.  This tree is used when looking for an
                    578:         * existing run when runcur is no longer usable.  We choose the
                    579:         * non-full run that is lowest in memory; this policy tends to keep
                    580:         * objects packed well, and it can also help reduce the number of
                    581:         * almost-empty chunks.
                    582:         */
                    583:        arena_run_tree_t runs;
                    584:
                    585:        /* Size of regions in a run for this bin's size class. */
                    586:        size_t          reg_size;
                    587:
                    588:        /* Total size of a run for this bin's size class. */
                    589:        size_t          run_size;
                    590:
                    591:        /* Total number of regions in a run for this bin's size class. */
                    592:        uint32_t        nregs;
                    593:
                    594:        /* Number of elements in a run's regs_mask for this bin's size class. */
                    595:        uint32_t        regs_mask_nelms;
                    596:
                    597:        /* Offset of first region in a run for this bin's size class. */
                    598:        uint32_t        reg0_offset;
                    599:
                    600: #ifdef MALLOC_STATS
                    601:        /* Bin statistics. */
                    602:        malloc_bin_stats_t stats;
                    603: #endif
                    604: };
                    605:
                    606: struct arena_s {
                    607: #ifdef MALLOC_DEBUG
                    608:        uint32_t                magic;
                    609: #  define ARENA_MAGIC 0x947d3d24
                    610: #endif
                    611:
                    612:        /* All operations on this arena require that mtx be locked. */
                    613:        malloc_mutex_t          mtx;
                    614:
                    615: #ifdef MALLOC_STATS
                    616:        arena_stats_t           stats;
                    617: #endif
                    618:
                    619:        /*
                    620:         * Tree of chunks this arena manages.
                    621:         */
                    622:        arena_chunk_tree_t      chunks;
                    623:
                    624:        /*
                    625:         * In order to avoid rapid chunk allocation/deallocation when an arena
                    626:         * oscillates right on the cusp of needing a new chunk, cache the most
                    627:         * recently freed chunk.  This caching is disabled by opt_hint.
                    628:         *
                    629:         * There is one spare chunk per arena, rather than one spare total, in
                    630:         * order to avoid interactions between multiple threads that could make
                    631:         * a single spare inadequate.
                    632:         */
                    633:        arena_chunk_t *spare;
                    634:
                    635:        /*
                    636:         * bins is used to store trees of runs holding regions of the following sizes,
                    637:         * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
                    638:         *
                    639:         *   bins[i] | size |
                    640:         *   --------+------+
                    641:         *        0  |    2 |
                    642:         *        1  |    4 |
                    643:         *        2  |    8 |
                    644:         *   --------+------+
                    645:         *        3  |   16 |
                    646:         *        4  |   32 |
                    647:         *        5  |   48 |
                    648:         *        6  |   64 |
                    649:         *           :      :
                    650:         *           :      :
                    651:         *       33  |  496 |
                    652:         *       34  |  512 |
                    653:         *   --------+------+
                    654:         *       35  | 1024 |
                    655:         *       36  | 2048 |
                    656:         *   --------+------+
                    657:         */
                    658:        arena_bin_t             bins[1]; /* Dynamically sized. */
                    659: };
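
/*
 * Editorial sketch, not part of the original source: the bin index a
 * request would map to under the defaults tabulated above.  The real
 * code derives the boundaries from ntbins/nqbins/nsbins at run time;
 * the constants here are the documented defaults only.
 */
#if 0
static unsigned
bin_index_sketch(size_t size)
{

	if (size <= 8) {
		/* Tiny: 2, 4, 8 -> bins 0, 1, 2. */
		if (size <= 2)
			return (0);
		return (size <= 4 ? 1 : 2);
	}
	if (size <= 512) {
		/* Quantum-spaced: 16, 32, ..., 512 -> bins 3 .. 34. */
		return ((unsigned)(2 + (size + 15) / 16));
	}
	/* Sub-page: 1024, 2048 -> bins 35, 36. */
	return (size <= 1024 ? 35 : 36);
}
#endif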
                    660:
                    661: /******************************************************************************/
                    662: /*
                    663:  * Data.
                    664:  */
                    665:
                    666: /* Number of CPUs. */
                    667: static unsigned                ncpus;
                    668:
                    669: /* VM page size. */
                    670: static size_t          pagesize;
                    671: static size_t          pagesize_mask;
1.9       christos  672: static int             pagesize_2pow;
1.1       ad        673:
                    674: /* Various bin-related settings. */
                    675: static size_t          bin_maxclass; /* Max size class for bins. */
                    676: static unsigned                ntbins; /* Number of (2^n)-spaced tiny bins. */
                    677: static unsigned                nqbins; /* Number of quantum-spaced bins. */
                    678: static unsigned                nsbins; /* Number of (2^n)-spaced sub-page bins. */
                    679: static size_t          small_min;
                    680: static size_t          small_max;
                    681:
                    682: /* Various quantum-related settings. */
                    683: static size_t          quantum;
                    684: static size_t          quantum_mask; /* (quantum - 1). */
                    685:
                    686: /* Various chunk-related settings. */
                    687: static size_t          chunksize;
                    688: static size_t          chunksize_mask; /* (chunksize - 1). */
1.5       yamt      689: static int             chunksize_2pow;
1.1       ad        690: static unsigned                chunk_npages;
                    691: static unsigned                arena_chunk_header_npages;
                    692: static size_t          arena_maxclass; /* Max size class for arenas. */
                    693:
                    694: /********/
                    695: /*
                    696:  * Chunks.
                    697:  */
                    698:
1.34.2.1  martin    699: #ifdef _REENTRANT
1.1       ad        700: /* Protects chunk-related data structures. */
                    701: static malloc_mutex_t  chunks_mtx;
1.34.2.1  martin    702: #endif
1.1       ad        703:
                    704: /* Tree of chunks that are stand-alone huge allocations. */
                    705: static chunk_tree_t    huge;
                    706:
                    707: #ifdef USE_BRK
                    708: /*
                    709:  * Try to use brk for chunk-size allocations, due to address space constraints.
                    710:  */
                    711: /*
                    712:  * Protects sbrk() calls.  This must be separate from chunks_mtx, since
                    713:  * base_pages_alloc() also uses sbrk(), but cannot lock chunks_mtx (doing so
                    714:  * could cause recursive lock acquisition).
                    715:  */
                    716: static malloc_mutex_t  brk_mtx;
                    717: /* Result of first sbrk(0) call. */
                    718: static void            *brk_base;
                    719: /* Current end of brk, or ((void *)-1) if brk is exhausted. */
                    720: static void            *brk_prev;
                    721: /* Current upper limit on brk addresses. */
                    722: static void            *brk_max;
                    723: #endif
                    724:
                    725: #ifdef MALLOC_STATS
                    726: /* Huge allocation statistics. */
                    727: static uint64_t                huge_nmalloc;
                    728: static uint64_t                huge_ndalloc;
1.8       yamt      729: static uint64_t                huge_nralloc;
1.1       ad        730: static size_t          huge_allocated;
                    731: #endif
                    732:
                    733: /*
                    734:  * Tree of chunks that were previously allocated.  This is used when allocating
                    735:  * chunks, in an attempt to re-use address space.
                    736:  */
                    737: static chunk_tree_t    old_chunks;
                    738:
                    739: /****************************/
                    740: /*
                    741:  * base (internal allocation).
                    742:  */
                    743:
                    744: /*
                    745:  * Current pages that are being used for internal memory allocations.  These
                    746:  * pages are carved up in cacheline-size quanta, so that there is no chance of
                    747:  * false cache line sharing.
                    748:  */
                    749: static void            *base_pages;
                    750: static void            *base_next_addr;
                    751: static void            *base_past_addr; /* Addr immediately past base_pages. */
                    752: static chunk_node_t    *base_chunk_nodes; /* LIFO cache of chunk nodes. */
1.34.2.1  martin    753: #ifdef _REENTRANT
1.1       ad        754: static malloc_mutex_t  base_mtx;
1.34.2.1  martin    755: #endif
1.1       ad        756: #ifdef MALLOC_STATS
                    757: static size_t          base_mapped;
                    758: #endif
                    759:
                    760: /********/
                    761: /*
                    762:  * Arenas.
                    763:  */
                    764:
                    765: /*
                    766:  * Arenas that are used to service external requests.  Not all elements of the
                    767:  * arenas array are necessarily used; arenas are created lazily as needed.
                    768:  */
                    769: static arena_t         **arenas;
                    770: static unsigned                narenas;
                    771: static unsigned                next_arena;
1.34.2.1  martin    772: #ifdef _REENTRANT
1.1       ad        773: static malloc_mutex_t  arenas_mtx; /* Protects arenas initialization. */
1.34.2.1  martin    774: #endif
1.1       ad        775:
1.15      ad        776: /*
                    777:  * Map of pthread_self() --> arenas[???], used for selecting an arena to use
                    778:  * for allocations.
                    779:  */
1.34.2.1  martin    780: #ifndef NO_TLS
                    781: static __thread arena_t        **arenas_map;
1.15      ad        782: #else
1.34.2.1  martin    783: static arena_t **arenas_map;
                    784: #endif
                    785:
                    786: #if !defined(NO_TLS) || !defined(_REENTRANT)
                    787: # define       get_arenas_map()        (arenas_map)
                    788: # define       set_arenas_map(x)       (arenas_map = x)
                    789: #else
                    790:
                    791: static thread_key_t arenas_map_key = -1;
                    792:
                    793: static inline arena_t **
                    794: get_arenas_map(void)
                    795: {
                    796:        if (!__isthreaded)
                    797:                return arenas_map;
                    798:
                    799:        if (arenas_map_key == -1) {
                    800:                (void)thr_keycreate(&arenas_map_key, NULL);
                    801:                if (arenas_map != NULL) {
                    802:                        thr_setspecific(arenas_map_key, arenas_map);
                    803:                        arenas_map = NULL;
                    804:                }
                    805:        }
                    806:
                    807:        return thr_getspecific(arenas_map_key);
                    808: }
                    809:
                    810: static __inline void
                    811: set_arenas_map(arena_t **a)
                    812: {
                    813:        if (!__isthreaded) {
                    814:                arenas_map = a;
                    815:                return;
                    816:        }
                    817:
                    818:        if (arenas_map_key == -1) {
                    819:                (void)thr_keycreate(&arenas_map_key, NULL);
                    820:                if (arenas_map != NULL) {
                    821:                        _DIAGASSERT(arenas_map == a);
                    822:                        arenas_map = NULL;
                    823:                }
                    824:        }
                    825:
                    826:        thr_setspecific(arenas_map_key, a);
                    827: }
1.15      ad        828: #endif
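
/*
 * Editorial note, not in the original source: before the process goes
 * threaded (__isthreaded == 0) the mapping lives in the static
 * arenas_map; the first threaded access creates the key and migrates
 * that early value into thread-specific data, so allocations made
 * before libpthread initializes keep resolving to the same arena.
 */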
1.1       ad        829:
                    830: #ifdef MALLOC_STATS
                    831: /* Chunk statistics. */
                    832: static chunk_stats_t   stats_chunks;
                    833: #endif
                    834:
                    835: /*******************************/
                    836: /*
                    837:  * Runtime configuration options.
                    838:  */
                    839: const char     *_malloc_options;
                    840:
                    841: #ifndef MALLOC_PRODUCTION
                    842: static bool    opt_abort = true;
                    843: static bool    opt_junk = true;
                    844: #else
                    845: static bool    opt_abort = false;
                    846: static bool    opt_junk = false;
                    847: #endif
                    848: static bool    opt_hint = false;
                    849: static bool    opt_print_stats = false;
1.9       christos  850: static int     opt_quantum_2pow = QUANTUM_2POW_MIN;
                    851: static int     opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
                    852: static int     opt_chunk_2pow = CHUNK_2POW_DEFAULT;
1.1       ad        853: static bool    opt_utrace = false;
                    854: static bool    opt_sysv = false;
                    855: static bool    opt_xmalloc = false;
                    856: static bool    opt_zero = false;
                    857: static int32_t opt_narenas_lshift = 0;
                    858:
                    859: typedef struct {
                    860:        void    *p;
                    861:        size_t  s;
                    862:        void    *r;
                    863: } malloc_utrace_t;
                    864:
                    865: #define        UTRACE(a, b, c)                                                 \
                    866:        if (opt_utrace) {                                               \
1.2       ad        867:                malloc_utrace_t ut;                                     \
                    868:                ut.p = a;                                               \
                    869:                ut.s = b;                                               \
                    870:                ut.r = c;                                               \
                    871:                xutrace(&ut, sizeof(ut));                               \
1.1       ad        872:        }
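
/*
 * Editorial note, not in the original source: the entry points later in
 * this file invoke the macro in three shapes,
 *
 *   UTRACE(0, size, ret)    allocation
 *   UTRACE(ptr, size, ret)  reallocation
 *   UTRACE(ptr, 0, 0)       free
 *
 * which is enough for kdump(1) to reconstruct the allocation stream.
 */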
                    873:
                    874: /******************************************************************************/
                    875: /*
                    876:  * Begin function prototypes for non-inline static functions.
                    877:  */
                    878:
                    879: static void    wrtmessage(const char *p1, const char *p2, const char *p3,
                    880:                const char *p4);
                    881: #ifdef MALLOC_STATS
                    882: static void    malloc_printf(const char *format, ...);
                    883: #endif
1.28      christos  884: static char    *size_t2s(size_t x, char *s);
1.1       ad        885: static bool    base_pages_alloc(size_t minsize);
                    886: static void    *base_alloc(size_t size);
                    887: static chunk_node_t *base_chunk_node_alloc(void);
                    888: static void    base_chunk_node_dealloc(chunk_node_t *node);
                    889: #ifdef MALLOC_STATS
                    890: static void    stats_print(arena_t *arena);
                    891: #endif
                    892: static void    *pages_map(void *addr, size_t size);
1.5       yamt      893: static void    *pages_map_align(void *addr, size_t size, int align);
1.1       ad        894: static void    pages_unmap(void *addr, size_t size);
                    895: static void    *chunk_alloc(size_t size);
                    896: static void    chunk_dealloc(void *chunk, size_t size);
                    897: static void    arena_run_split(arena_t *arena, arena_run_t *run, size_t size);
                    898: static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
                    899: static void    arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
                    900: static arena_run_t *arena_run_alloc(arena_t *arena, size_t size);
                    901: static void    arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size);
                    902: static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
                    903: static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
                    904: static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
                    905: static void    *arena_malloc(arena_t *arena, size_t size);
                    906: static void    *arena_palloc(arena_t *arena, size_t alignment, size_t size,
                    907:     size_t alloc_size);
                    908: static size_t  arena_salloc(const void *ptr);
                    909: static void    *arena_ralloc(void *ptr, size_t size, size_t oldsize);
                    910: static void    arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
                    911: static bool    arena_new(arena_t *arena);
                    912: static arena_t *arenas_extend(unsigned ind);
                    913: static void    *huge_malloc(size_t size);
                    914: static void    *huge_palloc(size_t alignment, size_t size);
                    915: static void    *huge_ralloc(void *ptr, size_t size, size_t oldsize);
                    916: static void    huge_dalloc(void *ptr);
                    917: static void    *imalloc(size_t size);
                    918: static void    *ipalloc(size_t alignment, size_t size);
                    919: static void    *icalloc(size_t size);
                    920: static size_t  isalloc(const void *ptr);
                    921: static void    *iralloc(void *ptr, size_t size);
                    922: static void    idalloc(void *ptr);
                    923: static void    malloc_print_stats(void);
                    924: static bool    malloc_init_hard(void);
                    925:
                    926: /*
                    927:  * End function prototypes.
                    928:  */
                    929: /******************************************************************************/
                    930: /*
                    931:  * Begin mutex.
                    932:  */
                    933:
1.15      ad        934: #ifdef __NetBSD__
1.2       ad        935: #define        malloc_mutex_init(m)    mutex_init(m, NULL)
                    936: #define        malloc_mutex_lock(m)    mutex_lock(m)
                    937: #define        malloc_mutex_unlock(m)  mutex_unlock(m)
1.15      ad        938: #else  /* __NetBSD__ */
                    939: static inline void
                    940: malloc_mutex_init(malloc_mutex_t *a_mutex)
                    941: {
                    942:        static const spinlock_t lock = _SPINLOCK_INITIALIZER;
                    943:
                    944:        a_mutex->lock = lock;
                    945: }
                    946:
                    947: static inline void
                    948: malloc_mutex_lock(malloc_mutex_t *a_mutex)
                    949: {
                    950:
                    951:        if (__isthreaded)
                    952:                _SPINLOCK(&a_mutex->lock);
                    953: }
                    954:
                    955: static inline void
                    956: malloc_mutex_unlock(malloc_mutex_t *a_mutex)
                    957: {
                    958:
                    959:        if (__isthreaded)
                    960:                _SPINUNLOCK(&a_mutex->lock);
                    961: }
                    962: #endif /* __NetBSD__ */
1.1       ad        963:
                    964: /*
                    965:  * End mutex.
                    966:  */
                    967: /******************************************************************************/
                    968: /*
                    969:  * Begin Utility functions/macros.
                    970:  */
                    971:
                    972: /* Return the chunk address for allocation address a. */
                    973: #define        CHUNK_ADDR2BASE(a)                                              \
                    974:        ((void *)((uintptr_t)(a) & ~chunksize_mask))
                    975:
                    976: /* Return the chunk offset of address a. */
                    977: #define        CHUNK_ADDR2OFFSET(a)                                            \
                    978:        ((size_t)((uintptr_t)(a) & chunksize_mask))
                    979:
                    980: /* Return the smallest chunk multiple that is >= s. */
                    981: #define        CHUNK_CEILING(s)                                                \
                    982:        (((s) + chunksize_mask) & ~chunksize_mask)
                    983:
                    984: /* Return the smallest cacheline multiple that is >= s. */
                    985: #define        CACHELINE_CEILING(s)                                            \
                    986:        (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
                    987:
                    988: /* Return the smallest quantum multiple that is >= a. */
                    989: #define        QUANTUM_CEILING(a)                                              \
                    990:        (((a) + quantum_mask) & ~quantum_mask)
                    991:
                    992: /* Return the smallest pagesize multiple that is >= s. */
                    993: #define        PAGE_CEILING(s)                                                 \
                    994:        (((s) + pagesize_mask) & ~pagesize_mask)
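
/*
 * Worked examples, editorial and not in the original source, assuming a
 * 4096-byte page: PAGE_CEILING(1) == 4096 and PAGE_CEILING(4096) ==
 * 4096.  All of these macros rely on the sizes being powers of two, so
 * rounding up is (x + mask) & ~mask and rounding down is x & ~mask.
 */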
                    995:
                    996: /* Compute the smallest power of 2 that is >= x. */
                    997: static inline size_t
                    998: pow2_ceil(size_t x)
                    999: {
                   1000:
                   1001:        x--;
                   1002:        x |= x >> 1;
                   1003:        x |= x >> 2;
                   1004:        x |= x >> 4;
                   1005:        x |= x >> 8;
                   1006:        x |= x >> 16;
                   1007: #if (SIZEOF_PTR == 8)
                   1008:        x |= x >> 32;
                   1009: #endif
                   1010:        x++;
                   1011:        return (x);
                   1012: }
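
/*
 * Editorial note, not in the original source: the shift-or cascade
 * smears the highest set bit of (x - 1) into all lower positions, so
 * the final increment carries to the next power of two, e.g.
 * pow2_ceil(1000) == 1024 and pow2_ceil(1024) == 1024.  Beware that
 * pow2_ceil(0) wraps around to 0.
 */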
                   1013:
                   1014: static void
                   1015: wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
                   1016: {
                   1017:
1.16      christos 1018:        write(STDERR_FILENO, p1, strlen(p1));
                   1019:        write(STDERR_FILENO, p2, strlen(p2));
                   1020:        write(STDERR_FILENO, p3, strlen(p3));
                   1021:        write(STDERR_FILENO, p4, strlen(p4));
1.1       ad       1022: }
                   1023:
                   1024: void   (*_malloc_message)(const char *p1, const char *p2, const char *p3,
                   1025:            const char *p4) = wrtmessage;
                   1026:
                   1027: #ifdef MALLOC_STATS
                   1028: /*
                   1029:  * Print to stderr in such a way as to (hopefully) avoid memory allocation.
                   1030:  */
                   1031: static void
                   1032: malloc_printf(const char *format, ...)
                   1033: {
                   1034:        char buf[4096];
                   1035:        va_list ap;
                   1036:
                   1037:        va_start(ap, format);
                   1038:        vsnprintf(buf, sizeof(buf), format, ap);
                   1039:        va_end(ap);
                   1040:        _malloc_message(buf, "", "", "");
                   1041: }
                   1042: #endif
                   1043:
                   1044: /*
                   1045:  * We don't want to depend on vsnprintf() for production builds, since that can
1.28      christos 1046:  * cause unnecessary bloat for static binaries.  size_t2s() provides minimal
1.1       ad       1047:  * integer printing functionality, so that malloc_printf() use can be limited to
                   1048:  * MALLOC_STATS code.
                   1049:  */
                   1050: #define UMAX2S_BUFSIZE 21
                   1051: static char *
1.28      christos 1052: size_t2s(size_t x, char *s)
1.1       ad       1053: {
                   1054:        unsigned i;
                   1055:
                   1056:        /* Make sure UMAX2S_BUFSIZE is large enough. */
1.7       yamt     1057:        /* LINTED */
1.25      christos 1058:        assert(sizeof(size_t) <= 8);
1.1       ad       1059:
                   1060:        i = UMAX2S_BUFSIZE - 1;
                   1061:        s[i] = '\0';
                   1062:        do {
                   1063:                i--;
1.2       ad       1064:                s[i] = "0123456789"[(int)(x % 10)];
                   1065:                x /= (uintmax_t)10LL;
1.1       ad       1066:        } while (x > 0);
                   1067:
                   1068:        return (&s[i]);
                   1069: }
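
                          /*
                           * Typical use (sketch; 'sz' here stands for any size_t value):
                           * the buffer is caller-provided and the returned pointer indexes
                           * into it, e.g.
                           *
                           *	char buf[UMAX2S_BUFSIZE];
                           *	_malloc_message("size: ", size_t2s(sz, buf), "\n", "");
                           */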
                   1070:
                   1071: /******************************************************************************/
                   1072:
                   1073: static bool
                   1074: base_pages_alloc(size_t minsize)
                   1075: {
1.2       ad       1076:        size_t csize = 0;
1.1       ad       1077:
                   1078: #ifdef USE_BRK
                   1079:        /*
                   1080:         * Do special brk allocation here, since base allocations don't need to
                   1081:         * be chunk-aligned.
                   1082:         */
                   1083:        if (brk_prev != (void *)-1) {
                   1084:                void *brk_cur;
                   1085:                intptr_t incr;
                   1086:
                   1087:                if (minsize != 0)
                   1088:                        csize = CHUNK_CEILING(minsize);
                   1089:
                   1090:                malloc_mutex_lock(&brk_mtx);
                   1091:                do {
                   1092:                        /* Get the current end of brk. */
                   1093:                        brk_cur = sbrk(0);
                   1094:
                   1095:                        /*
                   1096:                         * Calculate how much padding is necessary to
                   1097:                         * chunk-align the end of brk.  Don't worry about
                   1098:                         * brk_cur not being chunk-aligned though.
                   1099:                         */
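                                                 /*
                                                  * For example, with 1 MB chunks (chunksize == 0x100000),
                                                  * brk_cur == 0x10f000 gives incr == 0xf1000, i.e. just
                                                  * enough to reach the next chunk boundary at 0x200000.
                                                  */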
                   1100:                        incr = (intptr_t)chunksize
                   1101:                            - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
1.20      lukem    1102:                        assert(incr >= 0);
                   1103:                        if ((size_t)incr < minsize)
1.1       ad       1104:                                incr += csize;
                   1105:
                   1106:                        brk_prev = sbrk(incr);
                   1107:                        if (brk_prev == brk_cur) {
                   1108:                                /* Success. */
                   1109:                                malloc_mutex_unlock(&brk_mtx);
                   1110:                                base_pages = brk_cur;
                   1111:                                base_next_addr = base_pages;
                   1112:                                base_past_addr = (void *)((uintptr_t)base_pages
                   1113:                                    + incr);
                   1114: #ifdef MALLOC_STATS
                   1115:                                base_mapped += incr;
                   1116: #endif
                   1117:                                return (false);
                   1118:                        }
                   1119:                } while (brk_prev != (void *)-1);
                   1120:                malloc_mutex_unlock(&brk_mtx);
                   1121:        }
                   1122:        if (minsize == 0) {
                   1123:                /*
                   1124:                 * Failure during initialization doesn't matter, so avoid
                   1125:                 * falling through to the mmap-based page mapping code.
                   1126:                 */
                   1127:                return (true);
                   1128:        }
                   1129: #endif
                   1130:        assert(minsize != 0);
                   1131:        csize = PAGE_CEILING(minsize);
                   1132:        base_pages = pages_map(NULL, csize);
                   1133:        if (base_pages == NULL)
                   1134:                return (true);
                   1135:        base_next_addr = base_pages;
                   1136:        base_past_addr = (void *)((uintptr_t)base_pages + csize);
                   1137: #ifdef MALLOC_STATS
                   1138:        base_mapped += csize;
                   1139: #endif
                   1140:        return (false);
                   1141: }
                   1142:
                   1143: static void *
                   1144: base_alloc(size_t size)
                   1145: {
                   1146:        void *ret;
                   1147:        size_t csize;
                   1148:
                   1149:        /* Round size up to nearest multiple of the cacheline size. */
                   1150:        csize = CACHELINE_CEILING(size);
                   1151:
                   1152:        malloc_mutex_lock(&base_mtx);
                   1153:
                   1154:        /* Make sure there's enough space for the allocation. */
                   1155:        if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
                   1156:                if (base_pages_alloc(csize)) {
                   1157:                        ret = NULL;
                   1158:                        goto RETURN;
                   1159:                }
                   1160:        }
                   1161:
                   1162:        /* Allocate. */
                   1163:        ret = base_next_addr;
                   1164:        base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
                   1165:
                   1166: RETURN:
                   1167:        malloc_mutex_unlock(&base_mtx);
                   1168:        return (ret);
                   1169: }
                   1170:
                   1171: static chunk_node_t *
                   1172: base_chunk_node_alloc(void)
                   1173: {
                   1174:        chunk_node_t *ret;
                   1175:
                   1176:        malloc_mutex_lock(&base_mtx);
                   1177:        if (base_chunk_nodes != NULL) {
                   1178:                ret = base_chunk_nodes;
1.2       ad       1179:                /* LINTED */
1.1       ad       1180:                base_chunk_nodes = *(chunk_node_t **)ret;
                   1181:                malloc_mutex_unlock(&base_mtx);
                   1182:        } else {
                   1183:                malloc_mutex_unlock(&base_mtx);
                   1184:                ret = (chunk_node_t *)base_alloc(sizeof(chunk_node_t));
                   1185:        }
                   1186:
                   1187:        return (ret);
                   1188: }
                   1189:
                   1190: static void
                   1191: base_chunk_node_dealloc(chunk_node_t *node)
                   1192: {
                   1193:
                   1194:        malloc_mutex_lock(&base_mtx);
1.2       ad       1195:        /* LINTED */
1.1       ad       1196:        *(chunk_node_t **)node = base_chunk_nodes;
                   1197:        base_chunk_nodes = node;
                   1198:        malloc_mutex_unlock(&base_mtx);
                   1199: }
                   1200:
                   1201: /******************************************************************************/
                   1202:
                   1203: #ifdef MALLOC_STATS
                   1204: static void
                   1205: stats_print(arena_t *arena)
                   1206: {
                   1207:        unsigned i;
                   1208:        int gap_start;
                   1209:
                   1210:        malloc_printf(
                   1211:            "          allocated/mapped            nmalloc      ndalloc\n");
1.2       ad       1212:
                   1213:        malloc_printf("small: %12zu %-12s %12llu %12llu\n",
1.1       ad       1214:            arena->stats.allocated_small, "", arena->stats.nmalloc_small,
                   1215:            arena->stats.ndalloc_small);
1.2       ad       1216:        malloc_printf("large: %12zu %-12s %12llu %12llu\n",
1.1       ad       1217:            arena->stats.allocated_large, "", arena->stats.nmalloc_large,
                   1218:            arena->stats.ndalloc_large);
1.2       ad       1219:        malloc_printf("total: %12zu/%-12zu %12llu %12llu\n",
1.1       ad       1220:            arena->stats.allocated_small + arena->stats.allocated_large,
                   1221:            arena->stats.mapped,
                   1222:            arena->stats.nmalloc_small + arena->stats.nmalloc_large,
                   1223:            arena->stats.ndalloc_small + arena->stats.ndalloc_large);
                   1224:
                   1225:        malloc_printf("bins:     bin   size regs pgs  requests   newruns"
                   1226:            "    reruns maxruns curruns\n");
                   1227:        for (i = 0, gap_start = -1; i < ntbins + nqbins + nsbins; i++) {
                   1228:                if (arena->bins[i].stats.nrequests == 0) {
                   1229:                        if (gap_start == -1)
                   1230:                                gap_start = i;
                   1231:                } else {
                   1232:                        if (gap_start != -1) {
                   1233:                                if (i > gap_start + 1) {
                   1234:                                        /* Gap of more than one size class. */
                   1235:                                        malloc_printf("[%u..%u]\n",
                   1236:                                            gap_start, i - 1);
                   1237:                                } else {
                   1238:                                        /* Gap of one size class. */
                   1239:                                        malloc_printf("[%u]\n", gap_start);
                   1240:                                }
                   1241:                                gap_start = -1;
                   1242:                        }
                   1243:                        malloc_printf(
                   1244:                            "%13u %1s %4u %4u %3u %9llu %9llu"
                   1245:                            " %9llu %7lu %7lu\n",
                   1246:                            i,
                   1247:                            i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S",
                   1248:                            arena->bins[i].reg_size,
                   1249:                            arena->bins[i].nregs,
                   1250:                            arena->bins[i].run_size >> pagesize_2pow,
                   1251:                            arena->bins[i].stats.nrequests,
                   1252:                            arena->bins[i].stats.nruns,
                   1253:                            arena->bins[i].stats.reruns,
                   1254:                            arena->bins[i].stats.highruns,
                   1255:                            arena->bins[i].stats.curruns);
                   1256:                }
                   1257:        }
                   1258:        if (gap_start != -1) {
                   1259:                if (i > gap_start + 1) {
                   1260:                        /* Gap of more than one size class. */
                   1261:                        malloc_printf("[%u..%u]\n", gap_start, i - 1);
                   1262:                } else {
                   1263:                        /* Gap of one size class. */
                   1264:                        malloc_printf("[%u]\n", gap_start);
                   1265:                }
                   1266:        }
                   1267: }
                   1268: #endif
                   1269:
                   1270: /*
                   1271:  * End Utility functions/macros.
                   1272:  */
                   1273: /******************************************************************************/
                   1274: /*
                   1275:  * Begin chunk management functions.
                   1276:  */
                   1277:
1.7       yamt     1278: #ifndef lint
1.1       ad       1279: static inline int
                   1280: chunk_comp(chunk_node_t *a, chunk_node_t *b)
                   1281: {
                   1282:
                   1283:        assert(a != NULL);
                   1284:        assert(b != NULL);
                   1285:
                   1286:        if ((uintptr_t)a->chunk < (uintptr_t)b->chunk)
                   1287:                return (-1);
                   1288:        else if (a->chunk == b->chunk)
                   1289:                return (0);
                   1290:        else
                   1291:                return (1);
                   1292: }
                   1293:
                   1294: /* Generate red-black tree code for chunks. */
                   1295: RB_GENERATE_STATIC(chunk_tree_s, chunk_node_s, link, chunk_comp);
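                          /*
                           * The macro above emits static definitions of the chunk_tree_s
                           * functions that the RB_MIN(), RB_NEXT(), RB_INSERT(), etc.
                           * invocations below expand to, keyed on chunk address via
                           * chunk_comp().
                           */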
1.2       ad       1296: #endif
1.1       ad       1297:
                   1298: static void *
1.5       yamt     1299: pages_map_align(void *addr, size_t size, int align)
1.1       ad       1300: {
                   1301:        void *ret;
                   1302:
                   1303:        /*
                   1304:         * We don't use MAP_FIXED here, because it can cause the *replacement*
                   1305:         * of existing mappings, and we only want to create new mappings.
                   1306:         */
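                                  /*
                                   * MAP_ALIGNED(align) requests a mapping aligned to 2^align
                                   * bytes; align == 0 (the plain pages_map() case) imposes no
                                   * alignment constraint.
                                   */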
1.5       yamt     1307:        ret = mmap(addr, size, PROT_READ | PROT_WRITE,
                   1308:            MAP_PRIVATE | MAP_ANON | MAP_ALIGNED(align), -1, 0);
1.1       ad       1309:        assert(ret != NULL);
                   1310:
                   1311:        if (ret == MAP_FAILED)
                   1312:                ret = NULL;
                   1313:        else if (addr != NULL && ret != addr) {
                   1314:                /*
                   1315:                 * We succeeded in mapping memory, but not in the right place.
                   1316:                 */
                   1317:                if (munmap(ret, size) == -1) {
                   1318:                        char buf[STRERROR_BUF];
                   1319:
1.16      christos 1320:                        STRERROR_R(errno, buf, sizeof(buf));
                   1321:                        _malloc_message(getprogname(),
1.1       ad       1322:                            ": (malloc) Error in munmap(): ", buf, "\n");
                   1323:                        if (opt_abort)
                   1324:                                abort();
                   1325:                }
                   1326:                ret = NULL;
                   1327:        }
                   1328:
                   1329:        assert(ret == NULL || (addr == NULL && ret != addr)
                   1330:            || (addr != NULL && ret == addr));
                   1331:        return (ret);
                   1332: }
                   1333:
1.5       yamt     1334: static void *
                   1335: pages_map(void *addr, size_t size)
                   1336: {
                   1337:
                   1338:        return pages_map_align(addr, size, 0);
                   1339: }
                   1340:
1.1       ad       1341: static void
                   1342: pages_unmap(void *addr, size_t size)
                   1343: {
                   1344:
                   1345:        if (munmap(addr, size) == -1) {
                   1346:                char buf[STRERROR_BUF];
                   1347:
1.16      christos 1348:                STRERROR_R(errno, buf, sizeof(buf));
                   1349:                _malloc_message(getprogname(),
1.1       ad       1350:                    ": (malloc) Error in munmap(): ", buf, "\n");
                   1351:                if (opt_abort)
                   1352:                        abort();
                   1353:        }
                   1354: }
                   1355:
                   1356: static void *
                   1357: chunk_alloc(size_t size)
                   1358: {
                   1359:        void *ret, *chunk;
                   1360:        chunk_node_t *tchunk, *delchunk;
                   1361:
                   1362:        assert(size != 0);
                   1363:        assert((size & chunksize_mask) == 0);
                   1364:
                   1365:        malloc_mutex_lock(&chunks_mtx);
                   1366:
                   1367:        if (size == chunksize) {
                   1368:                /*
                   1369:                 * Check for address ranges that were previously chunks and try
                   1370:                 * to use them.
                   1371:                 */
                   1372:
1.2       ad       1373:                /* LINTED */
1.1       ad       1374:                tchunk = RB_MIN(chunk_tree_s, &old_chunks);
                   1375:                while (tchunk != NULL) {
                   1376:                        /* Found an address range.  Try to recycle it. */
                   1377:
                   1378:                        chunk = tchunk->chunk;
                   1379:                        delchunk = tchunk;
1.2       ad       1380:                        /* LINTED */
1.1       ad       1381:                        tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk);
                   1382:
                   1383:                        /* Remove delchunk from the tree. */
1.2       ad       1384:                        /* LINTED */
1.1       ad       1385:                        RB_REMOVE(chunk_tree_s, &old_chunks, delchunk);
                   1386:                        base_chunk_node_dealloc(delchunk);
                   1387:
                   1388: #ifdef USE_BRK
                   1389:                        if ((uintptr_t)chunk >= (uintptr_t)brk_base
                   1390:                            && (uintptr_t)chunk < (uintptr_t)brk_max) {
                   1391:                                /* Re-use a previously freed brk chunk. */
                   1392:                                ret = chunk;
                   1393:                                goto RETURN;
                   1394:                        }
                   1395: #endif
                   1396:                        if ((ret = pages_map(chunk, size)) != NULL) {
                   1397:                                /* Success. */
                   1398:                                goto RETURN;
                   1399:                        }
                   1400:                }
                   1401:        }
                   1402:
                   1403:        /*
                    1404:         * Ask for a chunk-aligned mapping, but allow the OS to place
                    1405:         * the allocation anywhere.  Beware of size_t wrap-around.
                   1406:         */
                   1407:        if (size + chunksize > size) {
1.5       yamt     1408:                if ((ret = pages_map_align(NULL, size, chunksize_2pow))
                   1409:                    != NULL) {
1.1       ad       1410:                        goto RETURN;
                   1411:                }
                   1412:        }
                   1413:
                   1414: #ifdef USE_BRK
                   1415:        /*
                   1416:         * Try to create allocations in brk, in order to make full use of
                   1417:         * limited address space.
                   1418:         */
                   1419:        if (brk_prev != (void *)-1) {
                   1420:                void *brk_cur;
                   1421:                intptr_t incr;
                   1422:
                   1423:                /*
                   1424:                 * The loop is necessary to recover from races with other
                   1425:                 * threads that are using brk for something other than malloc.
                   1426:                 */
                   1427:                malloc_mutex_lock(&brk_mtx);
                   1428:                do {
                   1429:                        /* Get the current end of brk. */
                   1430:                        brk_cur = sbrk(0);
                   1431:
                   1432:                        /*
                   1433:                         * Calculate how much padding is necessary to
                   1434:                         * chunk-align the end of brk.
                   1435:                         */
                   1436:                        incr = (intptr_t)size
                   1437:                            - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
1.20      lukem    1438:                        if (incr == (intptr_t)size) {
1.1       ad       1439:                                ret = brk_cur;
                   1440:                        } else {
                   1441:                                ret = (void *)((intptr_t)brk_cur + incr);
                   1442:                                incr += size;
                   1443:                        }
                   1444:
                   1445:                        brk_prev = sbrk(incr);
                   1446:                        if (brk_prev == brk_cur) {
                   1447:                                /* Success. */
                   1448:                                malloc_mutex_unlock(&brk_mtx);
                   1449:                                brk_max = (void *)((intptr_t)ret + size);
                   1450:                                goto RETURN;
                   1451:                        }
                   1452:                } while (brk_prev != (void *)-1);
                   1453:                malloc_mutex_unlock(&brk_mtx);
                   1454:        }
                   1455: #endif
                   1456:
                   1457:        /* All strategies for allocation failed. */
                   1458:        ret = NULL;
                   1459: RETURN:
                   1460:        if (ret != NULL) {
                   1461:                chunk_node_t key;
                   1462:                /*
                   1463:                 * Clean out any entries in old_chunks that overlap with the
                   1464:                 * memory we just allocated.
                   1465:                 */
                   1466:                key.chunk = ret;
1.2       ad       1467:                /* LINTED */
1.1       ad       1468:                tchunk = RB_NFIND(chunk_tree_s, &old_chunks, &key);
                   1469:                while (tchunk != NULL
                   1470:                    && (uintptr_t)tchunk->chunk >= (uintptr_t)ret
                   1471:                    && (uintptr_t)tchunk->chunk < (uintptr_t)ret + size) {
                   1472:                        delchunk = tchunk;
1.2       ad       1473:                        /* LINTED */
1.1       ad       1474:                        tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk);
1.2       ad       1475:                        /* LINTED */
1.1       ad       1476:                        RB_REMOVE(chunk_tree_s, &old_chunks, delchunk);
                   1477:                        base_chunk_node_dealloc(delchunk);
                   1478:                }
                   1479:
                   1480:        }
                   1481: #ifdef MALLOC_STATS
                   1482:        if (ret != NULL) {
                   1483:                stats_chunks.nchunks += (size / chunksize);
                   1484:                stats_chunks.curchunks += (size / chunksize);
                   1485:        }
                   1486:        if (stats_chunks.curchunks > stats_chunks.highchunks)
                   1487:                stats_chunks.highchunks = stats_chunks.curchunks;
                   1488: #endif
                   1489:        malloc_mutex_unlock(&chunks_mtx);
                   1490:
                   1491:        assert(CHUNK_ADDR2BASE(ret) == ret);
                   1492:        return (ret);
                   1493: }
                   1494:
                   1495: static void
                   1496: chunk_dealloc(void *chunk, size_t size)
                   1497: {
                   1498:        chunk_node_t *node;
                   1499:
                   1500:        assert(chunk != NULL);
                   1501:        assert(CHUNK_ADDR2BASE(chunk) == chunk);
                   1502:        assert(size != 0);
                   1503:        assert((size & chunksize_mask) == 0);
                   1504:
                   1505:        malloc_mutex_lock(&chunks_mtx);
                   1506:
                   1507: #ifdef USE_BRK
                   1508:        if ((uintptr_t)chunk >= (uintptr_t)brk_base
                   1509:            && (uintptr_t)chunk < (uintptr_t)brk_max) {
                   1510:                void *brk_cur;
                   1511:
                   1512:                malloc_mutex_lock(&brk_mtx);
                   1513:                /* Get the current end of brk. */
                   1514:                brk_cur = sbrk(0);
                   1515:
                   1516:                /*
                   1517:                 * Try to shrink the data segment if this chunk is at the end
                   1518:                 * of the data segment.  The sbrk() call here is subject to a
                   1519:                 * race condition with threads that use brk(2) or sbrk(2)
                   1520:                 * directly, but the alternative would be to leak memory for
                   1521:                 * the sake of poorly designed multi-threaded programs.
                   1522:                 */
                   1523:                if (brk_cur == brk_max
                   1524:                    && (void *)((uintptr_t)chunk + size) == brk_max
                   1525:                    && sbrk(-(intptr_t)size) == brk_max) {
                   1526:                        malloc_mutex_unlock(&brk_mtx);
                   1527:                        if (brk_prev == brk_max) {
                   1528:                                /* Success. */
                   1529:                                brk_prev = (void *)((intptr_t)brk_max
                   1530:                                    - (intptr_t)size);
                   1531:                                brk_max = brk_prev;
                   1532:                        }
                   1533:                } else {
                   1534:                        size_t offset;
                   1535:
                   1536:                        malloc_mutex_unlock(&brk_mtx);
                   1537:                        madvise(chunk, size, MADV_FREE);
                   1538:
                   1539:                        /*
                   1540:                         * Iteratively create records of each chunk-sized
                    1541:                         * memory region of which 'chunk' is composed, so that
                   1542:                         * the address range can be recycled if memory usage
                   1543:                         * increases later on.
                   1544:                         */
                   1545:                        for (offset = 0; offset < size; offset += chunksize) {
                   1546:                                node = base_chunk_node_alloc();
                   1547:                                if (node == NULL)
                   1548:                                        break;
                   1549:
                   1550:                                node->chunk = (void *)((uintptr_t)chunk
                   1551:                                    + (uintptr_t)offset);
                   1552:                                node->size = chunksize;
1.2       ad       1553:                                /* LINTED */
1.1       ad       1554:                                RB_INSERT(chunk_tree_s, &old_chunks, node);
                   1555:                        }
                   1556:                }
                   1557:        } else {
                   1558: #endif
                   1559:                pages_unmap(chunk, size);
                   1560:
                   1561:                /*
                   1562:                 * Make a record of the chunk's address, so that the address
                   1563:                 * range can be recycled if memory usage increases later on.
                   1564:                 * Don't bother to create entries if (size > chunksize), since
                   1565:                 * doing so could cause scalability issues for truly gargantuan
                   1566:                 * objects (many gigabytes or larger).
                   1567:                 */
                   1568:                if (size == chunksize) {
                   1569:                        node = base_chunk_node_alloc();
                   1570:                        if (node != NULL) {
                   1571:                                node->chunk = (void *)(uintptr_t)chunk;
                   1572:                                node->size = chunksize;
1.2       ad       1573:                                /* LINTED */
1.1       ad       1574:                                RB_INSERT(chunk_tree_s, &old_chunks, node);
                   1575:                        }
                   1576:                }
                   1577: #ifdef USE_BRK
                   1578:        }
                   1579: #endif
                   1580:
                   1581: #ifdef MALLOC_STATS
                   1582:        stats_chunks.curchunks -= (size / chunksize);
                   1583: #endif
                   1584:        malloc_mutex_unlock(&chunks_mtx);
                   1585: }
                   1586:
                   1587: /*
                   1588:  * End chunk management functions.
                   1589:  */
                   1590: /******************************************************************************/
                   1591: /*
                   1592:  * Begin arena.
                   1593:  */
                   1594:
                   1595: /*
1.17      ad       1596:  * Choose an arena based on a per-thread and (optimistically) per-CPU value.
                   1597:  *
                    1598:  * We maintain at least one block of arenas; usually there are more.
                    1599:  * Each block is ncpus arenas in size.  Whole blocks are 'hashed'
                    1600:  * amongst threads, which is accomplished by advancing next_arena
                    1601:  * only in steps of ncpus.
1.1       ad       1602:  */
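                          /*
                           * For example, with ncpus == 4 and narenas == 16, successive
                           * threads are handed the blocks starting at arenas[0], arenas[4],
                           * arenas[8], and arenas[12]; each thread then indexes into its
                           * block by the current CPU (see choose_arena()).
                           */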
1.19      ad       1603: static __noinline arena_t *
                   1604: choose_arena_hard(void)
1.1       ad       1605: {
1.17      ad       1606:        unsigned i, curcpu;
                   1607:        arena_t **map;
1.1       ad       1608:
1.17      ad       1609:        /* Initialize the current block of arenas and advance to next. */
1.1       ad       1610:        malloc_mutex_lock(&arenas_mtx);
1.17      ad       1611:        assert(next_arena % ncpus == 0);
                   1612:        assert(narenas % ncpus == 0);
                   1613:        map = &arenas[next_arena];
                   1614:        set_arenas_map(map);
                   1615:        for (i = 0; i < ncpus; i++) {
                   1616:                if (arenas[next_arena] == NULL)
                   1617:                        arenas_extend(next_arena);
                   1618:                next_arena = (next_arena + 1) % narenas;
1.15      ad       1619:        }
1.1       ad       1620:        malloc_mutex_unlock(&arenas_mtx);
                   1621:
1.17      ad       1622:        /*
                   1623:         * If we were unable to allocate an arena above, then default to
                   1624:         * the first arena, which is always present.
                   1625:         */
                   1626:        curcpu = thr_curcpu();
                   1627:        if (map[curcpu] != NULL)
                   1628:                return map[curcpu];
                   1629:        return arenas[0];
1.1       ad       1630: }
                   1631:
1.19      ad       1632: static inline arena_t *
                   1633: choose_arena(void)
                   1634: {
                   1635:        unsigned curcpu;
                   1636:        arena_t **map;
                   1637:
                   1638:        map = get_arenas_map();
                   1639:        curcpu = thr_curcpu();
                   1640:        if (__predict_true(map != NULL && map[curcpu] != NULL))
                   1641:                return map[curcpu];
                   1642:
                    1643:        return choose_arena_hard();
                   1644: }
                   1645:
1.7       yamt     1646: #ifndef lint
1.1       ad       1647: static inline int
                   1648: arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
                   1649: {
                   1650:
                   1651:        assert(a != NULL);
                   1652:        assert(b != NULL);
                   1653:
1.34.2.3! martin   1654:        if (a->max_frun_npages < b->max_frun_npages)
        !          1655:                return (-1);
        !          1656:        if (a->max_frun_npages > b->max_frun_npages)
        !          1657:                return (1);
        !          1658:
1.1       ad       1659:        if ((uintptr_t)a < (uintptr_t)b)
                   1660:                return (-1);
                   1661:        else if (a == b)
                   1662:                return (0);
                   1663:        else
                   1664:                return (1);
                   1665: }
                   1666:
                   1667: /* Generate red-black tree code for arena chunks. */
                   1668: RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp);
1.2       ad       1669: #endif
1.1       ad       1670:
1.7       yamt     1671: #ifndef lint
1.1       ad       1672: static inline int
                   1673: arena_run_comp(arena_run_t *a, arena_run_t *b)
                   1674: {
                   1675:
                   1676:        assert(a != NULL);
                   1677:        assert(b != NULL);
                   1678:
                   1679:        if ((uintptr_t)a < (uintptr_t)b)
                   1680:                return (-1);
                   1681:        else if (a == b)
                   1682:                return (0);
                   1683:        else
                   1684:                return (1);
                   1685: }
                   1686:
                   1687: /* Generate red-black tree code for arena runs. */
                   1688: RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp);
1.2       ad       1689: #endif
1.1       ad       1690:
                   1691: static inline void *
                   1692: arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
                   1693: {
                   1694:        void *ret;
                   1695:        unsigned i, mask, bit, regind;
                   1696:
                   1697:        assert(run->magic == ARENA_RUN_MAGIC);
                   1698:        assert(run->regs_minelm < bin->regs_mask_nelms);
                   1699:
                   1700:        /*
                   1701:         * Move the first check outside the loop, so that run->regs_minelm can
                   1702:         * be updated unconditionally, without the possibility of updating it
                   1703:         * multiple times.
                   1704:         */
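                                  /*
                                   * Each regs_mask element covers one int's worth of regions:
                                   * with 32-bit ints (SIZEOF_INT_2POW == 2), element i covers
                                   * regions [i * 32, i * 32 + 31], so regind == i * 32 + bit.
                                   */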
                   1705:        i = run->regs_minelm;
                   1706:        mask = run->regs_mask[i];
                   1707:        if (mask != 0) {
                   1708:                /* Usable allocation found. */
                   1709:                bit = ffs((int)mask) - 1;
                   1710:
                   1711:                regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
                   1712:                ret = (void *)(((uintptr_t)run) + bin->reg0_offset
                   1713:                    + (bin->reg_size * regind));
                   1714:
                   1715:                /* Clear bit. */
                   1716:                mask ^= (1 << bit);
                   1717:                run->regs_mask[i] = mask;
                   1718:
                   1719:                return (ret);
                   1720:        }
                   1721:
                   1722:        for (i++; i < bin->regs_mask_nelms; i++) {
                   1723:                mask = run->regs_mask[i];
                   1724:                if (mask != 0) {
                   1725:                        /* Usable allocation found. */
                   1726:                        bit = ffs((int)mask) - 1;
                   1727:
                   1728:                        regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
                   1729:                        ret = (void *)(((uintptr_t)run) + bin->reg0_offset
                   1730:                            + (bin->reg_size * regind));
                   1731:
                   1732:                        /* Clear bit. */
                   1733:                        mask ^= (1 << bit);
                   1734:                        run->regs_mask[i] = mask;
                   1735:
                   1736:                        /*
                   1737:                         * Make a note that nothing before this element
                   1738:                         * contains a free region.
                   1739:                         */
                   1740:                        run->regs_minelm = i; /* Low payoff: + (mask == 0); */
                   1741:
                   1742:                        return (ret);
                   1743:                }
                   1744:        }
                   1745:        /* Not reached. */
1.7       yamt     1746:        /* LINTED */
1.1       ad       1747:        assert(0);
                   1748:        return (NULL);
                   1749: }
                   1750:
                   1751: static inline void
                   1752: arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
                   1753: {
                   1754:        /*
                   1755:         * To divide by a number D that is not a power of two we multiply
                   1756:         * by (2^21 / D) and then right shift by 21 positions.
                   1757:         *
                   1758:         *   X / D
                   1759:         *
                   1760:         * becomes
                   1761:         *
                   1762:         *   (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
                   1763:         */
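                                  /*
                                   * Worked example (illustrative, assuming QUANTUM_2POW_MIN
                                   * == 4): for D == 48, SIZE_INV(3) == (2^21 / 48) + 1 ==
                                   * 43691, so X == 96 gives (96 * 43691) >> 21 == 2 == 96 / 48.
                                   */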
                   1764: #define SIZE_INV_SHIFT 21
                   1765: #define SIZE_INV(s) (((1 << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
                   1766:        static const unsigned size_invs[] = {
                   1767:            SIZE_INV(3),
                   1768:            SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
                   1769:            SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
                   1770:            SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
                   1771:            SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
                   1772:            SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
                   1773:            SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
                   1774:            SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
                   1775: #if (QUANTUM_2POW_MIN < 4)
                   1776:            ,
                   1777:            SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
                   1778:            SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
                   1779:            SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
                   1780:            SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
                   1781:            SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
                   1782:            SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
                   1783:            SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
                   1784:            SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
                   1785: #endif
                   1786:        };
                   1787:        unsigned diff, regind, elm, bit;
                   1788:
1.7       yamt     1789:        /* LINTED */
1.1       ad       1790:        assert(run->magic == ARENA_RUN_MAGIC);
                   1791:        assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
                   1792:            >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
                   1793:
                   1794:        /*
                   1795:         * Avoid doing division with a variable divisor if possible.  Using
                   1796:         * actual division here can reduce allocator throughput by over 20%!
                   1797:         */
                   1798:        diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
                   1799:        if ((size & (size - 1)) == 0) {
                   1800:                /*
                   1801:                 * log2_table allows fast division of a power of two in the
                   1802:                 * [1..128] range.
                   1803:                 *
                   1804:                 * (x / divisor) becomes (x >> log2_table[divisor - 1]).
                   1805:                 */
                   1806:                static const unsigned char log2_table[] = {
                   1807:                    0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
                   1808:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
                   1809:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   1810:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
                   1811:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   1812:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   1813:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   1814:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
                   1815:                };
                   1816:
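                                          /*
                                           * E.g. size == 16: log2_table[15] == 4, so diff >> 4
                                           * replaces diff / 16.  For size == 4096,
                                           * (size >> 8) - 1 == 15 indexes the same entry and
                                           * the shift becomes 8 + 4 == 12.
                                           */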
                   1817:                if (size <= 128)
                   1818:                        regind = (diff >> log2_table[size - 1]);
                   1819:                else if (size <= 32768)
                   1820:                        regind = diff >> (8 + log2_table[(size >> 8) - 1]);
                   1821:                else {
                   1822:                        /*
                   1823:                         * The page size is too large for us to use the lookup
                   1824:                         * table.  Use real division.
                   1825:                         */
1.9       christos 1826:                        regind = (unsigned)(diff / size);
1.1       ad       1827:                }
                   1828:        } else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
                   1829:            << QUANTUM_2POW_MIN) + 2) {
                   1830:                regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
                   1831:                regind >>= SIZE_INV_SHIFT;
                   1832:        } else {
                   1833:                /*
                   1834:                 * size_invs isn't large enough to handle this size class, so
                   1835:                 * calculate regind using actual division.  This only happens
                   1836:                 * if the user increases small_max via the 'S' runtime
                   1837:                 * configuration option.
                   1838:                 */
1.9       christos 1839:                regind = (unsigned)(diff / size);
1.1       ad       1840:        }
                   1841:        assert(diff == regind * size);
                   1842:        assert(regind < bin->nregs);
                   1843:
                   1844:        elm = regind >> (SIZEOF_INT_2POW + 3);
                   1845:        if (elm < run->regs_minelm)
                   1846:                run->regs_minelm = elm;
                   1847:        bit = regind - (elm << (SIZEOF_INT_2POW + 3));
                   1848:        assert((run->regs_mask[elm] & (1 << bit)) == 0);
                   1849:        run->regs_mask[elm] |= (1 << bit);
                   1850: #undef SIZE_INV
                   1851: #undef SIZE_INV_SHIFT
                   1852: }
                   1853:
                   1854: static void
                   1855: arena_run_split(arena_t *arena, arena_run_t *run, size_t size)
                   1856: {
                   1857:        arena_chunk_t *chunk;
                   1858:        unsigned run_ind, map_offset, total_pages, need_pages, rem_pages;
                   1859:        unsigned i;
                   1860:
                   1861:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
                   1862:        run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
                   1863:            >> pagesize_2pow);
                   1864:        total_pages = chunk->map[run_ind].npages;
1.9       christos 1865:        need_pages = (unsigned)(size >> pagesize_2pow);
1.1       ad       1866:        assert(need_pages <= total_pages);
                   1867:        rem_pages = total_pages - need_pages;
                   1868:
                   1869:        /* Split enough pages from the front of run to fit allocation size. */
                   1870:        map_offset = run_ind;
                   1871:        for (i = 0; i < need_pages; i++) {
                   1872:                chunk->map[map_offset + i].npages = need_pages;
                   1873:                chunk->map[map_offset + i].pos = i;
                   1874:        }
                   1875:
                   1876:        /* Keep track of trailing unused pages for later use. */
                   1877:        if (rem_pages > 0) {
                   1878:                /* Update map for trailing pages. */
                   1879:                map_offset += need_pages;
                   1880:                chunk->map[map_offset].npages = rem_pages;
                   1881:                chunk->map[map_offset].pos = POS_FREE;
                   1882:                chunk->map[map_offset + rem_pages - 1].npages = rem_pages;
                   1883:                chunk->map[map_offset + rem_pages - 1].pos = POS_FREE;
                   1884:        }
                   1885:
                   1886:        chunk->pages_used += need_pages;
                   1887: }
                   1888:
                   1889: static arena_chunk_t *
                   1890: arena_chunk_alloc(arena_t *arena)
                   1891: {
                   1892:        arena_chunk_t *chunk;
                   1893:
                   1894:        if (arena->spare != NULL) {
                   1895:                chunk = arena->spare;
                   1896:                arena->spare = NULL;
                   1897:
1.2       ad       1898:                /* LINTED */
1.1       ad       1899:                RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
                   1900:        } else {
                   1901:                chunk = (arena_chunk_t *)chunk_alloc(chunksize);
                   1902:                if (chunk == NULL)
                   1903:                        return (NULL);
                   1904: #ifdef MALLOC_STATS
                   1905:                arena->stats.mapped += chunksize;
                   1906: #endif
                   1907:
                   1908:                chunk->arena = arena;
                   1909:
                   1910:                /*
                   1911:                 * Claim that no pages are in use, since the header is merely
                   1912:                 * overhead.
                   1913:                 */
                   1914:                chunk->pages_used = 0;
                   1915:
                   1916:                chunk->max_frun_npages = chunk_npages -
                   1917:                    arena_chunk_header_npages;
                   1918:                chunk->min_frun_ind = arena_chunk_header_npages;
                   1919:
                   1920:                /*
                   1921:                 * Initialize enough of the map to support one maximal free run.
                   1922:                 */
                   1923:                chunk->map[arena_chunk_header_npages].npages = chunk_npages -
                   1924:                    arena_chunk_header_npages;
                   1925:                chunk->map[arena_chunk_header_npages].pos = POS_FREE;
                   1926:                chunk->map[chunk_npages - 1].npages = chunk_npages -
                   1927:                    arena_chunk_header_npages;
                   1928:                chunk->map[chunk_npages - 1].pos = POS_FREE;
1.34.2.2  snj      1929:
                   1930:                RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
1.1       ad       1931:        }
                   1932:
                   1933:        return (chunk);
                   1934: }
                   1935:
                   1936: static void
                   1937: arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
                   1938: {
                   1939:
                   1940:        /*
                   1941:         * Remove chunk from the chunk tree, regardless of whether this chunk
                   1942:         * will be cached, so that the arena does not use it.
                   1943:         */
1.2       ad       1944:        /* LINTED */
1.1       ad       1945:        RB_REMOVE(arena_chunk_tree_s, &chunk->arena->chunks, chunk);
                   1946:
                   1947:        if (opt_hint == false) {
                   1948:                if (arena->spare != NULL) {
                   1949:                        chunk_dealloc((void *)arena->spare, chunksize);
                   1950: #ifdef MALLOC_STATS
                   1951:                        arena->stats.mapped -= chunksize;
                   1952: #endif
                   1953:                }
                   1954:                arena->spare = chunk;
                   1955:        } else {
                   1956:                assert(arena->spare == NULL);
                   1957:                chunk_dealloc((void *)chunk, chunksize);
                   1958: #ifdef MALLOC_STATS
                   1959:                arena->stats.mapped -= chunksize;
                   1960: #endif
                   1961:        }
                   1962: }
                   1963:
                   1964: static arena_run_t *
                   1965: arena_run_alloc(arena_t *arena, size_t size)
                   1966: {
1.34.2.2  snj      1967:        arena_chunk_t *chunk, *chunk_tmp;
1.1       ad       1968:        arena_run_t *run;
1.34.2.2  snj      1969:        unsigned need_npages;
1.1       ad       1970:
                   1971:        assert(size <= (chunksize - (arena_chunk_header_npages <<
                   1972:            pagesize_2pow)));
                   1973:        assert((size & pagesize_mask) == 0);
                   1974:
                   1975:        /*
1.34.2.2  snj      1976:         * Search through the arena chunk tree for a large enough free run.
                   1977:         * Tree order ensures that any exact fit is picked immediately or
                   1978:         * otherwise the lowest address of the next size.
1.1       ad       1979:         */
1.9       christos 1980:        need_npages = (unsigned)(size >> pagesize_2pow);
1.2       ad       1981:        /* LINTED */
1.34.2.2  snj      1982:        for (;;) {
                   1983:                chunk_tmp = RB_ROOT(&arena->chunks);
                   1984:                chunk = NULL;
                   1985:                while (chunk_tmp) {
                   1986:                        if (chunk_tmp->max_frun_npages == need_npages) {
                   1987:                                chunk = chunk_tmp;
                   1988:                                break;
                   1989:                        }
                   1990:                        if (chunk_tmp->max_frun_npages < need_npages) {
                   1991:                                chunk_tmp = RB_RIGHT(chunk_tmp, link);
                   1992:                                continue;
                   1993:                        }
                   1994:                        chunk = chunk_tmp;
                   1995:                        chunk_tmp = RB_LEFT(chunk, link);
                   1996:                }
                   1997:                if (chunk == NULL)
                   1998:                        break;
1.1       ad       1999:                /*
1.34.2.2  snj      2000:                 * At this point, the chunk must have a cached run size large
                   2001:                 * enough to fit the allocation.
1.1       ad       2002:                 */
1.34.2.2  snj      2003:                assert(need_npages <= chunk->max_frun_npages);
                   2004:                {
1.1       ad       2005:                        arena_chunk_map_t *mapelm;
                   2006:                        unsigned i;
                   2007:                        unsigned max_frun_npages = 0;
                   2008:                        unsigned min_frun_ind = chunk_npages;
                   2009:
                   2010:                        assert(chunk->min_frun_ind >=
                   2011:                            arena_chunk_header_npages);
                   2012:                        for (i = chunk->min_frun_ind; i < chunk_npages;) {
                   2013:                                mapelm = &chunk->map[i];
                   2014:                                if (mapelm->pos == POS_FREE) {
                   2015:                                        if (mapelm->npages >= need_npages) {
                   2016:                                                run = (arena_run_t *)
                   2017:                                                    ((uintptr_t)chunk + (i <<
                   2018:                                                    pagesize_2pow));
                   2019:                                                /* Update page map. */
                   2020:                                                arena_run_split(arena, run,
                   2021:                                                    size);
                   2022:                                                return (run);
                   2023:                                        }
                   2024:                                        if (mapelm->npages >
                   2025:                                            max_frun_npages) {
                   2026:                                                max_frun_npages =
                   2027:                                                    mapelm->npages;
                   2028:                                        }
                   2029:                                        if (i < min_frun_ind) {
                   2030:                                                min_frun_ind = i;
                   2031:                                                if (i < chunk->min_frun_ind)
                   2032:                                                        chunk->min_frun_ind = i;
                   2033:                                        }
                   2034:                                }
                   2035:                                i += mapelm->npages;
                   2036:                        }
                   2037:                        /*
                   2038:                         * Search failure.  Reset cached chunk->max_frun_npages.
                   2039:                         * chunk->min_frun_ind was already reset above (if
                   2040:                         * necessary).
                   2041:                         */
1.34.2.2  snj      2042:                        RB_REMOVE(arena_chunk_tree_s, &arena->chunks, chunk);
1.1       ad       2043:                        chunk->max_frun_npages = max_frun_npages;
1.34.2.2  snj      2044:                        RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
1.1       ad       2045:                }
                   2046:        }
                   2047:
                   2048:        /*
                   2049:         * No usable runs.  Create a new chunk from which to allocate the run.
                   2050:         */
                   2051:        chunk = arena_chunk_alloc(arena);
                   2052:        if (chunk == NULL)
                   2053:                return (NULL);
                   2054:        run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
                   2055:            pagesize_2pow));
                   2056:        /* Update page map. */
                   2057:        arena_run_split(arena, run, size);
                   2058:        return (run);
                   2059: }
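                         /*
                          * XXX Illustrative sketch (added for exposition; compiled out,
                          * names are hypothetical).  The tree walk above is a textbook
                          * "lower bound" search: descend from the root, going right when
                          * the current chunk's largest free run is too small, and left,
                          * remembering the current candidate, when it is large enough.
                          */
                         #if 0
                         struct node { unsigned key; struct node *left, *right; };

                         static struct node *
                         lower_bound(struct node *root, unsigned key)
                         {
                                 struct node *best = NULL;

                                 while (root != NULL) {
                                         if (root->key < key)
                                                 root = root->right;    /* too small */
                                         else {
                                                 best = root;   /* fits; look for smaller */
                                                 if (root->key == key)
                                                         break;
                                                 root = root->left;
                                         }
                                 }
                                 return (best);
                         }
                         #endif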
                   2060:
                   2061: static void
                   2062: arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size)
                   2063: {
                   2064:        arena_chunk_t *chunk;
                   2065:        unsigned run_ind, run_pages;
                   2066:
                   2067:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
                   2068:
                   2069:        run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
                   2070:            >> pagesize_2pow);
                   2071:        assert(run_ind >= arena_chunk_header_npages);
                   2072:        assert(run_ind < (chunksize >> pagesize_2pow));
1.9       christos 2073:        run_pages = (unsigned)(size >> pagesize_2pow);
1.1       ad       2074:        assert(run_pages == chunk->map[run_ind].npages);
                   2075:
                   2076:        /* Subtract pages from count of pages used in chunk. */
                   2077:        chunk->pages_used -= run_pages;
                   2078:
                   2079:        /* Mark run as deallocated. */
                   2080:        assert(chunk->map[run_ind].npages == run_pages);
                   2081:        chunk->map[run_ind].pos = POS_FREE;
                   2082:        assert(chunk->map[run_ind + run_pages - 1].npages == run_pages);
                   2083:        chunk->map[run_ind + run_pages - 1].pos = POS_FREE;
                   2084:
                   2085:        /*
                   2086:         * Tell the kernel that we don't need the data in this run, but only if
                   2087:         * requested via runtime configuration.
                   2088:         */
                   2089:        if (opt_hint)
                   2090:                madvise(run, size, MADV_FREE);
                   2091:
                   2092:        /* Try to coalesce with neighboring runs. */
                   2093:        if (run_ind > arena_chunk_header_npages &&
                   2094:            chunk->map[run_ind - 1].pos == POS_FREE) {
                   2095:                unsigned prev_npages;
                   2096:
                   2097:                /* Coalesce with previous run. */
                   2098:                prev_npages = chunk->map[run_ind - 1].npages;
                   2099:                run_ind -= prev_npages;
                   2100:                assert(chunk->map[run_ind].npages == prev_npages);
                   2101:                assert(chunk->map[run_ind].pos == POS_FREE);
                   2102:                run_pages += prev_npages;
                   2103:
                   2104:                chunk->map[run_ind].npages = run_pages;
                   2105:                assert(chunk->map[run_ind].pos == POS_FREE);
                   2106:                chunk->map[run_ind + run_pages - 1].npages = run_pages;
                   2107:                assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
                   2108:        }
                   2109:
                   2110:        if (run_ind + run_pages < chunk_npages &&
                   2111:            chunk->map[run_ind + run_pages].pos == POS_FREE) {
                   2112:                unsigned next_npages;
                   2113:
                   2114:                /* Coalesce with next run. */
                   2115:                next_npages = chunk->map[run_ind + run_pages].npages;
                   2116:                run_pages += next_npages;
                   2117:                assert(chunk->map[run_ind + run_pages - 1].npages ==
                   2118:                    next_npages);
                   2119:                assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
                   2120:
                   2121:                chunk->map[run_ind].npages = run_pages;
                   2122:                chunk->map[run_ind].pos = POS_FREE;
                   2123:                chunk->map[run_ind + run_pages - 1].npages = run_pages;
                   2124:                assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
                   2125:        }
                   2126:
1.34.2.2  snj      2127:        if (chunk->map[run_ind].npages > chunk->max_frun_npages) {
                   2128:                RB_REMOVE(arena_chunk_tree_s, &arena->chunks, chunk);
1.1       ad       2129:                chunk->max_frun_npages = chunk->map[run_ind].npages;
1.34.2.2  snj      2130:                RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
                   2131:        }
1.1       ad       2132:        if (run_ind < chunk->min_frun_ind)
                   2133:                chunk->min_frun_ind = run_ind;
                   2134:
                   2135:        /* Deallocate chunk if it is now completely unused. */
                   2136:        if (chunk->pages_used == 0)
                   2137:                arena_chunk_dealloc(arena, chunk);
                   2138: }
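                         /*
                          * XXX Worked example (added for exposition; hypothetical layout).
                          * Suppose pages 5..6 are being freed while pages 3..4 and 7..9
                          * are already free.  The backward merge turns (3..4)+(5..6) into
                          * a 4-page run starting at page 3; the forward merge then absorbs
                          * pages 7..9, leaving one 7-page free run:
                          *
                          *      map[3].npages = 7;  map[3].pos = POS_FREE;  (first page)
                          *      map[9].npages = 7;  map[9].pos = POS_FREE;  (last page)
                          *
                          * Only the first and last map entries of a free run are kept
                          * consistent, which is why the code above updates just those two.
                          */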
                   2139:
                   2140: static arena_run_t *
                   2141: arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
                   2142: {
                   2143:        arena_run_t *run;
                   2144:        unsigned i, remainder;
                   2145:
                   2146:        /* Look for a usable run. */
1.2       ad       2147:        /* LINTED */
1.1       ad       2148:        if ((run = RB_MIN(arena_run_tree_s, &bin->runs)) != NULL) {
                   2149:                /* run is guaranteed to have available space. */
1.2       ad       2150:                /* LINTED */
1.1       ad       2151:                RB_REMOVE(arena_run_tree_s, &bin->runs, run);
                   2152: #ifdef MALLOC_STATS
                   2153:                bin->stats.reruns++;
                   2154: #endif
                   2155:                return (run);
                   2156:        }
                   2157:        /* No existing runs have any space available. */
                   2158:
                   2159:        /* Allocate a new run. */
                   2160:        run = arena_run_alloc(arena, bin->run_size);
                   2161:        if (run == NULL)
                   2162:                return (NULL);
                   2163:
                   2164:        /* Initialize run internals. */
                   2165:        run->bin = bin;
                   2166:
                   2167:        for (i = 0; i < bin->regs_mask_nelms - 1; i++)
                   2168:                run->regs_mask[i] = UINT_MAX;
                   2169:        remainder = bin->nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1);
                   2170:        if (remainder == 0)
                   2170:                run->regs_mask[i] = UINT_MAX;
                   2170:        else {
                   2171:                /* The last element has spare bits that need to be unset. */
                   2172:                run->regs_mask[i] = (UINT_MAX >> ((1 << (SIZEOF_INT_2POW + 3))
                   2173:                    - remainder));
                   2174:        }
                   2175:
                   2176:        run->regs_minelm = 0;
                   2177:
                   2178:        run->nfree = bin->nregs;
                   2179: #ifdef MALLOC_DEBUG
                   2180:        run->magic = ARENA_RUN_MAGIC;
                   2181: #endif
                   2182:
                   2183: #ifdef MALLOC_STATS
                   2184:        bin->stats.nruns++;
                   2185:        bin->stats.curruns++;
                   2186:        if (bin->stats.curruns > bin->stats.highruns)
                   2187:                bin->stats.highruns = bin->stats.curruns;
                   2188: #endif
                   2189:        return (run);
                   2190: }
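                         /*
                          * XXX Worked example (added for exposition).  With 32-bit ints
                          * (SIZEOF_INT_2POW == 2), each mask word covers
                          * 1 << (SIZEOF_INT_2POW + 3) == 32 regions, so a bin with
                          * nregs == 100 uses regs_mask_nelms == 4 words.  remainder ==
                          * 100 & 31 == 4, and the last word is initialized to
                          * UINT_MAX >> (32 - 4) == 0xf: only the bits for regions 96..99
                          * are marked free, and the 28 spare high bits stay clear.
                          */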
                   2191:
                   2192: /* bin->runcur must have space available before this function is called. */
                   2193: static inline void *
                   2194: arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
                   2195: {
                   2196:        void *ret;
                   2197:
                   2198:        assert(run->magic == ARENA_RUN_MAGIC);
                   2199:        assert(run->nfree > 0);
                   2200:
                   2201:        ret = arena_run_reg_alloc(run, bin);
                   2202:        assert(ret != NULL);
                   2203:        run->nfree--;
                   2204:
                   2205:        return (ret);
                   2206: }
                   2207:
                   2208: /* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
                   2209: static void *
                   2210: arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
                   2211: {
                   2212:
                   2213:        bin->runcur = arena_bin_nonfull_run_get(arena, bin);
                   2214:        if (bin->runcur == NULL)
                   2215:                return (NULL);
                   2216:        assert(bin->runcur->magic == ARENA_RUN_MAGIC);
                   2217:        assert(bin->runcur->nfree > 0);
                   2218:
                   2219:        return (arena_bin_malloc_easy(arena, bin, bin->runcur));
                   2220: }
                   2221:
                   2222: /*
                   2223:  * Calculate bin->run_size such that it meets the following constraints:
                   2224:  *
                   2225:  *   *) bin->run_size >= min_run_size
                   2226:  *   *) bin->run_size <= arena_maxclass
                   2227:  *   *) bin->run_size <= RUN_MAX_SMALL
                   2228:  *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
                   2229:  *
                   2230:  * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
                   2231:  * also calculated here, since these settings are all interdependent.
                   2232:  */
                   2233: static size_t
                   2234: arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
                   2235: {
                   2236:        size_t try_run_size, good_run_size;
                   2237:        unsigned good_nregs, good_mask_nelms, good_reg0_offset;
                   2238:        unsigned try_nregs, try_mask_nelms, try_reg0_offset;
                   2239:
                   2240:        assert(min_run_size >= pagesize);
                   2241:        assert(min_run_size <= arena_maxclass);
                   2242:        assert(min_run_size <= RUN_MAX_SMALL);
                   2243:
                   2244:        /*
                   2245:         * Calculate known-valid settings before entering the run_size
                   2246:         * expansion loop, so that the first part of the loop always copies
                   2247:         * valid settings.
                   2248:         *
                   2249:         * The do..while loop iteratively reduces the number of regions until
                   2250:         * the run header and the regions no longer overlap.  A closed formula
                   2251:         * would be quite messy, since there is an interdependency between the
                   2252:         * header's mask length and the number of regions.
                   2253:         */
                   2254:        try_run_size = min_run_size;
1.9       christos 2255:        try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) /
1.22      njoly    2256:            bin->reg_size) + 1); /* Counteract try_nregs-- in loop. */
1.1       ad       2257:        do {
                   2258:                try_nregs--;
                   2259:                try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
                   2260:                    ((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
1.9       christos 2261:                try_reg0_offset = (unsigned)(try_run_size -
                   2262:                    (try_nregs * bin->reg_size));
1.1       ad       2263:        } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
                   2264:            > try_reg0_offset);
                   2265:
                   2266:        /* run_size expansion loop. */
                   2267:        do {
                   2268:                /*
                   2269:                 * Copy valid settings before trying more aggressive settings.
                   2270:                 */
                   2271:                good_run_size = try_run_size;
                   2272:                good_nregs = try_nregs;
                   2273:                good_mask_nelms = try_mask_nelms;
                   2274:                good_reg0_offset = try_reg0_offset;
                   2275:
                   2276:                /* Try more aggressive settings. */
                   2277:                try_run_size += pagesize;
1.9       christos 2278:                try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) /
                   2279:                    bin->reg_size) + 1); /* Counteract try_nregs-- in loop. */
1.1       ad       2280:                do {
                   2281:                        try_nregs--;
                   2282:                        try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
                   2283:                            ((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ?
                   2284:                            1 : 0);
1.9       christos 2285:                        try_reg0_offset = (unsigned)(try_run_size - (try_nregs *
                   2286:                            bin->reg_size));
1.1       ad       2287:                } while (sizeof(arena_run_t) + (sizeof(unsigned) *
                   2288:                    (try_mask_nelms - 1)) > try_reg0_offset);
                   2289:        } while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL
1.22      njoly    2290:            && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
                   2291:            && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
1.1       ad       2292:
                   2293:        assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
                   2294:            <= good_reg0_offset);
                   2295:        assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
                   2296:
                   2297:        /* Copy final settings. */
                   2298:        bin->run_size = good_run_size;
                   2299:        bin->nregs = good_nregs;
                   2300:        bin->regs_mask_nelms = good_mask_nelms;
                   2301:        bin->reg0_offset = good_reg0_offset;
                   2302:
                   2303:        return (good_run_size);
                   2304: }
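                         /*
                          * XXX Worked example (added for exposition; assumed values).  Take
                          * pagesize == 4096, reg_size == 192 and a run header of roughly
                          * 40 bytes.  For try_run_size == 4096 the inner loop settles on
                          * try_nregs == 21 (one 32-bit mask word, so try_mask_nelms == 1)
                          * with try_reg0_offset == 4096 - 21 * 192 == 64, which still
                          * leaves room for the header.  The outer loop then tries 8192,
                          * 12288, ... and keeps a larger run only while the RUN_MAX_OVRHD
                          * test still rates the overhead (try_reg0_offset relative to
                          * try_run_size) as too high.
                          */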
                   2305:
                   2306: static void *
                   2307: arena_malloc(arena_t *arena, size_t size)
                   2308: {
                   2309:        void *ret;
                   2310:
                   2311:        assert(arena != NULL);
                   2312:        assert(arena->magic == ARENA_MAGIC);
                   2313:        assert(size != 0);
                   2314:        assert(QUANTUM_CEILING(size) <= arena_maxclass);
                   2315:
                   2316:        if (size <= bin_maxclass) {
                   2317:                arena_bin_t *bin;
                   2318:                arena_run_t *run;
                   2319:
                   2320:                /* Small allocation. */
                   2321:
                   2322:                if (size < small_min) {
                   2323:                        /* Tiny. */
                   2324:                        size = pow2_ceil(size);
                   2325:                        bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
                   2326:                            1)))];
                   2327: #if (!defined(NDEBUG) || defined(MALLOC_STATS))
                   2328:                        /*
                   2329:                         * Bin calculation is always correct, but we may need
                   2330:                         * to fix size for the purposes of assertions and/or
                   2331:                         * stats accuracy.
                   2332:                         */
                   2333:                        if (size < (1 << TINY_MIN_2POW))
                   2334:                                size = (1 << TINY_MIN_2POW);
                   2335: #endif
                   2336:                } else if (size <= small_max) {
                   2337:                        /* Quantum-spaced. */
                   2338:                        size = QUANTUM_CEILING(size);
                   2339:                        bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
                   2340:                            - 1];
                   2341:                } else {
                   2342:                        /* Sub-page. */
                   2343:                        size = pow2_ceil(size);
                   2344:                        bin = &arena->bins[ntbins + nqbins
                   2345:                            + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
                   2346:                }
                   2347:                assert(size == bin->reg_size);
                   2348:
                   2349:                malloc_mutex_lock(&arena->mtx);
                   2350:                if ((run = bin->runcur) != NULL && run->nfree > 0)
                   2351:                        ret = arena_bin_malloc_easy(arena, bin, run);
                   2352:                else
                   2353:                        ret = arena_bin_malloc_hard(arena, bin);
                   2354:
                   2355:                if (ret == NULL) {
                   2356:                        malloc_mutex_unlock(&arena->mtx);
                   2357:                        return (NULL);
                   2358:                }
                   2359:
                   2360: #ifdef MALLOC_STATS
                   2361:                bin->stats.nrequests++;
                   2362:                arena->stats.nmalloc_small++;
                   2363:                arena->stats.allocated_small += size;
                   2364: #endif
                   2365:        } else {
                   2366:                /* Large allocation. */
                   2367:                size = PAGE_CEILING(size);
                   2368:                malloc_mutex_lock(&arena->mtx);
                   2369:                ret = (void *)arena_run_alloc(arena, size);
                   2370:                if (ret == NULL) {
                   2371:                        malloc_mutex_unlock(&arena->mtx);
                   2372:                        return (NULL);
                   2373:                }
                   2374: #ifdef MALLOC_STATS
                   2375:                arena->stats.nmalloc_large++;
                   2376:                arena->stats.allocated_large += size;
                   2377: #endif
                   2378:        }
                   2379:
                   2380:        malloc_mutex_unlock(&arena->mtx);
                   2381:
                   2382:        if (opt_junk)
                   2383:                memset(ret, 0xa5, size);
                   2384:        else if (opt_zero)
                   2385:                memset(ret, 0, size);
                   2386:        return (ret);
                   2387: }
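                         /*
                          * XXX Worked example (added for exposition; assumed parameters:
                          * TINY_MIN_2POW == 3, quantum == 16, small_max == 512, 4 kB
                          * pages):
                          *
                          *      size 3    -> tiny:     pow2_ceil(3) == 4, served from the
                          *                             8-byte bin.
                          *      size 100  -> quantum:  QUANTUM_CEILING(100) == 112, bin
                          *                             index ntbins + 112/16 - 1.
                          *      size 700  -> sub-page: pow2_ceil(700) == 1024.
                          *      size 5000 -> large:    PAGE_CEILING(5000) == 8192,
                          *                             allocated as a page run rather
                          *                             than from a bin.
                          */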
                   2388:
                   2389: static inline void
                   2390: arena_palloc_trim(arena_t *arena, arena_chunk_t *chunk, unsigned pageind,
                   2391:     unsigned npages)
                   2392: {
                   2393:        unsigned i;
                   2394:
                   2395:        assert(npages > 0);
                   2396:
                   2397:        /*
                   2398:         * Modify the map so that arena_run_dalloc() sees the run as
                   2399:         * separately allocated.
                   2400:         */
                   2401:        for (i = 0; i < npages; i++) {
                   2402:                chunk->map[pageind + i].npages = npages;
                   2403:                chunk->map[pageind + i].pos = i;
                   2404:        }
                   2405:        arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)chunk + (pageind <<
                   2406:            pagesize_2pow)), npages << pagesize_2pow);
                   2407: }
                   2408:
                   2409: /* Only handles large allocations that require more than page alignment. */
                   2410: static void *
                   2411: arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
                   2412: {
                   2413:        void *ret;
                   2414:        size_t offset;
                   2415:        arena_chunk_t *chunk;
                   2416:        unsigned pageind, i, npages;
                   2417:
                   2418:        assert((size & pagesize_mask) == 0);
                   2419:        assert((alignment & pagesize_mask) == 0);
                   2420:
1.9       christos 2421:        npages = (unsigned)(size >> pagesize_2pow);
1.1       ad       2422:
                   2423:        malloc_mutex_lock(&arena->mtx);
                   2424:        ret = (void *)arena_run_alloc(arena, alloc_size);
                   2425:        if (ret == NULL) {
                   2426:                malloc_mutex_unlock(&arena->mtx);
                   2427:                return (NULL);
                   2428:        }
                   2429:
                   2430:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
                   2431:
                   2432:        offset = (uintptr_t)ret & (alignment - 1);
                   2433:        assert((offset & pagesize_mask) == 0);
                   2434:        assert(offset < alloc_size);
                   2435:        if (offset == 0) {
1.9       christos 2436:                pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
1.1       ad       2437:                    pagesize_2pow);
                   2438:
                   2439:                /* Update the map for the run to be kept. */
                   2440:                for (i = 0; i < npages; i++) {
                   2441:                        chunk->map[pageind + i].npages = npages;
                   2442:                        assert(chunk->map[pageind + i].pos == i);
                   2443:                }
                   2444:
                   2445:                /* Trim trailing space. */
                   2446:                arena_palloc_trim(arena, chunk, pageind + npages,
1.9       christos 2447:                    (unsigned)((alloc_size - size) >> pagesize_2pow));
1.1       ad       2448:        } else {
                   2449:                size_t leadsize, trailsize;
                   2450:
                   2451:                leadsize = alignment - offset;
                   2452:                ret = (void *)((uintptr_t)ret + leadsize);
1.9       christos 2453:                pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
1.1       ad       2454:                    pagesize_2pow);
                   2455:
                   2456:                /* Update the map for the run to be kept. */
                   2457:                for (i = 0; i < npages; i++) {
                   2458:                        chunk->map[pageind + i].npages = npages;
                   2459:                        chunk->map[pageind + i].pos = i;
                   2460:                }
                   2461:
                   2462:                /* Trim leading space. */
1.9       christos 2463:                arena_palloc_trim(arena, chunk,
                   2464:                    (unsigned)(pageind - (leadsize >> pagesize_2pow)),
                   2465:                    (unsigned)(leadsize >> pagesize_2pow));
1.1       ad       2466:
                   2467:                trailsize = alloc_size - leadsize - size;
                   2468:                if (trailsize != 0) {
                   2469:                        /* Trim trailing space. */
                   2470:                        assert(trailsize < alloc_size);
                   2471:                        arena_palloc_trim(arena, chunk, pageind + npages,
1.9       christos 2472:                            (unsigned)(trailsize >> pagesize_2pow));
1.1       ad       2473:                }
                   2474:        }
                   2475:
                   2476: #ifdef MALLOC_STATS
                   2477:        arena->stats.nmalloc_large++;
                   2478:        arena->stats.allocated_large += size;
                   2479: #endif
                   2480:        malloc_mutex_unlock(&arena->mtx);
                   2481:
                   2482:        if (opt_junk)
                   2483:                memset(ret, 0xa5, size);
                   2484:        else if (opt_zero)
                   2485:                memset(ret, 0, size);
                   2486:        return (ret);
                   2487: }
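                         /*
                          * XXX Worked example (added for exposition; assumed values).  For
                          * alignment == 8192, size == 4096 and 4 kB pages, a caller might
                          * pass alloc_size == size + alignment - pagesize == 12288, enough
                          * to guarantee an aligned sub-run.  If the run lands at chunk
                          * offset 20480, then offset == 20480 & 8191 == 4096 and leadsize
                          * == 8192 - 4096 == 4096: one leading page is trimmed, ret moves
                          * to the aligned offset 24576, and trailsize == 12288 - 4096 -
                          * 4096 == 4096 trims one trailing page.
                          */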
                   2488:
                   2489: /* Return the size of the allocation pointed to by ptr. */
                   2490: static size_t
                   2491: arena_salloc(const void *ptr)
                   2492: {
                   2493:        size_t ret;
                   2494:        arena_chunk_t *chunk;
                   2495:        arena_chunk_map_t *mapelm;
                   2496:        unsigned pageind;
                   2497:
                   2498:        assert(ptr != NULL);
                   2499:        assert(CHUNK_ADDR2BASE(ptr) != ptr);
                   2500:
                   2501:        /*
                   2502:         * No arena data structures that we query here can change in a way that
                   2503:         * affects this function, so we don't need to lock.
                   2504:         */
                   2505:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1.9       christos 2506:        pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
                   2507:            pagesize_2pow);
1.1       ad       2508:        mapelm = &chunk->map[pageind];
1.10      simonb   2509:        if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
                   2510:            pagesize_2pow)) {
                   2511:                arena_run_t *run;
1.1       ad       2512:
                   2513:                pageind -= mapelm->pos;
                   2514:
1.10      simonb   2515:                run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
                   2516:                    pagesize_2pow));
1.1       ad       2517:                assert(run->magic == ARENA_RUN_MAGIC);
                   2518:                ret = run->bin->reg_size;
                   2519:        } else
                   2520:                ret = mapelm->npages << pagesize_2pow;
                   2521:
                   2522:        return (ret);
                   2523: }
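                         /*
                          * XXX Worked example (added for exposition).  For a small region,
                          * ptr points into a run whose map entries carry pos == 0, 1, 2,
                          * ... from the run's first page.  If ptr is on page 7 of its
                          * chunk and map[7].pos == 2, the run header lives on page
                          * 7 - 2 == 5 and the size is run->bin->reg_size.  A large run
                          * instead has pos == 0 on its first page and ptr equal to that
                          * page's start, so its size is simply map[pageind].npages pages.
                          */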
                   2524:
                   2525: static void *
                   2526: arena_ralloc(void *ptr, size_t size, size_t oldsize)
                   2527: {
                   2528:        void *ret;
                   2529:
                   2530:        /* Avoid moving the allocation if the size class would not change. */
                   2531:        if (size < small_min) {
                   2532:                if (oldsize < small_min &&
                   2533:                    ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
                   2534:                    == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
                   2535:                        goto IN_PLACE;
                   2536:        } else if (size <= small_max) {
                   2537:                if (oldsize >= small_min && oldsize <= small_max &&
                   2538:                    (QUANTUM_CEILING(size) >> opt_quantum_2pow)
                   2539:                    == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
                   2540:                        goto IN_PLACE;
                   2541:        } else {
                   2542:                /*
                   2543:                 * We make no attempt to resize runs here, though it would be
                   2544:                 * possible to do so.
                   2545:                 */
                   2546:                if (oldsize > small_max && PAGE_CEILING(size) == oldsize)
                   2547:                        goto IN_PLACE;
                   2548:        }
                   2549:
                   2550:        /*
                   2551:         * If we get here, then size and oldsize are different enough that we
                   2552:         * need to use a different size class.  In that case, fall back to
                   2553:         * allocating new space and copying.
                   2554:         */
                   2555:        ret = arena_malloc(choose_arena(), size);
                   2556:        if (ret == NULL)
                   2557:                return (NULL);
                   2558:
                   2559:        /* Junk/zero-filling were already done by arena_malloc(). */
                   2560:        if (size < oldsize)
                   2561:                memcpy(ret, ptr, size);
                   2562:        else
                   2563:                memcpy(ret, ptr, oldsize);
                   2564:        idalloc(ptr);
                   2565:        return (ret);
                   2566: IN_PLACE:
                   2567:        if (opt_junk && size < oldsize)
                   2568:                memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
                   2569:        else if (opt_zero && size > oldsize)
                   2570:                memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
                   2571:        return (ptr);
                   2572: }
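                         /*
                          * XXX Worked example (added for exposition; assuming quantum ==
                          * 16).  Growing a 20-byte allocation to 24 bytes stays in place:
                          * QUANTUM_CEILING(20) == QUANTUM_CEILING(24) == 32, so the
                          * backing region is already large enough.  Growing it to 40 bytes
                          * moves to the 48-byte size class, so new space is allocated and
                          * min(size, oldsize) bytes are copied.
                          */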
                   2573:
                   2574: static void
                   2575: arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
                   2576: {
                   2577:        unsigned pageind;
                   2578:        arena_chunk_map_t *mapelm;
                   2579:        size_t size;
                   2580:
                   2581:        assert(arena != NULL);
                   2582:        assert(arena->magic == ARENA_MAGIC);
                   2583:        assert(chunk->arena == arena);
                   2584:        assert(ptr != NULL);
                   2585:        assert(CHUNK_ADDR2BASE(ptr) != ptr);
                   2586:
1.9       christos 2587:        pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
                   2588:            pagesize_2pow);
1.1       ad       2589:        mapelm = &chunk->map[pageind];
1.10      simonb   2590:        if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
                   2591:            pagesize_2pow)) {
                   2592:                arena_run_t *run;
1.1       ad       2593:                arena_bin_t *bin;
                   2594:
                   2595:                /* Small allocation. */
                   2596:
                   2597:                pageind -= mapelm->pos;
                   2598:
1.10      simonb   2599:                run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
                   2600:                    pagesize_2pow));
1.1       ad       2601:                assert(run->magic == ARENA_RUN_MAGIC);
                   2602:                bin = run->bin;
                   2603:                size = bin->reg_size;
                   2604:
                   2605:                if (opt_junk)
                   2606:                        memset(ptr, 0x5a, size);
                   2607:
                   2608:                malloc_mutex_lock(&arena->mtx);
                   2609:                arena_run_reg_dalloc(run, bin, ptr, size);
                   2610:                run->nfree++;
                   2611:
                   2612:                if (run->nfree == bin->nregs) {
                   2613:                        /* Deallocate run. */
                   2614:                        if (run == bin->runcur)
                   2615:                                bin->runcur = NULL;
                   2616:                        else if (bin->nregs != 1) {
                   2617:                                /*
                   2618:                                 * This block's conditional is necessary because
                   2619:                                 * if the run only contains one region, then it
                   2620:                                 * never gets inserted into the non-full runs
                   2621:                                 * tree.
                   2622:                                 */
1.2       ad       2623:                                /* LINTED */
1.1       ad       2624:                                RB_REMOVE(arena_run_tree_s, &bin->runs, run);
                   2625:                        }
                   2626: #ifdef MALLOC_DEBUG
                   2627:                        run->magic = 0;
                   2628: #endif
                   2629:                        arena_run_dalloc(arena, run, bin->run_size);
                   2630: #ifdef MALLOC_STATS
                   2631:                        bin->stats.curruns--;
                   2632: #endif
                   2633:                } else if (run->nfree == 1 && run != bin->runcur) {
                   2634:                        /*
                   2635:                         * Make sure that bin->runcur always refers to the
                   2636:                         * lowest non-full run, if one exists.
                   2637:                         */
                   2638:                        if (bin->runcur == NULL)
                   2639:                                bin->runcur = run;
                   2640:                        else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
                   2641:                                /* Switch runcur. */
                   2642:                                if (bin->runcur->nfree > 0) {
                   2643:                                        /* Insert runcur. */
1.2       ad       2644:                                        /* LINTED */
1.1       ad       2645:                                        RB_INSERT(arena_run_tree_s, &bin->runs,
                   2646:                                            bin->runcur);
                   2647:                                }
                   2648:                                bin->runcur = run;
1.2       ad       2649:                        } else {
                   2650:                                /* LINTED */
1.1       ad       2651:                                RB_INSERT(arena_run_tree_s, &bin->runs, run);
1.2       ad       2652:                        }
1.1       ad       2653:                }
                   2654: #ifdef MALLOC_STATS
                   2655:                arena->stats.allocated_small -= size;
                   2656:                arena->stats.ndalloc_small++;
                   2657: #endif
                   2658:        } else {
                   2659:                /* Large allocation. */
                   2660:
                   2661:                size = mapelm->npages << pagesize_2pow;
                   2662:                assert((((uintptr_t)ptr) & pagesize_mask) == 0);
                   2663:
                   2664:                if (opt_junk)
                   2665:                        memset(ptr, 0x5a, size);
                   2666:
                   2667:                malloc_mutex_lock(&arena->mtx);
                   2668:                arena_run_dalloc(arena, (arena_run_t *)ptr, size);
                   2669: #ifdef MALLOC_STATS
                   2670:                arena->stats.allocated_large -= size;
                   2671:                arena->stats.ndalloc_large++;
                   2672: #endif
                   2673:        }
                   2674:
                   2675:        malloc_mutex_unlock(&arena->mtx);
                   2676: }
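                         /*
                          * XXX Design note (added for exposition).  The runcur handling
                          * above keeps bin->runcur pointing at the lowest-addressed
                          * non-full run, so new regions are packed toward low addresses
                          * and higher-addressed runs get a chance to drain completely and
                          * be returned via arena_run_dalloc().
                          */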
                   2677:
                   2678: static bool
                   2679: arena_new(arena_t *arena)
                   2680: {
                   2681:        unsigned i;
                   2682:        arena_bin_t *bin;
1.2       ad       2683:        size_t prev_run_size;
1.1       ad       2684:
                   2685:        malloc_mutex_init(&arena->mtx);
                   2686:
                   2687: #ifdef MALLOC_STATS
                   2688:        memset(&arena->stats, 0, sizeof(arena_stats_t));
                   2689: #endif
                   2690:
                   2691:        /* Initialize chunks. */
                   2692:        RB_INIT(&arena->chunks);
                   2693:        arena->spare = NULL;
                   2694:
                   2695:        /* Initialize bins. */
                   2696:        prev_run_size = pagesize;
                   2697:
                   2698:        /* (2^n)-spaced tiny bins. */
                   2699:        for (i = 0; i < ntbins; i++) {
                   2700:                bin = &arena->bins[i];
                   2701:                bin->runcur = NULL;
                   2702:                RB_INIT(&bin->runs);
                   2703:
                   2704:                bin->reg_size = (1 << (TINY_MIN_2POW + i));
                   2705:                prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
                   2706:
                   2707: #ifdef MALLOC_STATS
                   2708:                memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
                   2709: #endif
                   2710:        }
                   2711:
                   2712:        /* Quantum-spaced bins. */
                   2713:        for (; i < ntbins + nqbins; i++) {
                   2714:                bin = &arena->bins[i];
                   2715:                bin->runcur = NULL;
                   2716:                RB_INIT(&bin->runs);
                   2717:
                   2718:                bin->reg_size = quantum * (i - ntbins + 1);
1.2       ad       2719: /*
1.1       ad       2720:                pow2_size = pow2_ceil(quantum * (i - ntbins + 1));
1.2       ad       2721: */
1.1       ad       2722:                prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
                   2723:
                   2724: #ifdef MALLOC_STATS
                   2725:                memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
                   2726: #endif
                   2727:        }
                   2728:
                   2729:        /* (2^n)-spaced sub-page bins. */
                   2730:        for (; i < ntbins + nqbins + nsbins; i++) {
                   2731:                bin = &arena->bins[i];
                   2732:                bin->runcur = NULL;
                   2733:                RB_INIT(&bin->runs);
                   2734:
                   2735:                bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
                   2736:
                   2737:                prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
                   2738:
                   2739: #ifdef MALLOC_STATS
                   2740:                memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
                   2741: #endif
                   2742:        }
                   2743:
                   2744: #ifdef MALLOC_DEBUG
                   2745:        arena->magic = ARENA_MAGIC;
                   2746: #endif
                   2747:
                   2748:        return (false);
                   2749: }
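                         /*
                          * XXX Worked example (added for exposition; assumed parameters:
                          * TINY_MIN_2POW == 3, quantum == 16, small_max == 512, 4 kB
                          * pages).  The loops above then produce one tiny bin {8},
                          * quantum-spaced bins {16, 32, ..., 512} and sub-page bins
                          * {1024, 2048}: region sizes cover everything up to bin_maxclass
                          * == pagesize / 2, and anything larger is allocated as a page
                          * run.
                          */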
                   2750:
                   2751: /* Create a new arena and insert it into the arenas array at index ind. */
                   2752: static arena_t *
                   2753: arenas_extend(unsigned ind)
                   2754: {
                   2755:        arena_t *ret;
                   2756:
                   2757:        /* Allocate enough space for trailing bins. */
                   2758:        ret = (arena_t *)base_alloc(sizeof(arena_t)
                   2759:            + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
                   2760:        if (ret != NULL && arena_new(ret) == false) {
                   2761:                arenas[ind] = ret;
                   2762:                return (ret);
                   2763:        }
                   2764:        /* Only reached if there is an OOM error. */
                   2765:
                   2766:        /*
                   2767:         * OOM here is quite inconvenient to propagate, since dealing with it
                   2768:         * would require a check for failure in the fast path.  Instead, punt
                   2769:         * by using arenas[0].  In practice, this is an extremely unlikely
                   2770:         * failure.
                   2771:         */
1.16      christos 2772:        _malloc_message(getprogname(),
1.1       ad       2773:            ": (malloc) Error initializing arena\n", "", "");
                   2774:        if (opt_abort)
                   2775:                abort();
                   2776:
                   2777:        return (arenas[0]);
                   2778: }
                   2779:
                   2780: /*
                   2781:  * End arena.
                   2782:  */
                   2783: /******************************************************************************/
                   2784: /*
                   2785:  * Begin general internal functions.
                   2786:  */
                   2787:
                   2788: static void *
                   2789: huge_malloc(size_t size)
                   2790: {
                   2791:        void *ret;
                   2792:        size_t csize;
                   2793:        chunk_node_t *node;
                   2794:
                   2795:        /* Allocate one or more contiguous chunks for this request. */
                   2796:
                   2797:        csize = CHUNK_CEILING(size);
                   2798:        if (csize == 0) {
                   2799:                /* size is large enough to cause size_t wrap-around. */
                   2800:                return (NULL);
                   2801:        }
                   2802:
                   2803:        /* Allocate a chunk node with which to track the chunk. */
                   2804:        node = base_chunk_node_alloc();
                   2805:        if (node == NULL)
                   2806:                return (NULL);
                   2807:
                   2808:        ret = chunk_alloc(csize);
                   2809:        if (ret == NULL) {
                   2810:                base_chunk_node_dealloc(node);
                   2811:                return (NULL);
                   2812:        }
                   2813:
                   2814:        /* Insert node into huge. */
                   2815:        node->chunk = ret;
                   2816:        node->size = csize;
                   2817:
                   2818:        malloc_mutex_lock(&chunks_mtx);
                   2819:        RB_INSERT(chunk_tree_s, &huge, node);
                   2820: #ifdef MALLOC_STATS
                   2821:        huge_nmalloc++;
                   2822:        huge_allocated += csize;
                   2823: #endif
                   2824:        malloc_mutex_unlock(&chunks_mtx);
                   2825:
                   2826:        if (opt_junk)
                   2827:                memset(ret, 0xa5, csize);
                   2828:        else if (opt_zero)
                   2829:                memset(ret, 0, csize);
                   2830:
                   2831:        return (ret);
                   2832: }
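                         /*
                          * XXX Worked example (added for exposition).  CHUNK_CEILING()
                          * wraps to 0 when size is within chunksize of SIZE_MAX: with
                          * 1 MB chunks, CHUNK_CEILING(SIZE_MAX - 100) overflows to 0,
                          * which the check above turns into a clean NULL return rather
                          * than a zero-sized mapping.
                          */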
                   2833:
                   2834: /* Only handles large allocations that require more than chunk alignment. */
                   2835: static void *
                   2836: huge_palloc(size_t alignment, size_t size)
                   2837: {
                   2838:        void *ret;
                   2839:        size_t alloc_size, chunk_size, offset;
                   2840:        chunk_node_t *node;
                   2841:
                   2842:        /*
                   2843:         * This allocation requires alignment that is even larger than chunk
                   2844:         * alignment.  This means that huge_malloc() isn't good enough.
                   2845:         *
                   2846:         * Allocate almost twice as many chunks as are demanded by the size or
                   2847:         * alignment, in order to ensure that the alignment can be achieved, then
                   2848:         * unmap leading and trailing chunks.
                   2849:         */
                   2850:        assert(alignment >= chunksize);
                   2851:
                   2852:        chunk_size = CHUNK_CEILING(size);
                   2853:
                   2854:        if (size >= alignment)
                   2855:                alloc_size = chunk_size + alignment - chunksize;
                   2856:        else
                   2857:                alloc_size = (alignment << 1) - chunksize;
                   2858:
                   2859:        /* Allocate a chunk node with which to track the chunk. */
                   2860:        node = base_chunk_node_alloc();
                   2861:        if (node == NULL)
                   2862:                return (NULL);
                   2863:
                   2864:        ret = chunk_alloc(alloc_size);
                   2865:        if (ret == NULL) {
                   2866:                base_chunk_node_dealloc(node);
                   2867:                return (NULL);
                   2868:        }
                   2869:
                   2870:        offset = (uintptr_t)ret & (alignment - 1);
                   2871:        assert((offset & chunksize_mask) == 0);
                   2872:        assert(offset < alloc_size);
                   2873:        if (offset == 0) {
                   2874:                /* Trim trailing space. */
                   2875:                chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
                   2876:                    - chunk_size);
                   2877:        } else {
                   2878:                size_t trailsize;
                   2879:
                   2880:                /* Trim leading space. */
                   2881:                chunk_dealloc(ret, alignment - offset);
                   2882:
                   2883:                ret = (void *)((uintptr_t)ret + (alignment - offset));
                   2884:
                   2885:                trailsize = alloc_size - (alignment - offset) - chunk_size;
                   2886:                if (trailsize != 0) {
                   2887:                    /* Trim trailing space. */
                   2888:                    assert(trailsize < alloc_size);
                   2889:                    chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
                   2890:                        trailsize);
                   2891:                }
                   2892:        }
                   2893:
                   2894:        /* Insert node into huge. */
                   2895:        node->chunk = ret;
                   2896:        node->size = chunk_size;
                   2897:
                   2898:        malloc_mutex_lock(&chunks_mtx);
                   2899:        RB_INSERT(chunk_tree_s, &huge, node);
                   2900: #ifdef MALLOC_STATS
                   2901:        huge_nmalloc++;
                   2902:        huge_allocated += chunk_size;
                   2903: #endif
                   2904:        malloc_mutex_unlock(&chunks_mtx);
                   2905:
                   2906:        if (opt_junk)
                   2907:                memset(ret, 0xa5, chunk_size);
                   2908:        else if (opt_zero)
                   2909:                memset(ret, 0, chunk_size);
                   2910:
                   2911:        return (ret);
                   2912: }
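                         /*
                          * XXX Worked example (added for exposition; 1 MB chunks).  For
                          * size == 1 MB and alignment == 4 MB, alloc_size ==
                          * (alignment << 1) - chunksize == 7 MB.  Any chunk-aligned 7 MB
                          * region contains a 4 MB-aligned offset at most 3 MB from its
                          * start with at least 1 MB left after it, so after trimming the
                          * lead (alignment - offset) and the tail, exactly one aligned
                          * chunk_size region remains.
                          */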
                   2913:
                   2914: static void *
                   2915: huge_ralloc(void *ptr, size_t size, size_t oldsize)
                   2916: {
                   2917:        void *ret;
                   2918:
                   2919:        /* Avoid moving the allocation if the size class would not change. */
                   2920:        if (oldsize > arena_maxclass &&
                   2921:            CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
                   2922:                if (opt_junk && size < oldsize) {
                   2923:                        memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
                   2924:                            - size);
                   2925:                } else if (opt_zero && size > oldsize) {
                   2926:                        memset((void *)((uintptr_t)ptr + oldsize), 0, size
                   2927:                            - oldsize);
                   2928:                }
                   2929:                return (ptr);
                   2930:        }
                   2931:
1.8       yamt     2932:        if (CHUNK_ADDR2BASE(ptr) == ptr
                   2933: #ifdef USE_BRK
                   2934:            && ((uintptr_t)ptr < (uintptr_t)brk_base
                   2935:            || (uintptr_t)ptr >= (uintptr_t)brk_max)
                   2936: #endif
                   2937:            ) {
                   2938:                chunk_node_t *node, key;
                   2939:                void *newptr;
                   2940:                size_t oldcsize;
                   2941:                size_t newcsize;
                   2942:
                   2943:                newcsize = CHUNK_CEILING(size);
                   2944:                oldcsize = CHUNK_CEILING(oldsize);
                   2945:                assert(oldcsize != newcsize);
                   2946:                if (newcsize == 0) {
                   2947:                        /* size_t wrap-around */
                   2948:                        return (NULL);
                   2949:                }
1.21      enami    2950:
                   2951:                /*
                   2952:                 * Remove the old region from the tree now.  If mremap()
                    2953:                 * returns the region to the system, another thread may
                    2954:                 * map it for the same huge allocation and insert it into
                    2955:                 * the tree before we acquire the mutex lock again.
                   2956:                 */
                   2957:                malloc_mutex_lock(&chunks_mtx);
                   2958:                key.chunk = __DECONST(void *, ptr);
                   2959:                /* LINTED */
                   2960:                node = RB_FIND(chunk_tree_s, &huge, &key);
                   2961:                assert(node != NULL);
                   2962:                assert(node->chunk == ptr);
                   2963:                assert(node->size == oldcsize);
                   2964:                RB_REMOVE(chunk_tree_s, &huge, node);
                   2965:                malloc_mutex_unlock(&chunks_mtx);
                   2966:
1.8       yamt     2967:                newptr = mremap(ptr, oldcsize, NULL, newcsize,
                   2968:                    MAP_ALIGNED(chunksize_2pow));
1.21      enami    2969:                if (newptr == MAP_FAILED) {
                   2970:                        /* We still own the old region. */
                   2971:                        malloc_mutex_lock(&chunks_mtx);
                   2972:                        RB_INSERT(chunk_tree_s, &huge, node);
                   2973:                        malloc_mutex_unlock(&chunks_mtx);
                   2974:                } else {
1.8       yamt     2975:                        assert(CHUNK_ADDR2BASE(newptr) == newptr);
                   2976:
1.21      enami    2977:                        /* Insert new or resized old region. */
1.8       yamt     2978:                        malloc_mutex_lock(&chunks_mtx);
                   2979:                        node->size = newcsize;
1.21      enami    2980:                        node->chunk = newptr;
                   2981:                        RB_INSERT(chunk_tree_s, &huge, node);
1.8       yamt     2982: #ifdef MALLOC_STATS
                   2983:                        huge_nralloc++;
                   2984:                        huge_allocated += newcsize - oldcsize;
                   2985:                        if (newcsize > oldcsize) {
                   2986:                                stats_chunks.curchunks +=
                   2987:                                    (newcsize - oldcsize) / chunksize;
                   2988:                                if (stats_chunks.curchunks >
                   2989:                                    stats_chunks.highchunks)
                   2990:                                        stats_chunks.highchunks =
                   2991:                                            stats_chunks.curchunks;
                   2992:                        } else {
                   2993:                                stats_chunks.curchunks -=
                   2994:                                    (oldcsize - newcsize) / chunksize;
                   2995:                        }
                   2996: #endif
                   2997:                        malloc_mutex_unlock(&chunks_mtx);
                   2998:
                   2999:                        if (opt_junk && size < oldsize) {
                   3000:                                memset((void *)((uintptr_t)newptr + size), 0x5a,
                   3001:                                    newcsize - size);
                   3002:                        } else if (opt_zero && size > oldsize) {
                   3003:                                memset((void *)((uintptr_t)newptr + oldsize), 0,
                   3004:                                    size - oldsize);
                   3005:                        }
                   3006:                        return (newptr);
                   3007:                }
                   3008:        }
                   3009:
1.1       ad       3010:        /*
                   3011:         * If we get here, then size and oldsize are different enough that we
                   3012:         * need to use a different size class.  In that case, fall back to
                   3013:         * allocating new space and copying.
                   3014:         */
                   3015:        ret = huge_malloc(size);
                   3016:        if (ret == NULL)
                   3017:                return (NULL);
                   3018:
                   3019:        if (CHUNK_ADDR2BASE(ptr) == ptr) {
                   3020:                /* The old allocation is a chunk. */
                   3021:                if (size < oldsize)
                   3022:                        memcpy(ret, ptr, size);
                   3023:                else
                   3024:                        memcpy(ret, ptr, oldsize);
                   3025:        } else {
                   3026:                /* The old allocation is a region. */
                   3027:                assert(oldsize < size);
                   3028:                memcpy(ret, ptr, oldsize);
                   3029:        }
                   3030:        idalloc(ptr);
                   3031:        return (ret);
                   3032: }
                   3033:
                   3034: static void
                   3035: huge_dalloc(void *ptr)
                   3036: {
                   3037:        chunk_node_t key;
                   3038:        chunk_node_t *node;
                   3039:
                   3040:        malloc_mutex_lock(&chunks_mtx);
                   3041:
                   3042:        /* Extract from tree of huge allocations. */
                   3043:        key.chunk = ptr;
1.2       ad       3044:        /* LINTED */
1.1       ad       3045:        node = RB_FIND(chunk_tree_s, &huge, &key);
                   3046:        assert(node != NULL);
                   3047:        assert(node->chunk == ptr);
1.2       ad       3048:        /* LINTED */
1.1       ad       3049:        RB_REMOVE(chunk_tree_s, &huge, node);
                   3050:
                   3051: #ifdef MALLOC_STATS
                   3052:        huge_ndalloc++;
                   3053:        huge_allocated -= node->size;
                   3054: #endif
                   3055:
                   3056:        malloc_mutex_unlock(&chunks_mtx);
                   3057:
                   3058:        /* Unmap chunk. */
                   3059: #ifdef USE_BRK
                   3060:        if (opt_junk)
                   3061:                memset(node->chunk, 0x5a, node->size);
                   3062: #endif
                   3063:        chunk_dealloc(node->chunk, node->size);
                   3064:
                   3065:        base_chunk_node_dealloc(node);
                   3066: }
                   3067:
                   3068: static void *
                   3069: imalloc(size_t size)
                   3070: {
                   3071:        void *ret;
                   3072:
                   3073:        assert(size != 0);
                   3074:
                   3075:        if (size <= arena_maxclass)
                   3076:                ret = arena_malloc(choose_arena(), size);
                   3077:        else
                   3078:                ret = huge_malloc(size);
                   3079:
                   3080:        return (ret);
                   3081: }
                   3082:
                   3083: static void *
                   3084: ipalloc(size_t alignment, size_t size)
                   3085: {
                   3086:        void *ret;
                   3087:        size_t ceil_size;
                   3088:
                   3089:        /*
                   3090:         * Round size up to the nearest multiple of alignment.
                   3091:         *
                   3092:         * This done, we can take advantage of the fact that for each small
                   3093:         * size class, every object is aligned at the smallest power of two
                   3094:         * that is non-zero in the base two representation of the size.  For
                   3095:         * example:
                   3096:         *
                   3097:         *   Size |   Base 2 | Minimum alignment
                   3098:         *   -----+----------+------------------
                   3099:         *     96 |  1100000 |  32
                    3100:         *    160 | 10100000 |  32
                   3101:         *    192 | 11000000 |  64
                   3102:         *
                   3103:         * Depending on runtime settings, it is possible that arena_malloc()
                   3104:         * will further round up to a power of two, but that never causes
                   3105:         * correctness issues.
                   3106:         */
                   3107:        ceil_size = (size + (alignment - 1)) & (-alignment);
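                                 /*
                                  * Worked example (illustrative, assuming the default
                                  * quantum of 16): for alignment = 16 and size = 100,
                                  * ceil_size = (100 + 15) & ~15 = 112.  112 is a
                                  * quantum-spaced size class whose lowest set bit (16)
                                  * satisfies the requested alignment.
                                  */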
                   3108:        /*
                   3109:         * (ceil_size < size) protects against the combination of maximal
                   3110:         * alignment and size greater than maximal alignment.
                   3111:         */
                   3112:        if (ceil_size < size) {
                   3113:                /* size_t overflow. */
                   3114:                return (NULL);
                   3115:        }
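                                 /*
                                  * Illustrative overflow case (32-bit size_t): for
                                  * alignment = 0x80000000 and size = 0x80000001,
                                  * size + alignment - 1 wraps to 0, so ceil_size = 0 <
                                  * size and the request correctly fails with NULL.
                                  */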
                   3116:
                   3117:        if (ceil_size <= pagesize || (alignment <= pagesize
                   3118:            && ceil_size <= arena_maxclass))
                   3119:                ret = arena_malloc(choose_arena(), ceil_size);
                   3120:        else {
                   3121:                size_t run_size;
                   3122:
                   3123:                /*
                   3124:                 * We can't achieve sub-page alignment, so round up alignment
                   3125:                 * permanently; it makes later calculations simpler.
                   3126:                 */
                   3127:                alignment = PAGE_CEILING(alignment);
                   3128:                ceil_size = PAGE_CEILING(size);
                   3129:                /*
                   3130:                 * (ceil_size < size) protects against very large sizes within
                   3131:                 * pagesize of SIZE_T_MAX.
                   3132:                 *
                   3133:                 * (ceil_size + alignment < ceil_size) protects against the
                   3134:                 * combination of maximal alignment and ceil_size large enough
                   3135:                 * to cause overflow.  This is similar to the first overflow
                   3136:                 * check above, but it needs to be repeated due to the new
                   3137:                 * ceil_size value, which may now be *equal* to maximal
                   3138:                 * alignment, whereas before we only detected overflow if the
                   3139:                 * original size was *greater* than maximal alignment.
                   3140:                 */
                   3141:                if (ceil_size < size || ceil_size + alignment < ceil_size) {
                   3142:                        /* size_t overflow. */
                   3143:                        return (NULL);
                   3144:                }
                   3145:
                   3146:                /*
                   3147:                 * Calculate the size of the over-size run that arena_palloc()
                   3148:                 * would need to allocate in order to guarantee the alignment.
                   3149:                 */
                   3150:                if (ceil_size >= alignment)
                   3151:                        run_size = ceil_size + alignment - pagesize;
                   3152:                else {
                   3153:                        /*
                   3154:                         * It is possible that (alignment << 1) will cause
                   3155:                         * overflow, but it doesn't matter because we also
                   3156:                         * subtract pagesize, which in the case of overflow
                   3157:                         * leaves us with a very large run_size.  That causes
                   3158:                         * the first conditional below to fail, which means
                   3159:                         * that the bogus run_size value never gets used for
                   3160:                         * anything important.
                   3161:                         */
                   3162:                        run_size = (alignment << 1) - pagesize;
                   3163:                }
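                                         /*
                                          * Worked example (illustrative, 4kB pages): for
                                          * ceil_size = 20480 and alignment = 8192,
                                          * run_size = 20480 + 8192 - 4096 = 24576.  A
                                          * page-aligned run of that size must contain an
                                          * 8192-aligned address followed by at least
                                          * 20480 bytes, which guarantees the alignment.
                                          */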
                   3164:
                   3165:                if (run_size <= arena_maxclass) {
                   3166:                        ret = arena_palloc(choose_arena(), alignment, ceil_size,
                   3167:                            run_size);
                   3168:                } else if (alignment <= chunksize)
                   3169:                        ret = huge_malloc(ceil_size);
                   3170:                else
                   3171:                        ret = huge_palloc(alignment, ceil_size);
                   3172:        }
                   3173:
                   3174:        assert(((uintptr_t)ret & (alignment - 1)) == 0);
                   3175:        return (ret);
                   3176: }
                   3177:
                   3178: static void *
                   3179: icalloc(size_t size)
                   3180: {
                   3181:        void *ret;
                   3182:
                   3183:        if (size <= arena_maxclass) {
                   3184:                ret = arena_malloc(choose_arena(), size);
                   3185:                if (ret == NULL)
                   3186:                        return (NULL);
                   3187:                memset(ret, 0, size);
                   3188:        } else {
                   3189:                /*
                   3190:                 * The virtual memory system provides zero-filled pages, so
                    3191:                 * there is no need to zero the memory manually, unless
                    3192:                 * opt_junk is enabled, in which case huge_malloc() fills
                    3193:                 * huge allocations with junk that must be zeroed here.
                   3194:                 */
                   3195:                ret = huge_malloc(size);
                   3196:                if (ret == NULL)
                   3197:                        return (NULL);
                   3198:
                   3199:                if (opt_junk)
                   3200:                        memset(ret, 0, size);
                   3201: #ifdef USE_BRK
                   3202:                else if ((uintptr_t)ret >= (uintptr_t)brk_base
                   3203:                    && (uintptr_t)ret < (uintptr_t)brk_max) {
                   3204:                        /*
                   3205:                         * This may be a re-used brk chunk.  Therefore, zero
                   3206:                         * the memory.
                   3207:                         */
                   3208:                        memset(ret, 0, size);
                   3209:                }
                   3210: #endif
                   3211:        }
                   3212:
                   3213:        return (ret);
                   3214: }
                   3215:
                   3216: static size_t
                   3217: isalloc(const void *ptr)
                   3218: {
                   3219:        size_t ret;
                   3220:        arena_chunk_t *chunk;
                   3221:
                   3222:        assert(ptr != NULL);
                   3223:
                   3224:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
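                                 /*
                                  * Illustrative note: huge allocations are chunk-aligned,
                                  * so CHUNK_ADDR2BASE() maps them onto themselves, while
                                  * arena regions live at a non-zero offset inside a
                                  * chunk.  E.g. with 1MB chunks, ptr = 0x80234560 yields
                                  * chunk = 0x80200000 != ptr.
                                  */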
                   3225:        if (chunk != ptr) {
                   3226:                /* Region. */
                   3227:                assert(chunk->arena->magic == ARENA_MAGIC);
                   3228:
                   3229:                ret = arena_salloc(ptr);
                   3230:        } else {
                   3231:                chunk_node_t *node, key;
                   3232:
                   3233:                /* Chunk (huge allocation). */
                   3234:
                   3235:                malloc_mutex_lock(&chunks_mtx);
                   3236:
                   3237:                /* Extract from tree of huge allocations. */
                   3238:                key.chunk = __DECONST(void *, ptr);
1.2       ad       3239:                /* LINTED */
1.1       ad       3240:                node = RB_FIND(chunk_tree_s, &huge, &key);
                   3241:                assert(node != NULL);
                   3242:
                   3243:                ret = node->size;
                   3244:
                   3245:                malloc_mutex_unlock(&chunks_mtx);
                   3246:        }
                   3247:
                   3248:        return (ret);
                   3249: }
                   3250:
                   3251: static void *
                   3252: iralloc(void *ptr, size_t size)
                   3253: {
                   3254:        void *ret;
                   3255:        size_t oldsize;
                   3256:
                   3257:        assert(ptr != NULL);
                   3258:        assert(size != 0);
                   3259:
                   3260:        oldsize = isalloc(ptr);
                   3261:
                   3262:        if (size <= arena_maxclass)
                   3263:                ret = arena_ralloc(ptr, size, oldsize);
                   3264:        else
                   3265:                ret = huge_ralloc(ptr, size, oldsize);
                   3266:
                   3267:        return (ret);
                   3268: }
                   3269:
                   3270: static void
                   3271: idalloc(void *ptr)
                   3272: {
                   3273:        arena_chunk_t *chunk;
                   3274:
                   3275:        assert(ptr != NULL);
                   3276:
                   3277:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
                   3278:        if (chunk != ptr) {
                   3279:                /* Region. */
                   3280:                arena_dalloc(chunk->arena, chunk, ptr);
                   3281:        } else
                   3282:                huge_dalloc(ptr);
                   3283: }
                   3284:
                   3285: static void
                   3286: malloc_print_stats(void)
                   3287: {
                   3288:
                   3289:        if (opt_print_stats) {
                   3290:                char s[UMAX2S_BUFSIZE];
                   3291:                _malloc_message("___ Begin malloc statistics ___\n", "", "",
                   3292:                    "");
                   3293:                _malloc_message("Assertions ",
                   3294: #ifdef NDEBUG
                   3295:                    "disabled",
                   3296: #else
                   3297:                    "enabled",
                   3298: #endif
                   3299:                    "\n", "");
                   3300:                _malloc_message("Boolean MALLOC_OPTIONS: ",
                   3301:                    opt_abort ? "A" : "a",
                   3302:                    opt_junk ? "J" : "j",
                   3303:                    opt_hint ? "H" : "h");
                   3304:                _malloc_message(opt_utrace ? "PU" : "Pu",
                   3305:                    opt_sysv ? "V" : "v",
                   3306:                    opt_xmalloc ? "X" : "x",
                   3307:                    opt_zero ? "Z\n" : "z\n");
                   3308:
1.28      christos 3309:                _malloc_message("CPUs: ", size_t2s(ncpus, s), "\n", "");
                   3310:                _malloc_message("Max arenas: ", size_t2s(narenas, s), "\n", "");
                   3311:                _malloc_message("Pointer size: ", size_t2s(sizeof(void *), s),
1.1       ad       3312:                    "\n", "");
1.28      christos 3313:                _malloc_message("Quantum size: ", size_t2s(quantum, s), "\n", "");
                   3314:                _malloc_message("Max small size: ", size_t2s(small_max, s), "\n",
1.1       ad       3315:                    "");
                   3316:
1.28      christos 3317:                _malloc_message("Chunk size: ", size_t2s(chunksize, s), "", "");
                   3318:                _malloc_message(" (2^", size_t2s((size_t)opt_chunk_2pow, s),
1.27      matt     3319:                    ")\n", "");
1.1       ad       3320:
                   3321: #ifdef MALLOC_STATS
                   3322:                {
                   3323:                        size_t allocated, mapped;
                   3324:                        unsigned i;
                   3325:                        arena_t *arena;
                   3326:
                   3327:                        /* Calculate and print allocated/mapped stats. */
                   3328:
                   3329:                        /* arenas. */
                   3330:                        for (i = 0, allocated = 0; i < narenas; i++) {
                   3331:                                if (arenas[i] != NULL) {
                   3332:                                        malloc_mutex_lock(&arenas[i]->mtx);
                   3333:                                        allocated +=
                   3334:                                            arenas[i]->stats.allocated_small;
                   3335:                                        allocated +=
                   3336:                                            arenas[i]->stats.allocated_large;
                   3337:                                        malloc_mutex_unlock(&arenas[i]->mtx);
                   3338:                                }
                   3339:                        }
                   3340:
                   3341:                        /* huge/base. */
                   3342:                        malloc_mutex_lock(&chunks_mtx);
                   3343:                        allocated += huge_allocated;
                   3344:                        mapped = stats_chunks.curchunks * chunksize;
                   3345:                        malloc_mutex_unlock(&chunks_mtx);
                   3346:
                   3347:                        malloc_mutex_lock(&base_mtx);
                   3348:                        mapped += base_mapped;
                   3349:                        malloc_mutex_unlock(&base_mtx);
                   3350:
                   3351:                        malloc_printf("Allocated: %zu, mapped: %zu\n",
                   3352:                            allocated, mapped);
                   3353:
                   3354:                        /* Print chunk stats. */
                   3355:                        {
                   3356:                                chunk_stats_t chunks_stats;
                   3357:
                   3358:                                malloc_mutex_lock(&chunks_mtx);
                   3359:                                chunks_stats = stats_chunks;
                   3360:                                malloc_mutex_unlock(&chunks_mtx);
                   3361:
                   3362:                                malloc_printf("chunks: nchunks   "
                   3363:                                    "highchunks    curchunks\n");
                   3364:                                malloc_printf("  %13llu%13lu%13lu\n",
                   3365:                                    chunks_stats.nchunks,
                   3366:                                    chunks_stats.highchunks,
                   3367:                                    chunks_stats.curchunks);
                   3368:                        }
                   3369:
                    3370:                        /* Print huge allocation stats. */
                   3371:                        malloc_printf(
1.8       yamt     3372:                            "huge: nmalloc      ndalloc      "
                   3373:                            "nralloc    allocated\n");
                   3374:                        malloc_printf(" %12llu %12llu %12llu %12zu\n",
                   3375:                            huge_nmalloc, huge_ndalloc, huge_nralloc,
                   3376:                            huge_allocated);
1.1       ad       3377:
                   3378:                        /* Print stats for each arena. */
                   3379:                        for (i = 0; i < narenas; i++) {
                   3380:                                arena = arenas[i];
                   3381:                                if (arena != NULL) {
                   3382:                                        malloc_printf(
1.2       ad       3383:                                            "\narenas[%u] @ %p\n", i, arena);
1.1       ad       3384:                                        malloc_mutex_lock(&arena->mtx);
                   3385:                                        stats_print(arena);
                   3386:                                        malloc_mutex_unlock(&arena->mtx);
                   3387:                                }
                   3388:                        }
                   3389:                }
                   3390: #endif /* #ifdef MALLOC_STATS */
                   3391:                _malloc_message("--- End malloc statistics ---\n", "", "", "");
                   3392:        }
                   3393: }
                   3394:
                   3395: /*
                   3396:  * FreeBSD's pthreads implementation calls malloc(3), so the malloc
                   3397:  * implementation has to take pains to avoid infinite recursion during
                   3398:  * initialization.
                   3399:  */
                   3400: static inline bool
                   3401: malloc_init(void)
                   3402: {
                   3403:
                   3404:        if (malloc_initialized == false)
                   3405:                return (malloc_init_hard());
                   3406:
                   3407:        return (false);
                   3408: }
                   3409:
                   3410: static bool
                   3411: malloc_init_hard(void)
                   3412: {
                   3413:        unsigned i, j;
1.9       christos 3414:        ssize_t linklen;
1.1       ad       3415:        char buf[PATH_MAX + 1];
1.2       ad       3416:        const char *opts = "";
1.23      christos 3417:        int serrno;
1.1       ad       3418:
                   3419:        malloc_mutex_lock(&init_lock);
                   3420:        if (malloc_initialized) {
                   3421:                /*
                   3422:                 * Another thread initialized the allocator before this one
                   3423:                 * acquired init_lock.
                   3424:                 */
                   3425:                malloc_mutex_unlock(&init_lock);
                   3426:                return (false);
                   3427:        }
                   3428:
1.24      christos 3429:        serrno = errno;
1.1       ad       3430:        /* Get number of CPUs. */
                   3431:        {
                   3432:                int mib[2];
                   3433:                size_t len;
                   3434:
                   3435:                mib[0] = CTL_HW;
                   3436:                mib[1] = HW_NCPU;
                   3437:                len = sizeof(ncpus);
                   3438:                if (sysctl(mib, 2, &ncpus, &len, (void *) 0, 0) == -1) {
                   3439:                        /* Error. */
                   3440:                        ncpus = 1;
                   3441:                }
                   3442:        }
                   3443:
                   3444:        /* Get page size. */
                   3445:        {
                   3446:                long result;
                   3447:
                   3448:                result = sysconf(_SC_PAGESIZE);
                   3449:                assert(result != -1);
                   3450:                pagesize = (unsigned) result;
                   3451:
                   3452:                /*
                   3453:                 * We assume that pagesize is a power of 2 when calculating
                   3454:                 * pagesize_mask and pagesize_2pow.
                   3455:                 */
                   3456:                assert(((result - 1) & result) == 0);
                   3457:                pagesize_mask = result - 1;
                   3458:                pagesize_2pow = ffs((int)result) - 1;
                   3459:        }
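                                 /*
                                  * Example (illustrative): with a 4096-byte page,
                                  * pagesize_mask = 0xfff and pagesize_2pow =
                                  * ffs(4096) - 1 = 12.
                                  */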
                   3460:
                   3461:        for (i = 0; i < 3; i++) {
                   3462:                /* Get runtime configuration. */
                   3463:                switch (i) {
                   3464:                case 0:
                   3465:                        if ((linklen = readlink("/etc/malloc.conf", buf,
                   3466:                                                sizeof(buf) - 1)) != -1) {
                   3467:                                /*
                   3468:                                 * Use the contents of the "/etc/malloc.conf"
                   3469:                                 * symbolic link's name.
                   3470:                                 */
                   3471:                                buf[linklen] = '\0';
                   3472:                                opts = buf;
                   3473:                        } else {
                   3474:                                /* No configuration specified. */
                   3475:                                buf[0] = '\0';
                   3476:                                opts = buf;
                   3477:                        }
                   3478:                        break;
                   3479:                case 1:
1.18      ad       3480:                        if ((opts = getenv("MALLOC_OPTIONS")) != NULL &&
                   3481:                            issetugid() == 0) {
1.1       ad       3482:                                /*
                   3483:                                 * Do nothing; opts is already initialized to
                   3484:                                 * the value of the MALLOC_OPTIONS environment
                   3485:                                 * variable.
                   3486:                                 */
                   3487:                        } else {
                   3488:                                /* No configuration specified. */
                   3489:                                buf[0] = '\0';
                   3490:                                opts = buf;
                   3491:                        }
                   3492:                        break;
                   3493:                case 2:
                   3494:                        if (_malloc_options != NULL) {
                   3495:                            /*
                   3496:                             * Use options that were compiled into the program.
                   3497:                             */
                   3498:                            opts = _malloc_options;
                   3499:                        } else {
                   3500:                                /* No configuration specified. */
                   3501:                                buf[0] = '\0';
                   3502:                                opts = buf;
                   3503:                        }
                   3504:                        break;
                   3505:                default:
                   3506:                        /* NOTREACHED */
1.7       yamt     3507:                        /* LINTED */
1.1       ad       3508:                        assert(false);
                   3509:                }
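                                         /*
                                          * Usage sketch (illustrative): options can be
                                          * supplied per run via the environment,
                                          *
                                          *     MALLOC_OPTIONS=AJ ./prog
                                          *
                                          * or system-wide via the symbolic link consulted
                                          * above:
                                          *
                                          *     ln -s 'AJ' /etc/malloc.conf
                                          *
                                          * Sources parsed later override earlier ones, so
                                          * compiled-in _malloc_options take precedence.
                                          */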
                   3510:
                   3511:                for (j = 0; opts[j] != '\0'; j++) {
                   3512:                        switch (opts[j]) {
                   3513:                        case 'a':
                   3514:                                opt_abort = false;
                   3515:                                break;
                   3516:                        case 'A':
                   3517:                                opt_abort = true;
                   3518:                                break;
                   3519:                        case 'h':
                   3520:                                opt_hint = false;
                   3521:                                break;
                   3522:                        case 'H':
                   3523:                                opt_hint = true;
                   3524:                                break;
                   3525:                        case 'j':
                   3526:                                opt_junk = false;
                   3527:                                break;
                   3528:                        case 'J':
                   3529:                                opt_junk = true;
                   3530:                                break;
                   3531:                        case 'k':
                   3532:                                /*
                   3533:                                 * Chunks always require at least one header
                   3534:                                 * page, so chunks can never be smaller than
                   3535:                                 * two pages.
                   3536:                                 */
                   3537:                                if (opt_chunk_2pow > pagesize_2pow + 1)
                   3538:                                        opt_chunk_2pow--;
                   3539:                                break;
                   3540:                        case 'K':
1.20      lukem    3541:                                if (opt_chunk_2pow + 1 <
                   3542:                                    (int)(sizeof(size_t) << 3))
1.1       ad       3543:                                        opt_chunk_2pow++;
                   3544:                                break;
                   3545:                        case 'n':
                   3546:                                opt_narenas_lshift--;
                   3547:                                break;
                   3548:                        case 'N':
                   3549:                                opt_narenas_lshift++;
                   3550:                                break;
                   3551:                        case 'p':
                   3552:                                opt_print_stats = false;
                   3553:                                break;
                   3554:                        case 'P':
                   3555:                                opt_print_stats = true;
                   3556:                                break;
                   3557:                        case 'q':
                   3558:                                if (opt_quantum_2pow > QUANTUM_2POW_MIN)
                   3559:                                        opt_quantum_2pow--;
                   3560:                                break;
                   3561:                        case 'Q':
                   3562:                                if (opt_quantum_2pow < pagesize_2pow - 1)
                   3563:                                        opt_quantum_2pow++;
                   3564:                                break;
                   3565:                        case 's':
                   3566:                                if (opt_small_max_2pow > QUANTUM_2POW_MIN)
                   3567:                                        opt_small_max_2pow--;
                   3568:                                break;
                   3569:                        case 'S':
                   3570:                                if (opt_small_max_2pow < pagesize_2pow - 1)
                   3571:                                        opt_small_max_2pow++;
                   3572:                                break;
                   3573:                        case 'u':
                   3574:                                opt_utrace = false;
                   3575:                                break;
                   3576:                        case 'U':
                   3577:                                opt_utrace = true;
                   3578:                                break;
                   3579:                        case 'v':
                   3580:                                opt_sysv = false;
                   3581:                                break;
                   3582:                        case 'V':
                   3583:                                opt_sysv = true;
                   3584:                                break;
                   3585:                        case 'x':
                   3586:                                opt_xmalloc = false;
                   3587:                                break;
                   3588:                        case 'X':
                   3589:                                opt_xmalloc = true;
                   3590:                                break;
                   3591:                        case 'z':
                   3592:                                opt_zero = false;
                   3593:                                break;
                   3594:                        case 'Z':
                   3595:                                opt_zero = true;
                   3596:                                break;
                   3597:                        default: {
                   3598:                                char cbuf[2];
                   3599:
                   3600:                                cbuf[0] = opts[j];
                   3601:                                cbuf[1] = '\0';
1.16      christos 3602:                                _malloc_message(getprogname(),
1.1       ad       3603:                                    ": (malloc) Unsupported character in "
                   3604:                                    "malloc options: '", cbuf, "'\n");
                   3605:                        }
                   3606:                        }
                   3607:                }
                   3608:        }
1.24      christos 3609:        errno = serrno;
1.1       ad       3610:
                   3611:        /* Take care to call atexit() only once. */
                   3612:        if (opt_print_stats) {
                   3613:                /* Print statistics at exit. */
                   3614:                atexit(malloc_print_stats);
                   3615:        }
                   3616:
                   3617:        /* Set variables according to the value of opt_small_max_2pow. */
                   3618:        if (opt_small_max_2pow < opt_quantum_2pow)
                   3619:                opt_small_max_2pow = opt_quantum_2pow;
                   3620:        small_max = (1 << opt_small_max_2pow);
                   3621:
                   3622:        /* Set bin-related variables. */
                   3623:        bin_maxclass = (pagesize >> 1);
                   3624:        assert(opt_quantum_2pow >= TINY_MIN_2POW);
1.9       christos 3625:        ntbins = (unsigned)(opt_quantum_2pow - TINY_MIN_2POW);
1.1       ad       3626:        assert(ntbins <= opt_quantum_2pow);
1.9       christos 3627:        nqbins = (unsigned)(small_max >> opt_quantum_2pow);
                   3628:        nsbins = (unsigned)(pagesize_2pow - opt_small_max_2pow - 1);
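                                 /*
                                  * Example (illustrative, assuming the defaults
                                  * opt_quantum_2pow = 4 and opt_small_max_2pow = 9 with
                                  * 4kB pages): small_max = 512, nqbins = 512 >> 4 = 32
                                  * quantum-spaced bins, and nsbins = 12 - 9 - 1 = 2
                                  * power-of-two bins (1024 and 2048 bytes).
                                  */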
1.1       ad       3629:
                   3630:        /* Set variables according to the value of opt_quantum_2pow. */
                   3631:        quantum = (1 << opt_quantum_2pow);
                   3632:        quantum_mask = quantum - 1;
                   3633:        if (ntbins > 0)
                   3634:                small_min = (quantum >> 1) + 1;
                   3635:        else
                   3636:                small_min = 1;
                   3637:        assert(small_min <= quantum);
                   3638:
                   3639:        /* Set variables according to the value of opt_chunk_2pow. */
                   3640:        chunksize = (1LU << opt_chunk_2pow);
                   3641:        chunksize_mask = chunksize - 1;
1.9       christos 3642:        chunksize_2pow = (unsigned)opt_chunk_2pow;
                   3643:        chunk_npages = (unsigned)(chunksize >> pagesize_2pow);
1.1       ad       3644:        {
                   3645:                unsigned header_size;
                   3646:
1.9       christos 3647:                header_size = (unsigned)(sizeof(arena_chunk_t) +
                   3648:                    (sizeof(arena_chunk_map_t) * (chunk_npages - 1)));
1.1       ad       3649:                arena_chunk_header_npages = (header_size >> pagesize_2pow);
                   3650:                if ((header_size & pagesize_mask) != 0)
                   3651:                        arena_chunk_header_npages++;
                   3652:        }
                   3653:        arena_maxclass = chunksize - (arena_chunk_header_npages <<
                   3654:            pagesize_2pow);
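                                 /*
                                  * Example (illustrative, assuming 1MB chunks and 4kB
                                  * pages): chunk_npages = 256; if the chunk header plus
                                  * its 255 page map entries fit in a single page, then
                                  * arena_chunk_header_npages = 1 and arena_maxclass =
                                  * 1MB - 4kB.
                                  */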
                   3655:
                   3656:        UTRACE(0, 0, 0);
                   3657:
                   3658: #ifdef MALLOC_STATS
                   3659:        memset(&stats_chunks, 0, sizeof(chunk_stats_t));
                   3660: #endif
                   3661:
                   3662:        /* Various sanity checks that regard configuration. */
                   3663:        assert(quantum >= sizeof(void *));
                   3664:        assert(quantum <= pagesize);
                   3665:        assert(chunksize >= pagesize);
                   3666:        assert(quantum * 4 <= chunksize);
                   3667:
                   3668:        /* Initialize chunks data. */
                   3669:        malloc_mutex_init(&chunks_mtx);
                   3670:        RB_INIT(&huge);
                   3671: #ifdef USE_BRK
                   3672:        malloc_mutex_init(&brk_mtx);
                   3673:        brk_base = sbrk(0);
                   3674:        brk_prev = brk_base;
                   3675:        brk_max = brk_base;
                   3676: #endif
                   3677: #ifdef MALLOC_STATS
                   3678:        huge_nmalloc = 0;
                   3679:        huge_ndalloc = 0;
1.8       yamt     3680:        huge_nralloc = 0;
1.1       ad       3681:        huge_allocated = 0;
                   3682: #endif
                   3683:        RB_INIT(&old_chunks);
                   3684:
                   3685:        /* Initialize base allocation data structures. */
                   3686: #ifdef MALLOC_STATS
                   3687:        base_mapped = 0;
                   3688: #endif
                   3689: #ifdef USE_BRK
                   3690:        /*
                   3691:         * Allocate a base chunk here, since it doesn't actually have to be
                   3692:         * chunk-aligned.  Doing this before allocating any other chunks allows
                   3693:         * the use of space that would otherwise be wasted.
                   3694:         */
                   3695:        base_pages_alloc(0);
                   3696: #endif
                   3697:        base_chunk_nodes = NULL;
                   3698:        malloc_mutex_init(&base_mtx);
                   3699:
                   3700:        if (ncpus > 1) {
                   3701:                /*
                   3702:                 * For SMP systems, create four times as many arenas as there
                   3703:                 * are CPUs by default.
                   3704:                 */
                   3705:                opt_narenas_lshift += 2;
                   3706:        }
                   3707:
                   3708:        /* Determine how many arenas to use. */
                   3709:        narenas = ncpus;
                   3710:        if (opt_narenas_lshift > 0) {
                   3711:                if ((narenas << opt_narenas_lshift) > narenas)
                   3712:                        narenas <<= opt_narenas_lshift;
                   3713:                /*
                    3714:                 * Make sure not to exceed the limits of what base_alloc()
                   3715:                 * can handle.
                   3716:                 */
                   3717:                if (narenas * sizeof(arena_t *) > chunksize)
1.9       christos 3718:                        narenas = (unsigned)(chunksize / sizeof(arena_t *));
1.1       ad       3719:        } else if (opt_narenas_lshift < 0) {
                   3720:                if ((narenas << opt_narenas_lshift) < narenas)
                   3721:                        narenas <<= opt_narenas_lshift;
1.15      ad       3722:                /* Make sure there is at least one arena. */
                   3723:                if (narenas == 0)
                   3724:                        narenas = 1;
1.1       ad       3725:        }
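                                 /*
                                  * Worked example (illustrative): on a 4-CPU system the
                                  * SMP default above adds 2 to opt_narenas_lshift, so
                                  * narenas = 4 << 2 = 16, capped so that the arena
                                  * pointer array still fits in a single chunk.
                                  */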
                   3726:
                   3727:        next_arena = 0;
                   3728:
                   3729:        /* Allocate and initialize arenas. */
                   3730:        arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
                   3731:        if (arenas == NULL) {
                   3732:                malloc_mutex_unlock(&init_lock);
                   3733:                return (true);
                   3734:        }
                   3735:        /*
                   3736:         * Zero the array.  In practice, this should always be pre-zeroed,
                   3737:         * since it was just mmap()ed, but let's be sure.
                   3738:         */
                   3739:        memset(arenas, 0, sizeof(arena_t *) * narenas);
                   3740:
                   3741:        /*
                   3742:         * Initialize one arena here.  The rest are lazily created in
                   3743:         * arena_choose_hard().
                   3744:         */
                   3745:        arenas_extend(0);
                   3746:        if (arenas[0] == NULL) {
                   3747:                malloc_mutex_unlock(&init_lock);
                   3748:                return (true);
                   3749:        }
                   3750:
                   3751:        malloc_mutex_init(&arenas_mtx);
                   3752:
                   3753:        malloc_initialized = true;
                   3754:        malloc_mutex_unlock(&init_lock);
                   3755:        return (false);
                   3756: }
                   3757:
                   3758: /*
                   3759:  * End general internal functions.
                   3760:  */
                   3761: /******************************************************************************/
                   3762: /*
                   3763:  * Begin malloc(3)-compatible functions.
                   3764:  */
                   3765:
                   3766: void *
                   3767: malloc(size_t size)
                   3768: {
                   3769:        void *ret;
                   3770:
                   3771:        if (malloc_init()) {
                   3772:                ret = NULL;
                   3773:                goto RETURN;
                   3774:        }
                   3775:
                   3776:        if (size == 0) {
                   3777:                if (opt_sysv == false)
                   3778:                        size = 1;
                   3779:                else {
                   3780:                        ret = NULL;
                   3781:                        goto RETURN;
                   3782:                }
                   3783:        }
                   3784:
                   3785:        ret = imalloc(size);
                   3786:
                   3787: RETURN:
                   3788:        if (ret == NULL) {
                   3789:                if (opt_xmalloc) {
1.16      christos 3790:                        _malloc_message(getprogname(),
1.1       ad       3791:                            ": (malloc) Error in malloc(): out of memory\n", "",
                   3792:                            "");
                   3793:                        abort();
                   3794:                }
                   3795:                errno = ENOMEM;
                   3796:        }
                   3797:
                   3798:        UTRACE(0, size, ret);
                   3799:        return (ret);
                   3800: }
                   3801:
                   3802: int
                   3803: posix_memalign(void **memptr, size_t alignment, size_t size)
                   3804: {
                   3805:        int ret;
                   3806:        void *result;
                   3807:
                   3808:        if (malloc_init())
                   3809:                result = NULL;
                   3810:        else {
                   3811:                /* Make sure that alignment is a large enough power of 2. */
                   3812:                if (((alignment - 1) & alignment) != 0
                   3813:                    || alignment < sizeof(void *)) {
                   3814:                        if (opt_xmalloc) {
1.16      christos 3815:                                _malloc_message(getprogname(),
1.1       ad       3816:                                    ": (malloc) Error in posix_memalign(): "
                   3817:                                    "invalid alignment\n", "", "");
                   3818:                                abort();
                   3819:                        }
                   3820:                        result = NULL;
                   3821:                        ret = EINVAL;
                   3822:                        goto RETURN;
                   3823:                }
                   3824:
                   3825:                result = ipalloc(alignment, size);
                   3826:        }
                   3827:
                   3828:        if (result == NULL) {
                   3829:                if (opt_xmalloc) {
1.16      christos 3830:                        _malloc_message(getprogname(),
1.1       ad       3831:                        ": (malloc) Error in posix_memalign(): out of memory\n",
                   3832:                        "", "");
                   3833:                        abort();
                   3834:                }
                   3835:                ret = ENOMEM;
                   3836:                goto RETURN;
                   3837:        }
                   3838:
                   3839:        *memptr = result;
                   3840:        ret = 0;
                   3841:
                   3842: RETURN:
                   3843:        UTRACE(0, size, result);
                   3844:        return (ret);
                   3845: }
                   3846:
                   3847: void *
                   3848: calloc(size_t num, size_t size)
                   3849: {
                   3850:        void *ret;
                   3851:        size_t num_size;
                   3852:
                   3853:        if (malloc_init()) {
                   3854:                num_size = 0;
                   3855:                ret = NULL;
                   3856:                goto RETURN;
                   3857:        }
                   3858:
                   3859:        num_size = num * size;
                   3860:        if (num_size == 0) {
                   3861:                if ((opt_sysv == false) && ((num == 0) || (size == 0)))
                   3862:                        num_size = 1;
                   3863:                else {
                   3864:                        ret = NULL;
                   3865:                        goto RETURN;
                   3866:                }
                   3867:        /*
                   3868:         * Try to avoid division here.  We know that it isn't possible to
                   3869:         * overflow during multiplication if neither operand uses any of the
                   3870:         * most significant half of the bits in a size_t.
                   3871:         */
1.2       ad       3872:        } else if ((unsigned long long)((num | size) &
                   3873:           ((unsigned long long)SIZE_T_MAX << (sizeof(size_t) << 2))) &&
                   3874:           (num_size / size != num)) {
1.1       ad       3875:                /* size_t overflow. */
                   3876:                ret = NULL;
                   3877:                goto RETURN;
                   3878:        }
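                                 /*
                                  * Worked example (illustrative, 32-bit size_t): the mask
                                  * is SIZE_T_MAX << 16 = 0xffff0000.  For num = size =
                                  * 70000 (0x11170), (num | size) intersects the mask and
                                  * num_size / size = 8643 != num, exposing the wrapped
                                  * product 70000 * 70000 mod 2^32 = 605032704, so NULL is
                                  * returned.
                                  */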
                   3879:
                   3880:        ret = icalloc(num_size);
                   3881:
                   3882: RETURN:
                   3883:        if (ret == NULL) {
                   3884:                if (opt_xmalloc) {
1.16      christos 3885:                        _malloc_message(getprogname(),
1.1       ad       3886:                            ": (malloc) Error in calloc(): out of memory\n", "",
                   3887:                            "");
                   3888:                        abort();
                   3889:                }
                   3890:                errno = ENOMEM;
                   3891:        }
                   3892:
                   3893:        UTRACE(0, num_size, ret);
                   3894:        return (ret);
                   3895: }
                   3896:
                   3897: void *
                   3898: realloc(void *ptr, size_t size)
                   3899: {
                   3900:        void *ret;
                   3901:
                   3902:        if (size == 0) {
                   3903:                if (opt_sysv == false)
                   3904:                        size = 1;
                   3905:                else {
                   3906:                        if (ptr != NULL)
                   3907:                                idalloc(ptr);
                   3908:                        ret = NULL;
                   3909:                        goto RETURN;
                   3910:                }
                   3911:        }
                   3912:
                   3913:        if (ptr != NULL) {
                   3914:                assert(malloc_initialized);
                   3915:
                   3916:                ret = iralloc(ptr, size);
                   3917:
                   3918:                if (ret == NULL) {
                   3919:                        if (opt_xmalloc) {
1.16      christos 3920:                                _malloc_message(getprogname(),
1.1       ad       3921:                                    ": (malloc) Error in realloc(): out of "
                   3922:                                    "memory\n", "", "");
                   3923:                                abort();
                   3924:                        }
                   3925:                        errno = ENOMEM;
                   3926:                }
                   3927:        } else {
                   3928:                if (malloc_init())
                   3929:                        ret = NULL;
                   3930:                else
                   3931:                        ret = imalloc(size);
                   3932:
                   3933:                if (ret == NULL) {
                   3934:                        if (opt_xmalloc) {
1.16      christos 3935:                                _malloc_message(getprogname(),
1.1       ad       3936:                                    ": (malloc) Error in realloc(): out of "
                   3937:                                    "memory\n", "", "");
                   3938:                                abort();
                   3939:                        }
                   3940:                        errno = ENOMEM;
                   3941:                }
                   3942:        }
                   3943:
                   3944: RETURN:
                   3945:        UTRACE(ptr, size, ret);
                   3946:        return (ret);
                   3947: }
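
/*
 * Illustrative aside (not part of the NetBSD source): the size == 0 path
 * above gives realloc() two personalities.  With the SysV-compatibility
 * option enabled (opt_sysv), realloc(ptr, 0) frees ptr and returns NULL;
 * by default the request is promoted to one byte.  A hedged caller-side
 * sketch that is safe under either behaviour (shrink_to is a hypothetical
 * helper, not part of this allocator):
 */
#include <stdlib.h>

static void *
shrink_to(void *ptr, size_t size)
{
	void *ret;

	if (size == 0) {
		/* Avoid depending on which personality is active. */
		free(ptr);
		return NULL;
	}
	ret = realloc(ptr, size);
	return ret != NULL ? ret : ptr;	/* On failure, old block is intact. */
}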
                   3948:
                   3949: void
                   3950: free(void *ptr)
                   3951: {
                   3952:
                   3953:        UTRACE(ptr, 0, 0);
                   3954:        if (ptr != NULL) {
                   3955:                assert(malloc_initialized);
                   3956:
                   3957:                idalloc(ptr);
                   3958:        }
                   3959: }
                   3960:
                   3961: /*
                   3962:  * End malloc(3)-compatible functions.
                   3963:  */
                   3964: /******************************************************************************/
                   3965: /*
                   3966:  * Begin non-standard functions.
                   3967:  */
1.2       ad       3968: #ifndef __NetBSD__
1.1       ad       3969: size_t
                   3970: malloc_usable_size(const void *ptr)
                   3971: {
                   3972:
                   3973:        assert(ptr != NULL);
                   3974:
                   3975:        return (isalloc(ptr));
                   3976: }
1.2       ad       3977: #endif
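
/*
 * Illustrative aside (not part of the NetBSD source): malloc_usable_size()
 * reports how many bytes are actually usable in an allocation, which may
 * exceed the requested size because of size-class rounding.  As the
 * #ifndef above shows, it is compiled out on NetBSD; a minimal sketch of
 * its use on a platform that does provide it (the header varies: glibc
 * declares it in <malloc.h>, FreeBSD in <malloc_np.h>):
 */
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

int
main(void)
{
	char *p = malloc(100);

	if (p != NULL) {
		/* Typically prints a value >= 100, rounded to a bin size. */
		printf("requested 100, usable %zu\n", malloc_usable_size(p));
		free(p);
	}
	return 0;
}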
1.1       ad       3978:
                   3979: /*
                   3980:  * End non-standard functions.
                   3981:  */
                   3982: /******************************************************************************/
                   3983: /*
                   3984:  * Begin library-private functions, used by threading libraries for protection
                   3985:  * of malloc during fork().  These functions are only called if the program is
                   3986:  * running in threaded mode, so there is no need to check whether the program
                   3987:  * is threaded here.
                   3988:  */
                   3989:
                   3990: void
                   3991: _malloc_prefork(void)
                   3992: {
                   3993:        unsigned i;
                   3994:
                   3995:        /* Acquire all mutexes in a safe order. */
                   3996:
                   3997:        malloc_mutex_lock(&arenas_mtx);
                   3998:        for (i = 0; i < narenas; i++) {
                   3999:                if (arenas[i] != NULL)
                   4000:                        malloc_mutex_lock(&arenas[i]->mtx);
                   4001:        }
                   4002:
                   4003:        malloc_mutex_lock(&base_mtx);
                   4004:
                   4005:        malloc_mutex_lock(&chunks_mtx);
                   4006: }
                   4007:
                   4008: void
                   4009: _malloc_postfork(void)
                   4010: {
                   4011:        unsigned i;
                   4012:
                   4013:        /* Release all mutexes, now that fork() has completed. */
                   4014:
                   4015:        malloc_mutex_unlock(&chunks_mtx);
                   4016:
                   4017:        malloc_mutex_unlock(&base_mtx);
                   4018:
                   4019:        for (i = 0; i < narenas; i++) {
                   4020:                if (arenas[i] != NULL)
                   4021:                        malloc_mutex_unlock(&arenas[i]->mtx);
                   4022:        }
                   4023:        malloc_mutex_unlock(&arenas_mtx);
                   4024: }
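
/*
 * Illustrative aside (not part of the NetBSD source): a threading library
 * would typically wire these hooks up through pthread_atfork(3), so that
 * every malloc mutex is held across the instant of fork() and the child
 * cannot inherit a mutex frozen in the locked state by some other thread.
 * A hedged sketch of that registration (the hook names are real; the
 * registration function is an assumed example):
 */
#include <pthread.h>

void _malloc_prefork(void);
void _malloc_postfork(void);

static void
register_malloc_fork_hooks(void)	/* hypothetical init-time call site */
{
	/*
	 * The first handler runs in the parent before fork(); the second
	 * and third run after it, in the parent and the child respectively.
	 * Locking everything in prepare and unlocking in both descendants
	 * leaves the child's allocator in a consistent, unlocked state.
	 */
	pthread_atfork(_malloc_prefork, _malloc_postfork, _malloc_postfork);
}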
                   4025:
                   4026: /*
                   4027:  * End library-private functions.
                   4028:  */
                   4029: /******************************************************************************/
