
Annotation of src/lib/libc/stdlib/jemalloc.c, Revision 1.12

1.12    ! ad          1: /*     $NetBSD: jemalloc.c,v 1.11 2007/11/19 14:48:42 ad Exp $ */
1.2       ad          2:
1.1       ad          3: /*-
                      4:  * Copyright (C) 2006,2007 Jason Evans <jasone@FreeBSD.org>.
                      5:  * All rights reserved.
                      6:  *
                      7:  * Redistribution and use in source and binary forms, with or without
                      8:  * modification, are permitted provided that the following conditions
                      9:  * are met:
                     10:  * 1. Redistributions of source code must retain the above copyright
                     11:  *    notice(s), this list of conditions and the following disclaimer as
                     12:  *    the first lines of this file unmodified other than the possible
                     13:  *    addition of one or more copyright notices.
                     14:  * 2. Redistributions in binary form must reproduce the above copyright
                     15:  *    notice(s), this list of conditions and the following disclaimer in
                     16:  *    the documentation and/or other materials provided with the
                     17:  *    distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
                     20:  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     21:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
                     23:  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
                     26:  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
                     27:  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
                     28:  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
                     29:  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
                     30:  *
                     31:  *******************************************************************************
                     32:  *
                     33:  * This allocator implementation is designed to provide scalable performance
                     34:  * for multi-threaded programs on multi-processor systems.  The following
                     35:  * features are included for this purpose:
                     36:  *
                     37:  *   + Multiple arenas are used if there are multiple CPUs, which reduces lock
                     38:  *     contention and cache sloshing.
                     39:  *
                     40:  *   + Cache line sharing between arenas is avoided for internal data
                     41:  *     structures.
                     42:  *
                     43:  *   + Memory is managed in chunks and runs (chunks can be split into runs),
                     44:  *     rather than as individual pages.  This provides a constant-time
                     45:  *     mechanism for associating allocations with particular arenas.
                     46:  *
                     47:  * Allocation requests are rounded up to the nearest size class, and no record
                     48:  * of the original request size is maintained.  Allocations are broken into
                     49:  * categories according to size class.  Assuming the runtime defaults
                     50:  * (4 kB pages and a 16 byte quantum), the size classes in each category are:
                     51:  *
                     52:  *   |=====================================|
                     53:  *   | Category | Subcategory    |    Size |
                     54:  *   |=====================================|
                     55:  *   | Small    | Tiny           |       2 |
                     56:  *   |          |                |       4 |
                     57:  *   |          |                |       8 |
                     58:  *   |          |----------------+---------|
                     59:  *   |          | Quantum-spaced |      16 |
                     60:  *   |          |                |      32 |
                     61:  *   |          |                |      48 |
                     62:  *   |          |                |     ... |
                     63:  *   |          |                |     480 |
                     64:  *   |          |                |     496 |
                     65:  *   |          |                |     512 |
                     66:  *   |          |----------------+---------|
                     67:  *   |          | Sub-page       |    1 kB |
                     68:  *   |          |                |    2 kB |
                     69:  *   |=====================================|
                     70:  *   | Large                     |    4 kB |
                     71:  *   |                           |    8 kB |
                     72:  *   |                           |   12 kB |
                     73:  *   |                           |     ... |
                     74:  *   |                           | 1012 kB |
                     75:  *   |                           | 1016 kB |
                     76:  *   |                           | 1020 kB |
                     77:  *   |=====================================|
                     78:  *   | Huge                      |    1 MB |
                     79:  *   |                           |    2 MB |
                     80:  *   |                           |    3 MB |
                     81:  *   |                           |     ... |
                     82:  *   |=====================================|
                     83:  *
                     84:  * A different mechanism is used for each category:
                     85:  *
                     86:  *   Small : Each size class is segregated into its own set of runs.  Each run
                     87:  *           maintains a bitmap of which regions are free/allocated.
                     88:  *
                     89:  *   Large : Each allocation is backed by a dedicated run.  Metadata are stored
                     90:  *           in the associated arena chunk header maps.
                     91:  *
                     92:  *   Huge : Each allocation is backed by a dedicated contiguous set of chunks.
                     93:  *          Metadata are stored in a separate red-black tree.
                     94:  *
                     95:  *******************************************************************************
                     96:  */
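/*
 * Illustrative sketch only (not part of the allocator): how a request size
 * maps onto the classes above, assuming the runtime defaults of 4 kB pages,
 * a 16 byte quantum, and 1 MB chunks.  The allocator itself performs this
 * rounding with the bin tables and macros defined later in this file
 * (pow2_ceil() is one of them).
 */
#if 0
static size_t
example_size_class(size_t size)
{

	if (size <= 8)			/* Tiny: 2, 4, 8. */
		return (pow2_ceil(size < 2 ? 2 : size));
	if (size <= 512)		/* Quantum-spaced: 16, 32, ..., 512. */
		return ((size + 15) & ~(size_t)15);
	if (size <= 2048)		/* Sub-page: 1 kB, 2 kB. */
		return (pow2_ceil(size));
	if (size <= 1020 * 1024)	/* Large: page multiples. */
		return ((size + 4095) & ~(size_t)4095);
	/* Huge: chunk (1 MB) multiples. */
	return ((size + ((1 << 20) - 1)) & ~(size_t)((1 << 20) - 1));
}
#endif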
                     97:
1.2       ad         98: /* LINTLIBRARY */
                     99:
                    100: #ifdef __NetBSD__
                    101: #  define xutrace(a, b)                utrace("malloc", (a), (b))
                    102: #  define __DECONST(x, y)      ((x)__UNCONST(y))
                    103: #  define NO_TLS
                    104: #else
                    105: #  define xutrace(a, b)                utrace((a), (b))
                    106: #endif /* __NetBSD__ */
                    107:
1.1       ad        108: /*
                    109:  * MALLOC_PRODUCTION disables assertions and statistics gathering.  It also
                    110:  * defaults the A and J runtime options to off.  These settings are appropriate
                    111:  * for production systems.
                    112:  */
1.2       ad        113: #define MALLOC_PRODUCTION
1.1       ad        114:
                    115: #ifndef MALLOC_PRODUCTION
                    116: #  define MALLOC_DEBUG
                    117: #endif
                    118:
                    119: #include <sys/cdefs.h>
1.2       ad        120: /* __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.147 2007/06/15 22:00:16 jasone Exp $"); */
1.12    ! ad        121: __RCSID("$NetBSD: jemalloc.c,v 1.11 2007/11/19 14:48:42 ad Exp $");
1.1       ad        122:
1.2       ad        123: #ifdef __FreeBSD__
1.1       ad        124: #include "libc_private.h"
                    125: #ifdef MALLOC_DEBUG
                    126: #  define _LOCK_DEBUG
                    127: #endif
                    128: #include "spinlock.h"
1.2       ad        129: #endif
1.1       ad        130: #include "namespace.h"
                    131: #include <sys/mman.h>
                    132: #include <sys/param.h>
1.2       ad        133: #ifdef __FreeBSD__
1.1       ad        134: #include <sys/stddef.h>
1.2       ad        135: #endif
1.1       ad        136: #include <sys/time.h>
                    137: #include <sys/types.h>
                    138: #include <sys/sysctl.h>
                    139: #include <sys/tree.h>
                    140: #include <sys/uio.h>
                    141: #include <sys/ktrace.h> /* Must come after several other sys/ includes. */
                    142:
1.2       ad        143: #ifdef __FreeBSD__
1.1       ad        144: #include <machine/atomic.h>
                    145: #include <machine/cpufunc.h>
1.2       ad        146: #endif
1.1       ad        147: #include <machine/vmparam.h>
                    148:
                    149: #include <errno.h>
                    150: #include <limits.h>
                    151: #include <pthread.h>
                    152: #include <sched.h>
                    153: #include <stdarg.h>
                    154: #include <stdbool.h>
                    155: #include <stdio.h>
                    156: #include <stdint.h>
                    157: #include <stdlib.h>
                    158: #include <string.h>
                    159: #include <strings.h>
                    160: #include <unistd.h>
                    161:
1.2       ad        162: #ifdef __NetBSD__
                    163: #  include <reentrant.h>
                    164: void   _malloc_prefork(void);
                    165: void   _malloc_postfork(void);
                    166: ssize_t        _write(int, const void *, size_t);
                    167: const char     *_getprogname(void);
                    168: #endif
                    169:
                    170: #ifdef __FreeBSD__
1.1       ad        171: #include "un-namespace.h"
1.2       ad        172: #endif
1.1       ad        173:
                    174: /* MALLOC_STATS enables statistics calculation. */
                    175: #ifndef MALLOC_PRODUCTION
                    176: #  define MALLOC_STATS
                    177: #endif
                    178:
                    179: #ifdef MALLOC_DEBUG
                    180: #  ifdef NDEBUG
                    181: #    undef NDEBUG
                    182: #  endif
                    183: #else
                    184: #  ifndef NDEBUG
                    185: #    define NDEBUG
                    186: #  endif
                    187: #endif
                    188: #include <assert.h>
                    189:
                    190: #ifdef MALLOC_DEBUG
                    191:    /* Disable inlining to make debugging easier. */
                    192: #  define inline
                    193: #endif
                    194:
                    195: /* Size of stack-allocated buffer passed to strerror_r(). */
                    196: #define        STRERROR_BUF            64
                    197:
                    198: /* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
                    199: #ifdef __i386__
                    200: #  define QUANTUM_2POW_MIN     4
                    201: #  define SIZEOF_PTR_2POW      2
                    202: #  define USE_BRK
                    203: #endif
                    204: #ifdef __ia64__
                    205: #  define QUANTUM_2POW_MIN     4
                    206: #  define SIZEOF_PTR_2POW      3
                    207: #endif
                    208: #ifdef __alpha__
                    209: #  define QUANTUM_2POW_MIN     4
                    210: #  define SIZEOF_PTR_2POW      3
                    211: #  define NO_TLS
                    212: #endif
                    213: #ifdef __sparc64__
                    214: #  define QUANTUM_2POW_MIN     4
                    215: #  define SIZEOF_PTR_2POW      3
                    216: #  define NO_TLS
                    217: #endif
                    218: #ifdef __amd64__
                    219: #  define QUANTUM_2POW_MIN     4
                    220: #  define SIZEOF_PTR_2POW      3
                    221: #endif
                    222: #ifdef __arm__
                    223: #  define QUANTUM_2POW_MIN     3
                    224: #  define SIZEOF_PTR_2POW      2
                    225: #  define USE_BRK
                    226: #  define NO_TLS
                    227: #endif
                    228: #ifdef __powerpc__
                    229: #  define QUANTUM_2POW_MIN     4
                    230: #  define SIZEOF_PTR_2POW      2
                    231: #  define USE_BRK
                    232: #endif
1.3       he        233: #if defined(__sparc__) && !defined(__sparc64__)
1.2       ad        234: #  define QUANTUM_2POW_MIN     4
                    235: #  define SIZEOF_PTR_2POW      2
                    236: #  define USE_BRK
                    237: #endif
                    238: #ifdef __vax__
                    239: #  define QUANTUM_2POW_MIN     4
                    240: #  define SIZEOF_PTR_2POW      2
                    241: #  define USE_BRK
                    242: #endif
                    243: #ifdef __sh__
                    244: #  define QUANTUM_2POW_MIN     4
                    245: #  define SIZEOF_PTR_2POW      2
                    246: #  define USE_BRK
                    247: #endif
                    248: #ifdef __m68k__
                    249: #  define QUANTUM_2POW_MIN     4
                    250: #  define SIZEOF_PTR_2POW      2
                    251: #  define USE_BRK
                    252: #endif
                    253: #ifdef __mips__
                    254: #  define QUANTUM_2POW_MIN     4
                    255: #  define SIZEOF_PTR_2POW      2
                    256: #  define USE_BRK
                    257: #endif
1.4       ad        258: #ifdef __hppa__
                    259: #  define QUANTUM_2POW_MIN     4
                    260: #  define SIZEOF_PTR_2POW      2
                    261: #  define USE_BRK
                    262: #endif
1.1       ad        263:
                    264: #define        SIZEOF_PTR              (1 << SIZEOF_PTR_2POW)
                    265:
                    266: /* sizeof(int) == (1 << SIZEOF_INT_2POW). */
                    267: #ifndef SIZEOF_INT_2POW
                    268: #  define SIZEOF_INT_2POW      2
                    269: #endif
                    270:
                    271: /* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
                    272: #if (!defined(PIC) && !defined(NO_TLS))
                    273: #  define NO_TLS
                    274: #endif
                    275:
                    276: /*
                    277:  * Size and alignment of memory chunks that are allocated by the OS's virtual
                    278:  * memory system.
                    279:  */
                    280: #define        CHUNK_2POW_DEFAULT      20
                    281:
                    282: /*
                    283:  * Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
                    284:  * so over-estimates are okay (up to a point), but under-estimates will
                    285:  * negatively affect performance.
                    286:  */
                    287: #define        CACHELINE_2POW          6
                    288: #define        CACHELINE               ((size_t)(1 << CACHELINE_2POW))
                    289:
                    290: /* Smallest size class to support. */
                    291: #define        TINY_MIN_2POW           1
                    292:
                    293: /*
                    294:  * Maximum size class that is a multiple of the quantum, but not (necessarily)
                    295:  * a power of 2.  Above this size, allocations are rounded up to the nearest
                    296:  * power of 2.
                    297:  */
                    298: #define        SMALL_MAX_2POW_DEFAULT  9
                    299: #define        SMALL_MAX_DEFAULT       (1 << SMALL_MAX_2POW_DEFAULT)
                    300:
                    301: /*
                    302:  * Maximum desired run header overhead.  Runs are sized as small as possible
                    303:  * such that this setting is still honored, without violating other constraints.
                    304:  * The goal is to make runs as small as possible without exceeding a per run
                    305:  * external fragmentation threshold.
                    306:  *
                    307:  * Note that it is possible to set this low enough that it cannot be honored
                    308:  * for some/all object sizes, since there is one bit of header overhead per
                    309:  * object (plus a constant).  In such cases, this constraint is relaxed.
                    310:  *
                    311:  * RUN_MAX_OVRHD_RELAX specifies the maximum number of bits per region of
                    312:  * overhead for which RUN_MAX_OVRHD is relaxed.
                    313:  */
                    314: #define RUN_MAX_OVRHD          0.015
                    315: #define RUN_MAX_OVRHD_RELAX    1.5
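/*
 * Worked example (illustrative; assumes 32-bit pointers): for the 16 byte
 * size class and a one-page (4 kB) run, the fixed arena_run_t fields take
 * roughly 28 bytes and the bitmap needs one bit per region (~252 regions
 * -> ~32 bytes), so overhead is ~60/4096 ~= 1.5%, just within
 * RUN_MAX_OVRHD.  For the 2 byte tiny class, the bitmap alone costs one
 * bit per 2 byte region (~6% of the run), so RUN_MAX_OVRHD cannot be
 * honored and the RUN_MAX_OVRHD_RELAX rule applies instead.
 */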
                    316:
                    317: /* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
                    318: #define RUN_MAX_SMALL_2POW     15
                    319: #define RUN_MAX_SMALL          (1 << RUN_MAX_SMALL_2POW)
                    320:
                    321: /******************************************************************************/
                    322:
1.2       ad        323: #ifdef __FreeBSD__
1.1       ad        324: /*
                    325:  * Mutexes based on spinlocks.  We can't use normal pthread mutexes, because
                    326:  * they require malloc()ed memory.
                    327:  */
                    328: typedef struct {
                    329:        spinlock_t      lock;
                    330: } malloc_mutex_t;
                    331:
                    332: /* Set to true once the allocator has been initialized. */
                    333: static bool malloc_initialized = false;
                    334:
                    335: /* Used to avoid initialization races. */
                    336: static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
1.2       ad        337: #else
                    338: #define        malloc_mutex_t  mutex_t
                    339:
                    340: /* Set to true once the allocator has been initialized. */
                    341: static bool malloc_initialized = false;
                    342:
                    343: /* Used to avoid initialization races. */
                    344: static mutex_t init_lock = MUTEX_INITIALIZER;
                    345: #endif
1.1       ad        346:
                    347: /******************************************************************************/
                    348: /*
                    349:  * Statistics data structures.
                    350:  */
                    351:
                    352: #ifdef MALLOC_STATS
                    353:
                    354: typedef struct malloc_bin_stats_s malloc_bin_stats_t;
                    355: struct malloc_bin_stats_s {
                    356:        /*
                    357:         * Number of allocation requests that corresponded to the size of this
                    358:         * bin.
                    359:         */
                    360:        uint64_t        nrequests;
                    361:
                    362:        /* Total number of runs created for this bin's size class. */
                    363:        uint64_t        nruns;
                    364:
                    365:        /*
                    366:         * Total number of runs reused by extracting them from the runs tree for
                    367:         * this bin's size class.
                    368:         */
                    369:        uint64_t        reruns;
                    370:
                    371:        /* High-water mark for this bin. */
                    372:        unsigned long   highruns;
                    373:
                    374:        /* Current number of runs in this bin. */
                    375:        unsigned long   curruns;
                    376: };
                    377:
                    378: typedef struct arena_stats_s arena_stats_t;
                    379: struct arena_stats_s {
                    380:        /* Number of bytes currently mapped. */
                    381:        size_t          mapped;
                    382:
                    383:        /* Per-size-category statistics. */
                    384:        size_t          allocated_small;
                    385:        uint64_t        nmalloc_small;
                    386:        uint64_t        ndalloc_small;
                    387:
                    388:        size_t          allocated_large;
                    389:        uint64_t        nmalloc_large;
                    390:        uint64_t        ndalloc_large;
                    391: };
                    392:
                    393: typedef struct chunk_stats_s chunk_stats_t;
                    394: struct chunk_stats_s {
                    395:        /* Number of chunks that were allocated. */
                    396:        uint64_t        nchunks;
                    397:
                    398:        /* High-water mark for number of chunks allocated. */
                    399:        unsigned long   highchunks;
                    400:
                    401:        /*
                     402:        /*
                     403:         * Current number of chunks allocated.  This value serves no other
                     404:         * purpose; it is maintained only so that highchunks can be kept
                     405:         * up to date.
                     406:         */
                    406:        unsigned long   curchunks;
                    407: };
                    408:
                    409: #endif /* #ifdef MALLOC_STATS */
                    410:
                    411: /******************************************************************************/
                    412: /*
                    413:  * Chunk data structures.
                    414:  */
                    415:
                    416: /* Tree of chunks. */
                    417: typedef struct chunk_node_s chunk_node_t;
                    418: struct chunk_node_s {
                    419:        /* Linkage for the chunk tree. */
                    420:        RB_ENTRY(chunk_node_s) link;
                    421:
                    422:        /*
                    423:         * Pointer to the chunk that this tree node is responsible for.  In some
                    424:         * (but certainly not all) cases, this data structure is placed at the
                    425:         * beginning of the corresponding chunk, so this field may point to this
                    426:         * node.
                    427:         */
                    428:        void    *chunk;
                    429:
                    430:        /* Total chunk size. */
                    431:        size_t  size;
                    432: };
                    433: typedef struct chunk_tree_s chunk_tree_t;
                    434: RB_HEAD(chunk_tree_s, chunk_node_s);
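/*
 * Sketch (assumed; the file's own comparator and RB_GENERATE calls appear
 * later, beyond this excerpt): sys/tree.h red-black trees are driven by a
 * three-way comparison function of roughly this shape, ordering chunk
 * nodes by base address.
 */
#if 0
static inline int
example_chunk_comp(chunk_node_t *a, chunk_node_t *b)
{

	if ((uintptr_t)a->chunk < (uintptr_t)b->chunk)
		return (-1);
	if (a->chunk == b->chunk)
		return (0);
	return (1);
}
/* RB_GENERATE_STATIC(chunk_tree_s, chunk_node_s, link, example_chunk_comp); */
#endif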
                    435:
                    436: /******************************************************************************/
                    437: /*
                    438:  * Arena data structures.
                    439:  */
                    440:
                    441: typedef struct arena_s arena_t;
                    442: typedef struct arena_bin_s arena_bin_t;
                    443:
                    444: typedef struct arena_chunk_map_s arena_chunk_map_t;
                    445: struct arena_chunk_map_s {
                    446:        /* Number of pages in run. */
                    447:        uint32_t        npages;
                    448:        /*
                    449:         * Position within run.  For a free run, this is POS_FREE for the first
                    450:         * and last pages.  The POS_FREE special value makes it possible to
                    451:         * quickly coalesce free runs.
                    452:         *
                    453:         * This is the limiting factor for chunksize; there can be at most 2^31
                    454:         * pages in a run.
                    455:         */
                    456: #define POS_FREE ((uint32_t)0xffffffffU)
                    457:        uint32_t        pos;
                    458: };
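/*
 * Example (illustrative): a free run spanning pages 3..7 of a chunk is
 * recorded only at its endpoints,
 *
 *	map[3] = { npages = 5, pos = POS_FREE }
 *	map[7] = { npages = 5, pos = POS_FREE }
 *
 * so when an adjacent run is freed, a constant-time look at map[2] or
 * map[8] suffices to decide whether to coalesce; interior map entries for
 * free runs may be stale.
 */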
                    459:
                    460: /* Arena chunk header. */
                    461: typedef struct arena_chunk_s arena_chunk_t;
                    462: struct arena_chunk_s {
                    463:        /* Arena that owns the chunk. */
                    464:        arena_t *arena;
                    465:
                    466:        /* Linkage for the arena's chunk tree. */
                    467:        RB_ENTRY(arena_chunk_s) link;
                    468:
                    469:        /*
                    470:         * Number of pages in use.  This is maintained in order to make
                    471:         * detection of empty chunks fast.
                    472:         */
                    473:        uint32_t pages_used;
                    474:
                    475:        /*
                    476:         * Every time a free run larger than this value is created/coalesced,
                    477:         * this value is increased.  The only way that the value decreases is if
                    478:         * arena_run_alloc() fails to find a free run as large as advertised by
                    479:         * this value.
                    480:         */
                    481:        uint32_t max_frun_npages;
                    482:
                    483:        /*
                    484:         * Every time a free run that starts at an earlier page than this value
                    485:         * is created/coalesced, this value is decreased.  It is reset in a
                    486:         * similar fashion to max_frun_npages.
                    487:         */
                    488:        uint32_t min_frun_ind;
                    489:
                    490:        /*
                    491:         * Map of pages within chunk that keeps track of free/large/small.  For
                    492:         * free runs, only the map entries for the first and last pages are
                    493:         * kept up to date, so that free runs can be quickly coalesced.
                    494:         */
                    495:        arena_chunk_map_t map[1]; /* Dynamically sized. */
                    496: };
                    497: typedef struct arena_chunk_tree_s arena_chunk_tree_t;
                    498: RB_HEAD(arena_chunk_tree_s, arena_chunk_s);
                    499:
                    500: typedef struct arena_run_s arena_run_t;
                    501: struct arena_run_s {
                    502:        /* Linkage for run trees. */
                    503:        RB_ENTRY(arena_run_s) link;
                    504:
                    505: #ifdef MALLOC_DEBUG
                    506:        uint32_t        magic;
                    507: #  define ARENA_RUN_MAGIC 0x384adf93
                    508: #endif
                    509:
                    510:        /* Bin this run is associated with. */
                    511:        arena_bin_t     *bin;
                    512:
                    513:        /* Index of first element that might have a free region. */
                    514:        unsigned        regs_minelm;
                    515:
                    516:        /* Number of free regions in run. */
                    517:        unsigned        nfree;
                    518:
                    519:        /* Bitmask of in-use regions (0: in use, 1: free). */
                    520:        unsigned        regs_mask[1]; /* Dynamically sized. */
                    521: };
                    522: typedef struct arena_run_tree_s arena_run_tree_t;
                    523: RB_HEAD(arena_run_tree_s, arena_run_s);
                    524:
                    525: struct arena_bin_s {
                    526:        /*
                    527:         * Current run being used to service allocations of this bin's size
                    528:         * class.
                    529:         */
                    530:        arena_run_t     *runcur;
                    531:
                    532:        /*
                    533:         * Tree of non-full runs.  This tree is used when looking for an
                    534:         * existing run when runcur is no longer usable.  We choose the
                    535:         * non-full run that is lowest in memory; this policy tends to keep
                    536:         * objects packed well, and it can also help reduce the number of
                    537:         * almost-empty chunks.
                    538:         */
                    539:        arena_run_tree_t runs;
                    540:
                    541:        /* Size of regions in a run for this bin's size class. */
                    542:        size_t          reg_size;
                    543:
                    544:        /* Total size of a run for this bin's size class. */
                    545:        size_t          run_size;
                    546:
                    547:        /* Total number of regions in a run for this bin's size class. */
                    548:        uint32_t        nregs;
                    549:
                    550:        /* Number of elements in a run's regs_mask for this bin's size class. */
                    551:        uint32_t        regs_mask_nelms;
                    552:
                    553:        /* Offset of first region in a run for this bin's size class. */
                    554:        uint32_t        reg0_offset;
                    555:
                    556: #ifdef MALLOC_STATS
                    557:        /* Bin statistics. */
                    558:        malloc_bin_stats_t stats;
                    559: #endif
                    560: };
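/*
 * Sketch (assumed, simplified from the allocation path that appears later
 * in this file): carving a region out of a small run scans regs_mask from
 * regs_minelm and uses ffs(3) to find the first free (set) bit.
 */
#if 0
static inline void *
example_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
{
	unsigned i, mask, bit, regind;

	for (i = run->regs_minelm; i < bin->regs_mask_nelms; i++) {
		mask = run->regs_mask[i];
		if (mask != 0) {
			bit = (unsigned)ffs((int)mask) - 1;
			run->regs_mask[i] = mask ^ (1U << bit); /* Now in use. */
			run->regs_minelm = i;	/* Earlier elements are full. */
			regind = (i << (SIZEOF_INT_2POW + 3)) + bit;
			return ((void *)((uintptr_t)run + bin->reg0_offset +
			    (regind * bin->reg_size)));
		}
	}
	return (NULL);	/* Unreachable if run->nfree > 0. */
}
#endif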
                    561:
                    562: struct arena_s {
                    563: #ifdef MALLOC_DEBUG
                    564:        uint32_t                magic;
                    565: #  define ARENA_MAGIC 0x947d3d24
                    566: #endif
                    567:
                    568:        /* All operations on this arena require that mtx be locked. */
                    569:        malloc_mutex_t          mtx;
                    570:
                    571: #ifdef MALLOC_STATS
                    572:        arena_stats_t           stats;
                    573: #endif
                    574:
                    575:        /*
                    576:         * Tree of chunks this arena manages.
                    577:         */
                    578:        arena_chunk_tree_t      chunks;
                    579:
                    580:        /*
                    581:         * In order to avoid rapid chunk allocation/deallocation when an arena
                    582:         * oscillates right on the cusp of needing a new chunk, cache the most
                    583:         * recently freed chunk.  This caching is disabled by opt_hint.
                    584:         *
                    585:         * There is one spare chunk per arena, rather than one spare total, in
                    586:         * order to avoid interactions between multiple threads that could make
                    587:         * a single spare inadequate.
                    588:         */
                    589:        arena_chunk_t *spare;
                    590:
                    591:        /*
                    592:         * bins is used to store trees of runs of free regions of the following sizes,
                    593:         * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
                    594:         *
                    595:         *   bins[i] | size |
                    596:         *   --------+------+
                    597:         *        0  |    2 |
                    598:         *        1  |    4 |
                    599:         *        2  |    8 |
                    600:         *   --------+------+
                    601:         *        3  |   16 |
                    602:         *        4  |   32 |
                    603:         *        5  |   48 |
                    604:         *        6  |   64 |
                    605:         *           :      :
                    606:         *           :      :
                    607:         *       33  |  496 |
                    608:         *       34  |  512 |
                    609:         *   --------+------+
                    610:         *       35  | 1024 |
                    611:         *       36  | 2048 |
                    612:         *   --------+------+
                    613:         */
                    614:        arena_bin_t             bins[1]; /* Dynamically sized. */
                    615: };
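/*
 * Example (illustrative): given the table above, a quantum-spaced request
 * is served from bins[ntbins + (QUANTUM_CEILING(size) >> opt_quantum_2pow)
 * - 1]; e.g. size 48 -> bins[3 + 3 - 1] = bins[5].
 */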
                    616:
                    617: /******************************************************************************/
                    618: /*
                    619:  * Data.
                    620:  */
                    621:
                    622: /* Number of CPUs. */
                    623: static unsigned                ncpus;
                    624:
                    625: /* VM page size. */
                    626: static size_t          pagesize;
                    627: static size_t          pagesize_mask;
1.9       christos  628: static int             pagesize_2pow;
1.1       ad        629:
                    630: /* Various bin-related settings. */
                    631: static size_t          bin_maxclass; /* Max size class for bins. */
                    632: static unsigned                ntbins; /* Number of (2^n)-spaced tiny bins. */
                    633: static unsigned                nqbins; /* Number of quantum-spaced bins. */
                    634: static unsigned                nsbins; /* Number of (2^n)-spaced sub-page bins. */
                    635: static size_t          small_min;
                    636: static size_t          small_max;
                    637:
                    638: /* Various quantum-related settings. */
                    639: static size_t          quantum;
                    640: static size_t          quantum_mask; /* (quantum - 1). */
                    641:
                    642: /* Various chunk-related settings. */
                    643: static size_t          chunksize;
                    644: static size_t          chunksize_mask; /* (chunksize - 1). */
1.5       yamt      645: static int             chunksize_2pow;
1.1       ad        646: static unsigned                chunk_npages;
                    647: static unsigned                arena_chunk_header_npages;
                    648: static size_t          arena_maxclass; /* Max size class for arenas. */
                    649:
                    650: /********/
                    651: /*
                    652:  * Chunks.
                    653:  */
                    654:
                    655: /* Protects chunk-related data structures. */
                    656: static malloc_mutex_t  chunks_mtx;
                    657:
                    658: /* Tree of chunks that are stand-alone huge allocations. */
                    659: static chunk_tree_t    huge;
                    660:
                    661: #ifdef USE_BRK
                    662: /*
                    663:  * Try to use brk for chunk-size allocations, due to address space constraints.
                    664:  */
                    665: /*
                    666:  * Protects sbrk() calls.  This must be separate from chunks_mtx, since
                    667:  * base_pages_alloc() also uses sbrk(), but cannot lock chunks_mtx (doing so
                    668:  * could cause recursive lock acquisition).
                    669:  */
                    670: static malloc_mutex_t  brk_mtx;
                    671: /* Result of first sbrk(0) call. */
                    672: static void            *brk_base;
                    673: /* Current end of brk, or ((void *)-1) if brk is exhausted. */
                    674: static void            *brk_prev;
                    675: /* Current upper limit on brk addresses. */
                    676: static void            *brk_max;
                    677: #endif
                    678:
                    679: #ifdef MALLOC_STATS
                    680: /* Huge allocation statistics. */
                    681: static uint64_t                huge_nmalloc;
                    682: static uint64_t                huge_ndalloc;
1.8       yamt      683: static uint64_t                huge_nralloc;
1.1       ad        684: static size_t          huge_allocated;
                    685: #endif
                    686:
                    687: /*
                    688:  * Tree of chunks that were previously allocated.  This is used when allocating
                    689:  * chunks, in an attempt to re-use address space.
                    690:  */
                    691: static chunk_tree_t    old_chunks;
                    692:
                    693: /****************************/
                    694: /*
                    695:  * base (internal allocation).
                    696:  */
                    697:
                    698: /*
                    699:  * Current pages that are being used for internal memory allocations.  These
                    700:  * pages are carved up in cacheline-size quanta, so that there is no chance of
                    701:  * false cache line sharing.
                    702:  */
                    703: static void            *base_pages;
                    704: static void            *base_next_addr;
                    705: static void            *base_past_addr; /* Addr immediately past base_pages. */
                    706: static chunk_node_t    *base_chunk_nodes; /* LIFO cache of chunk nodes. */
                    707: static malloc_mutex_t  base_mtx;
                    708: #ifdef MALLOC_STATS
                    709: static size_t          base_mapped;
                    710: #endif
                    711:
                    712: /********/
                    713: /*
                    714:  * Arenas.
                    715:  */
                    716:
                    717: /*
                    718:  * Arenas that are used to service external requests.  Not all elements of the
                    719:  * arenas array are necessarily used; arenas are created lazily as needed.
                    720:  */
                    721: static arena_t         **arenas;
                    722: static unsigned                narenas;
                    723: static unsigned                next_arena;
                    724: static malloc_mutex_t  arenas_mtx; /* Protects arenas initialization. */
                    725:
1.2       ad        726: static thread_key_t arenas_map_key;
1.12    ! ad        727: #define        get_arenas_map()        ((unsigned)thr_getspecific(arenas_map_key))
        !           728: #define        set_arenas_map(x)       thr_setspecific(arenas_map_key, (void *)x)
1.1       ad        729:
                    730: #ifdef MALLOC_STATS
                    731: /* Chunk statistics. */
                    732: static chunk_stats_t   stats_chunks;
                    733: #endif
                    734:
                    735: /*******************************/
                    736: /*
                    737:  * Runtime configuration options.
                    738:  */
                    739: const char     *_malloc_options;
                    740:
                    741: #ifndef MALLOC_PRODUCTION
                    742: static bool    opt_abort = true;
                    743: static bool    opt_junk = true;
                    744: #else
                    745: static bool    opt_abort = false;
                    746: static bool    opt_junk = false;
                    747: #endif
                    748: static bool    opt_hint = false;
                    749: static bool    opt_print_stats = false;
1.9       christos  750: static int     opt_quantum_2pow = QUANTUM_2POW_MIN;
                    751: static int     opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
                    752: static int     opt_chunk_2pow = CHUNK_2POW_DEFAULT;
1.1       ad        753: static bool    opt_utrace = false;
                    754: static bool    opt_sysv = false;
                    755: static bool    opt_xmalloc = false;
                    756: static bool    opt_zero = false;
                    757: static int32_t opt_narenas_lshift = 0;
                    758:
                    759: typedef struct {
                    760:        void    *p;
                    761:        size_t  s;
                    762:        void    *r;
                    763: } malloc_utrace_t;
                    764:
                    765: #define        UTRACE(a, b, c)                                                 \
                    766:        if (opt_utrace) {                                               \
1.2       ad        767:                malloc_utrace_t ut;                                     \
                    768:                ut.p = a;                                               \
                    769:                ut.s = b;                                               \
                    770:                ut.r = c;                                               \
                    771:                xutrace(&ut, sizeof(ut));                               \
1.1       ad        772:        }
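/*
 * Illustrative call sites (assumed; the actual wrappers appear later in
 * this file): an allocation records UTRACE(0, size, ret) and a free
 * records UTRACE(ptr, 0, 0).  On NetBSD, xutrace() maps to
 * utrace("malloc", ...), so the records can be captured with ktrace(1).
 */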
                    773:
                    774: /******************************************************************************/
                    775: /*
                    776:  * Begin function prototypes for non-inline static functions.
                    777:  */
                    778:
                    779: static void    wrtmessage(const char *p1, const char *p2, const char *p3,
                    780:                const char *p4);
                    781: #ifdef MALLOC_STATS
                    782: static void    malloc_printf(const char *format, ...);
                    783: #endif
                    784: static char    *umax2s(uintmax_t x, char *s);
                    785: static bool    base_pages_alloc(size_t minsize);
                    786: static void    *base_alloc(size_t size);
                    787: static chunk_node_t *base_chunk_node_alloc(void);
                    788: static void    base_chunk_node_dealloc(chunk_node_t *node);
                    789: #ifdef MALLOC_STATS
                    790: static void    stats_print(arena_t *arena);
                    791: #endif
                    792: static void    *pages_map(void *addr, size_t size);
1.5       yamt      793: static void    *pages_map_align(void *addr, size_t size, int align);
1.1       ad        794: static void    pages_unmap(void *addr, size_t size);
                    795: static void    *chunk_alloc(size_t size);
                    796: static void    chunk_dealloc(void *chunk, size_t size);
1.12    ! ad        797: static unsigned choose_arena_hard(void);
1.1       ad        798: static void    arena_run_split(arena_t *arena, arena_run_t *run, size_t size);
                    799: static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
                    800: static void    arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
                    801: static arena_run_t *arena_run_alloc(arena_t *arena, size_t size);
                    802: static void    arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size);
                    803: static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
                    804: static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
                    805: static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
                    806: static void    *arena_malloc(arena_t *arena, size_t size);
                    807: static void    *arena_palloc(arena_t *arena, size_t alignment, size_t size,
                    808:     size_t alloc_size);
                    809: static size_t  arena_salloc(const void *ptr);
                    810: static void    *arena_ralloc(void *ptr, size_t size, size_t oldsize);
                    811: static void    arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
                    812: static bool    arena_new(arena_t *arena);
                    813: static arena_t *arenas_extend(unsigned ind);
                    814: static void    *huge_malloc(size_t size);
                    815: static void    *huge_palloc(size_t alignment, size_t size);
                    816: static void    *huge_ralloc(void *ptr, size_t size, size_t oldsize);
                    817: static void    huge_dalloc(void *ptr);
                    818: static void    *imalloc(size_t size);
                    819: static void    *ipalloc(size_t alignment, size_t size);
                    820: static void    *icalloc(size_t size);
                    821: static size_t  isalloc(const void *ptr);
                    822: static void    *iralloc(void *ptr, size_t size);
                    823: static void    idalloc(void *ptr);
                    824: static void    malloc_print_stats(void);
                    825: static bool    malloc_init_hard(void);
                    826:
                    827: /*
                    828:  * End function prototypes.
                    829:  */
                    830: /******************************************************************************/
                    831: /*
                    832:  * Begin mutex.
                    833:  */
                    834:
1.2       ad        835: #define        malloc_mutex_init(m)    mutex_init(m, NULL)
                    836: #define        malloc_mutex_lock(m)    mutex_lock(m)
                    837: #define        malloc_mutex_unlock(m)  mutex_unlock(m)
1.1       ad        838:
                    839: /*
                    840:  * End mutex.
                    841:  */
                    842: /******************************************************************************/
                    843: /*
                    844:  * Begin Utility functions/macros.
                    845:  */
                    846:
                    847: /* Return the chunk address for allocation address a. */
                    848: #define        CHUNK_ADDR2BASE(a)                                              \
                    849:        ((void *)((uintptr_t)(a) & ~chunksize_mask))
                    850:
                    851: /* Return the chunk offset of address a. */
                    852: #define        CHUNK_ADDR2OFFSET(a)                                            \
                    853:        ((size_t)((uintptr_t)(a) & chunksize_mask))
                    854:
                    855: /* Return the smallest chunk multiple that is >= s. */
                    856: #define        CHUNK_CEILING(s)                                                \
                    857:        (((s) + chunksize_mask) & ~chunksize_mask)
                    858:
                    859: /* Return the smallest cacheline multiple that is >= s. */
                    860: #define        CACHELINE_CEILING(s)                                            \
                    861:        (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
                    862:
                    863: /* Return the smallest quantum multiple that is >= a. */
                    864: #define        QUANTUM_CEILING(a)                                              \
                    865:        (((a) + quantum_mask) & ~quantum_mask)
                    866:
                    867: /* Return the smallest pagesize multiple that is >= s. */
                    868: #define        PAGE_CEILING(s)                                                 \
                    869:        (((s) + pagesize_mask) & ~pagesize_mask)
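/*
 * For example, with a 16 byte quantum, QUANTUM_CEILING(37) == 48, and with
 * 4 kB pages, PAGE_CEILING(4097) == 8192.
 */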
                    870:
                    871: /* Compute the smallest power of 2 that is >= x. */
                    872: static inline size_t
                    873: pow2_ceil(size_t x)
                    874: {
                    875:
                    876:        x--;
                    877:        x |= x >> 1;
                    878:        x |= x >> 2;
                    879:        x |= x >> 4;
                    880:        x |= x >> 8;
                    881:        x |= x >> 16;
                    882: #if (SIZEOF_PTR == 8)
                    883:        x |= x >> 32;
                    884: #endif
                    885:        x++;
                    886:        return (x);
                    887: }
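/*
 * Example: pow2_ceil(37): 36 == 0b100100; the shifted ORs smear the top
 * bit down to give 0b111111 == 63; adding 1 yields 64.  Exact powers of 2
 * are returned unchanged.
 */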
                    888:
                    889: static void
                    890: wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
                    891: {
                    892:
                    893:        _write(STDERR_FILENO, p1, strlen(p1));
                    894:        _write(STDERR_FILENO, p2, strlen(p2));
                    895:        _write(STDERR_FILENO, p3, strlen(p3));
                    896:        _write(STDERR_FILENO, p4, strlen(p4));
                    897: }
                    898:
                    899: void   (*_malloc_message)(const char *p1, const char *p2, const char *p3,
                    900:            const char *p4) = wrtmessage;
                    901:
                    902: #ifdef MALLOC_STATS
                    903: /*
                    904:  * Print to stderr in such a way as to (hopefully) avoid memory allocation.
                    905:  */
                    906: static void
                    907: malloc_printf(const char *format, ...)
                    908: {
                    909:        char buf[4096];
                    910:        va_list ap;
                    911:
                    912:        va_start(ap, format);
                    913:        vsnprintf(buf, sizeof(buf), format, ap);
                    914:        va_end(ap);
                    915:        _malloc_message(buf, "", "", "");
                    916: }
                    917: #endif
                    918:
                    919: /*
                    920:  * We don't want to depend on vsnprintf() for production builds, since that can
                    921:  * cause unnecessary bloat for static binaries.  umax2s() provides minimal
                    922:  * integer printing functionality, so that malloc_printf() use can be limited to
                    923:  * MALLOC_STATS code.
                    924:  */
                    925: #define UMAX2S_BUFSIZE 21
                    926: static char *
                    927: umax2s(uintmax_t x, char *s)
                    928: {
                    929:        unsigned i;
                    930:
                    931:        /* Make sure UMAX2S_BUFSIZE is large enough. */
1.7       yamt      932:        /* LINTED */
1.1       ad        933:        assert(sizeof(uintmax_t) <= 8);
                    934:
                    935:        i = UMAX2S_BUFSIZE - 1;
                    936:        s[i] = '\0';
                    937:        do {
                    938:                i--;
1.2       ad        939:                s[i] = "0123456789"[(int)x % 10];
                    940:                x /= (uintmax_t)10LL;
1.1       ad        941:        } while (x > 0);
                    942:
                    943:        return (&s[i]);
                    944: }
                    945:
                    946: /******************************************************************************/
                    947:
                    948: static bool
                    949: base_pages_alloc(size_t minsize)
                    950: {
1.2       ad        951:        size_t csize = 0;
1.1       ad        952:
                    953: #ifdef USE_BRK
                    954:        /*
                    955:         * Do special brk allocation here, since base allocations don't need to
                    956:         * be chunk-aligned.
                    957:         */
                    958:        if (brk_prev != (void *)-1) {
                    959:                void *brk_cur;
                    960:                intptr_t incr;
                    961:
                    962:                if (minsize != 0)
                    963:                        csize = CHUNK_CEILING(minsize);
                    964:
                    965:                malloc_mutex_lock(&brk_mtx);
                    966:                do {
                    967:                        /* Get the current end of brk. */
                    968:                        brk_cur = sbrk(0);
                    969:
                    970:                        /*
                    971:                         * Calculate how much padding is necessary to
                    972:                         * chunk-align the end of brk.  Don't worry about
                    973:                         * brk_cur not being chunk-aligned though.
                    974:                         */
                    975:                        incr = (intptr_t)chunksize
                    976:                            - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
                    977:                        if (incr < minsize)
                    978:                                incr += csize;
                    979:
                    980:                        brk_prev = sbrk(incr);
                    981:                        if (brk_prev == brk_cur) {
                    982:                                /* Success. */
                    983:                                malloc_mutex_unlock(&brk_mtx);
                    984:                                base_pages = brk_cur;
                    985:                                base_next_addr = base_pages;
                    986:                                base_past_addr = (void *)((uintptr_t)base_pages
                    987:                                    + incr);
                    988: #ifdef MALLOC_STATS
                    989:                                base_mapped += incr;
                    990: #endif
                    991:                                return (false);
                    992:                        }
                    993:                } while (brk_prev != (void *)-1);
                    994:                malloc_mutex_unlock(&brk_mtx);
                    995:        }
                    996:        if (minsize == 0) {
                    997:                /*
                    998:                 * Failure during initialization doesn't matter, so avoid
                    999:                 * falling through to the mmap-based page mapping code.
                   1000:                 */
                   1001:                return (true);
                   1002:        }
                   1003: #endif
                   1004:        assert(minsize != 0);
                   1005:        csize = PAGE_CEILING(minsize);
                   1006:        base_pages = pages_map(NULL, csize);
                   1007:        if (base_pages == NULL)
                   1008:                return (true);
                   1009:        base_next_addr = base_pages;
                   1010:        base_past_addr = (void *)((uintptr_t)base_pages + csize);
                   1011: #ifdef MALLOC_STATS
                   1012:        base_mapped += csize;
                   1013: #endif
                   1014:        return (false);
                   1015: }
                   1016:
                   1017: static void *
                   1018: base_alloc(size_t size)
                   1019: {
                   1020:        void *ret;
                   1021:        size_t csize;
                   1022:
                   1023:        /* Round size up to nearest multiple of the cacheline size. */
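        /*
         * For example (assuming a 64-byte cacheline), sizes 1..64 round
         * up to 64 and sizes 65..128 round up to 128.
         */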
                   1024:        csize = CACHELINE_CEILING(size);
                   1025:
                   1026:        malloc_mutex_lock(&base_mtx);
                   1027:
                   1028:        /* Make sure there's enough space for the allocation. */
                   1029:        if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
                   1030:                if (base_pages_alloc(csize)) {
                   1031:                        ret = NULL;
                   1032:                        goto RETURN;
                   1033:                }
                   1034:        }
                   1035:
                   1036:        /* Allocate. */
                   1037:        ret = base_next_addr;
                   1038:        base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
                   1039:
                   1040: RETURN:
                   1041:        malloc_mutex_unlock(&base_mtx);
                   1042:        return (ret);
                   1043: }
                   1044:
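/*
 * Recycled chunk nodes are kept on a simple LIFO free list: the first
 * word of each free node is reused to store the pointer to the next
 * free node (hence the chunk_node_t ** casts below).
 */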
                   1045: static chunk_node_t *
                   1046: base_chunk_node_alloc(void)
                   1047: {
                   1048:        chunk_node_t *ret;
                   1049:
                   1050:        malloc_mutex_lock(&base_mtx);
                   1051:        if (base_chunk_nodes != NULL) {
                   1052:                ret = base_chunk_nodes;
1.2       ad       1053:                /* LINTED */
1.1       ad       1054:                base_chunk_nodes = *(chunk_node_t **)ret;
                   1055:                malloc_mutex_unlock(&base_mtx);
                   1056:        } else {
                   1057:                malloc_mutex_unlock(&base_mtx);
                   1058:                ret = (chunk_node_t *)base_alloc(sizeof(chunk_node_t));
                   1059:        }
                   1060:
                   1061:        return (ret);
                   1062: }
                   1063:
                   1064: static void
                   1065: base_chunk_node_dealloc(chunk_node_t *node)
                   1066: {
                   1067:
                   1068:        malloc_mutex_lock(&base_mtx);
1.2       ad       1069:        /* LINTED */
1.1       ad       1070:        *(chunk_node_t **)node = base_chunk_nodes;
                   1071:        base_chunk_nodes = node;
                   1072:        malloc_mutex_unlock(&base_mtx);
                   1073: }
                   1074:
                   1075: /******************************************************************************/
                   1076:
                   1077: #ifdef MALLOC_STATS
                   1078: static void
                   1079: stats_print(arena_t *arena)
                   1080: {
                   1081:        unsigned i;
                   1082:        int gap_start;
                   1083:
                   1084:        malloc_printf(
                   1085:            "          allocated/mapped            nmalloc      ndalloc\n");
1.2       ad       1086:
                   1087:        malloc_printf("small: %12zu %-12s %12llu %12llu\n",
1.1       ad       1088:            arena->stats.allocated_small, "", arena->stats.nmalloc_small,
                   1089:            arena->stats.ndalloc_small);
1.2       ad       1090:        malloc_printf("large: %12zu %-12s %12llu %12llu\n",
1.1       ad       1091:            arena->stats.allocated_large, "", arena->stats.nmalloc_large,
                   1092:            arena->stats.ndalloc_large);
1.2       ad       1093:        malloc_printf("total: %12zu/%-12zu %12llu %12llu\n",
1.1       ad       1094:            arena->stats.allocated_small + arena->stats.allocated_large,
                   1095:            arena->stats.mapped,
                   1096:            arena->stats.nmalloc_small + arena->stats.nmalloc_large,
                   1097:            arena->stats.ndalloc_small + arena->stats.ndalloc_large);
                   1098:
                   1099:        malloc_printf("bins:     bin   size regs pgs  requests   newruns"
                   1100:            "    reruns maxruns curruns\n");
                   1101:        for (i = 0, gap_start = -1; i < ntbins + nqbins + nsbins; i++) {
                   1102:                if (arena->bins[i].stats.nrequests == 0) {
                   1103:                        if (gap_start == -1)
                   1104:                                gap_start = i;
                   1105:                } else {
                   1106:                        if (gap_start != -1) {
                   1107:                                if (i > gap_start + 1) {
                   1108:                                        /* Gap of more than one size class. */
                   1109:                                        malloc_printf("[%u..%u]\n",
                   1110:                                            gap_start, i - 1);
                   1111:                                } else {
                   1112:                                        /* Gap of one size class. */
                   1113:                                        malloc_printf("[%u]\n", gap_start);
                   1114:                                }
                   1115:                                gap_start = -1;
                   1116:                        }
                   1117:                        malloc_printf(
                   1118:                            "%13u %1s %4u %4u %3u %9llu %9llu"
                   1119:                            " %9llu %7lu %7lu\n",
                   1120:                            i,
                   1121:                            i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S",
                   1122:                            arena->bins[i].reg_size,
                   1123:                            arena->bins[i].nregs,
                   1124:                            arena->bins[i].run_size >> pagesize_2pow,
                   1125:                            arena->bins[i].stats.nrequests,
                   1126:                            arena->bins[i].stats.nruns,
                   1127:                            arena->bins[i].stats.reruns,
                   1128:                            arena->bins[i].stats.highruns,
                   1129:                            arena->bins[i].stats.curruns);
                   1130:                }
                   1131:        }
                   1132:        if (gap_start != -1) {
                   1133:                if (i > gap_start + 1) {
                   1134:                        /* Gap of more than one size class. */
                   1135:                        malloc_printf("[%u..%u]\n", gap_start, i - 1);
                   1136:                } else {
                   1137:                        /* Gap of one size class. */
                   1138:                        malloc_printf("[%u]\n", gap_start);
                   1139:                }
                   1140:        }
                   1141: }
                   1142: #endif
                   1143:
                   1144: /*
                   1145:  * End Utility functions/macros.
                   1146:  */
                   1147: /******************************************************************************/
                   1148: /*
                   1149:  * Begin chunk management functions.
                   1150:  */
                   1151:
1.7       yamt     1152: #ifndef lint
1.1       ad       1153: static inline int
                   1154: chunk_comp(chunk_node_t *a, chunk_node_t *b)
                   1155: {
                   1156:
                   1157:        assert(a != NULL);
                   1158:        assert(b != NULL);
                   1159:
                   1160:        if ((uintptr_t)a->chunk < (uintptr_t)b->chunk)
                   1161:                return (-1);
                   1162:        else if (a->chunk == b->chunk)
                   1163:                return (0);
                   1164:        else
                   1165:                return (1);
                   1166: }
                   1167:
                   1168: /* Generate red-black tree code for chunks. */
                   1169: RB_GENERATE_STATIC(chunk_tree_s, chunk_node_s, link, chunk_comp);
1.2       ad       1170: #endif
1.1       ad       1171:
                   1172: static void *
1.5       yamt     1173: pages_map_align(void *addr, size_t size, int align)
1.1       ad       1174: {
                   1175:        void *ret;
                   1176:
                   1177:        /*
                   1178:         * We don't use MAP_FIXED here, because it can cause the *replacement*
                   1179:         * of existing mappings, and we only want to create new mappings.
                   1180:         */
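        /*
         * MAP_ALIGNED(align) asks the kernel for a mapping aligned to
         * (1 << align) bytes; align == 0 requests no particular
         * alignment (see pages_map() below).
         */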
1.5       yamt     1181:        ret = mmap(addr, size, PROT_READ | PROT_WRITE,
                   1182:            MAP_PRIVATE | MAP_ANON | MAP_ALIGNED(align), -1, 0);
1.1       ad       1183:        assert(ret != NULL);
                   1184:
                   1185:        if (ret == MAP_FAILED)
                   1186:                ret = NULL;
                   1187:        else if (addr != NULL && ret != addr) {
                   1188:                /*
                   1189:                 * We succeeded in mapping memory, but not in the right place.
                   1190:                 */
                   1191:                if (munmap(ret, size) == -1) {
                   1192:                        char buf[STRERROR_BUF];
                   1193:
                   1194:                        strerror_r(errno, buf, sizeof(buf));
                   1195:                        _malloc_message(_getprogname(),
                   1196:                            ": (malloc) Error in munmap(): ", buf, "\n");
                   1197:                        if (opt_abort)
                   1198:                                abort();
                   1199:                }
                   1200:                ret = NULL;
                   1201:        }
                   1202:
                   1203:        assert(ret == NULL || (addr == NULL && ret != addr)
                   1204:            || (addr != NULL && ret == addr));
                   1205:        return (ret);
                   1206: }
                   1207:
1.5       yamt     1208: static void *
                   1209: pages_map(void *addr, size_t size)
                   1210: {
                   1211:
                   1212:        return pages_map_align(addr, size, 0);
                   1213: }
                   1214:
1.1       ad       1215: static void
                   1216: pages_unmap(void *addr, size_t size)
                   1217: {
                   1218:
                   1219:        if (munmap(addr, size) == -1) {
                   1220:                char buf[STRERROR_BUF];
                   1221:
                   1222:                strerror_r(errno, buf, sizeof(buf));
                   1223:                _malloc_message(_getprogname(),
                   1224:                    ": (malloc) Error in munmap(): ", buf, "\n");
                   1225:                if (opt_abort)
                   1226:                        abort();
                   1227:        }
                   1228: }
                   1229:
                   1230: static void *
                   1231: chunk_alloc(size_t size)
                   1232: {
                   1233:        void *ret, *chunk;
                   1234:        chunk_node_t *tchunk, *delchunk;
                   1235:
                   1236:        assert(size != 0);
                   1237:        assert((size & chunksize_mask) == 0);
                   1238:
                   1239:        malloc_mutex_lock(&chunks_mtx);
                   1240:
                   1241:        if (size == chunksize) {
                   1242:                /*
                   1243:                 * Check for address ranges that were previously chunks and try
                   1244:                 * to use them.
                   1245:                 */
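                /*
                 * The walk starts at RB_MIN() and proceeds in ascending
                 * address order, so the lowest-addressed recyclable range
                 * is tried first.
                 */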
                   1246:
1.2       ad       1247:                /* LINTED */
1.1       ad       1248:                tchunk = RB_MIN(chunk_tree_s, &old_chunks);
                   1249:                while (tchunk != NULL) {
                   1250:                        /* Found an address range.  Try to recycle it. */
                   1251:
                   1252:                        chunk = tchunk->chunk;
                   1253:                        delchunk = tchunk;
1.2       ad       1254:                        /* LINTED */
1.1       ad       1255:                        tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk);
                   1256:
                   1257:                        /* Remove delchunk from the tree. */
1.2       ad       1258:                        /* LINTED */
1.1       ad       1259:                        RB_REMOVE(chunk_tree_s, &old_chunks, delchunk);
                   1260:                        base_chunk_node_dealloc(delchunk);
                   1261:
                   1262: #ifdef USE_BRK
                   1263:                        if ((uintptr_t)chunk >= (uintptr_t)brk_base
                   1264:                            && (uintptr_t)chunk < (uintptr_t)brk_max) {
                   1265:                                /* Re-use a previously freed brk chunk. */
                   1266:                                ret = chunk;
                   1267:                                goto RETURN;
                   1268:                        }
                   1269: #endif
                   1270:                        if ((ret = pages_map(chunk, size)) != NULL) {
                   1271:                                /* Success. */
                   1272:                                goto RETURN;
                   1273:                        }
                   1274:                }
                   1275:        }
                   1276:
                   1277:        /*
                    1278:         * Ask for a chunk-aligned mapping directly, but allow the OS to
                    1279:         * place the allocation anywhere.  Beware of size_t wrap-around.
                   1280:         */
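        /*
         * Example of the wrap-around guard (illustrative; a 32-bit
         * system with 1MB chunks): size == 0xfff00000 makes
         * size + chunksize wrap to 0, so the test fails and the
         * aligned-mmap strategy is skipped.
         */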
                   1281:        if (size + chunksize > size) {
1.5       yamt     1282:                if ((ret = pages_map_align(NULL, size, chunksize_2pow))
                   1283:                    != NULL) {
1.1       ad       1284:                        goto RETURN;
                   1285:                }
                   1286:        }
                   1287:
                   1288: #ifdef USE_BRK
                   1289:        /*
                   1290:         * Try to create allocations in brk, in order to make full use of
                   1291:         * limited address space.
                   1292:         */
                   1293:        if (brk_prev != (void *)-1) {
                   1294:                void *brk_cur;
                   1295:                intptr_t incr;
                   1296:
                   1297:                /*
                   1298:                 * The loop is necessary to recover from races with other
                   1299:                 * threads that are using brk for something other than malloc.
                   1300:                 */
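                /*
                 * sbrk(0) samples the current break, and the later
                 * sbrk(incr) returns the break as it was immediately
                 * before extending it; if another thread moved the break
                 * in between, brk_prev != brk_cur and we retry with a
                 * freshly computed incr.
                 */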
                   1301:                malloc_mutex_lock(&brk_mtx);
                   1302:                do {
                   1303:                        /* Get the current end of brk. */
                   1304:                        brk_cur = sbrk(0);
                   1305:
                   1306:                        /*
                   1307:                         * Calculate how much padding is necessary to
                   1308:                         * chunk-align the end of brk.
                   1309:                         */
                   1310:                        incr = (intptr_t)size
                   1311:                            - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
                   1312:                        if (incr == size) {
                   1313:                                ret = brk_cur;
                   1314:                        } else {
                   1315:                                ret = (void *)((intptr_t)brk_cur + incr);
                   1316:                                incr += size;
                   1317:                        }
                   1318:
                   1319:                        brk_prev = sbrk(incr);
                   1320:                        if (brk_prev == brk_cur) {
                   1321:                                /* Success. */
                   1322:                                malloc_mutex_unlock(&brk_mtx);
                   1323:                                brk_max = (void *)((intptr_t)ret + size);
                   1324:                                goto RETURN;
                   1325:                        }
                   1326:                } while (brk_prev != (void *)-1);
                   1327:                malloc_mutex_unlock(&brk_mtx);
                   1328:        }
                   1329: #endif
                   1330:
                   1331:        /* All strategies for allocation failed. */
                   1332:        ret = NULL;
                   1333: RETURN:
                   1334:        if (ret != NULL) {
                   1335:                chunk_node_t key;
                   1336:                /*
                   1337:                 * Clean out any entries in old_chunks that overlap with the
                   1338:                 * memory we just allocated.
                   1339:                 */
                   1340:                key.chunk = ret;
1.2       ad       1341:                /* LINTED */
1.1       ad       1342:                tchunk = RB_NFIND(chunk_tree_s, &old_chunks, &key);
                   1343:                while (tchunk != NULL
                   1344:                    && (uintptr_t)tchunk->chunk >= (uintptr_t)ret
                   1345:                    && (uintptr_t)tchunk->chunk < (uintptr_t)ret + size) {
                   1346:                        delchunk = tchunk;
1.2       ad       1347:                        /* LINTED */
1.1       ad       1348:                        tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk);
1.2       ad       1349:                        /* LINTED */
1.1       ad       1350:                        RB_REMOVE(chunk_tree_s, &old_chunks, delchunk);
                   1351:                        base_chunk_node_dealloc(delchunk);
                   1352:                }
                   1353:
                   1354:        }
                   1355: #ifdef MALLOC_STATS
                   1356:        if (ret != NULL) {
                   1357:                stats_chunks.nchunks += (size / chunksize);
                   1358:                stats_chunks.curchunks += (size / chunksize);
                   1359:        }
                   1360:        if (stats_chunks.curchunks > stats_chunks.highchunks)
                   1361:                stats_chunks.highchunks = stats_chunks.curchunks;
                   1362: #endif
                   1363:        malloc_mutex_unlock(&chunks_mtx);
                   1364:
                   1365:        assert(CHUNK_ADDR2BASE(ret) == ret);
                   1366:        return (ret);
                   1367: }
                   1368:
                   1369: static void
                   1370: chunk_dealloc(void *chunk, size_t size)
                   1371: {
                   1372:        chunk_node_t *node;
                   1373:
                   1374:        assert(chunk != NULL);
                   1375:        assert(CHUNK_ADDR2BASE(chunk) == chunk);
                   1376:        assert(size != 0);
                   1377:        assert((size & chunksize_mask) == 0);
                   1378:
                   1379:        malloc_mutex_lock(&chunks_mtx);
                   1380:
                   1381: #ifdef USE_BRK
                   1382:        if ((uintptr_t)chunk >= (uintptr_t)brk_base
                   1383:            && (uintptr_t)chunk < (uintptr_t)brk_max) {
                   1384:                void *brk_cur;
                   1385:
                   1386:                malloc_mutex_lock(&brk_mtx);
                   1387:                /* Get the current end of brk. */
                   1388:                brk_cur = sbrk(0);
                   1389:
                   1390:                /*
                   1391:                 * Try to shrink the data segment if this chunk is at the end
                   1392:                 * of the data segment.  The sbrk() call here is subject to a
                   1393:                 * race condition with threads that use brk(2) or sbrk(2)
                   1394:                 * directly, but the alternative would be to leak memory for
                   1395:                 * the sake of poorly designed multi-threaded programs.
                   1396:                 */
                   1397:                if (brk_cur == brk_max
                   1398:                    && (void *)((uintptr_t)chunk + size) == brk_max
                   1399:                    && sbrk(-(intptr_t)size) == brk_max) {
                   1400:                        malloc_mutex_unlock(&brk_mtx);
                   1401:                        if (brk_prev == brk_max) {
                   1402:                                /* Success. */
                   1403:                                brk_prev = (void *)((intptr_t)brk_max
                   1404:                                    - (intptr_t)size);
                   1405:                                brk_max = brk_prev;
                   1406:                        }
                   1407:                } else {
                   1408:                        size_t offset;
                   1409:
                   1410:                        malloc_mutex_unlock(&brk_mtx);
                   1411:                        madvise(chunk, size, MADV_FREE);
                   1412:
                   1413:                        /*
                    1414:                         * Iteratively create records of each chunk-sized
                    1415:                         * memory region that 'chunk' comprises, so that
                   1416:                         * the address range can be recycled if memory usage
                   1417:                         * increases later on.
                   1418:                         */
                   1419:                        for (offset = 0; offset < size; offset += chunksize) {
                   1420:                                node = base_chunk_node_alloc();
                   1421:                                if (node == NULL)
                   1422:                                        break;
                   1423:
                   1424:                                node->chunk = (void *)((uintptr_t)chunk
                   1425:                                    + (uintptr_t)offset);
                   1426:                                node->size = chunksize;
1.2       ad       1427:                                /* LINTED */
1.1       ad       1428:                                RB_INSERT(chunk_tree_s, &old_chunks, node);
                   1429:                        }
                   1430:                }
                   1431:        } else {
                   1432: #endif
                   1433:                pages_unmap(chunk, size);
                   1434:
                   1435:                /*
                   1436:                 * Make a record of the chunk's address, so that the address
                   1437:                 * range can be recycled if memory usage increases later on.
                   1438:                 * Don't bother to create entries if (size > chunksize), since
                   1439:                 * doing so could cause scalability issues for truly gargantuan
                   1440:                 * objects (many gigabytes or larger).
                   1441:                 */
                   1442:                if (size == chunksize) {
                   1443:                        node = base_chunk_node_alloc();
                   1444:                        if (node != NULL) {
                   1445:                                node->chunk = (void *)(uintptr_t)chunk;
                   1446:                                node->size = chunksize;
1.2       ad       1447:                                /* LINTED */
1.1       ad       1448:                                RB_INSERT(chunk_tree_s, &old_chunks, node);
                   1449:                        }
                   1450:                }
                   1451: #ifdef USE_BRK
                   1452:        }
                   1453: #endif
                   1454:
                   1455: #ifdef MALLOC_STATS
                   1456:        stats_chunks.curchunks -= (size / chunksize);
                   1457: #endif
                   1458:        malloc_mutex_unlock(&chunks_mtx);
                   1459: }
                   1460:
                   1461: /*
                   1462:  * End chunk management functions.
                   1463:  */
                   1464: /******************************************************************************/
                   1465: /*
                   1466:  * Begin arena.
                   1467:  */
                   1468:
                   1469: /*
1.12    ! ad       1470:  * Choose an arena based on a per-thread and (optimistically) per-CPU value.
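 * Each thread is handed a contiguous block of arenas (one per CPU; see
 * choose_arena_hard()): get_arenas_map() returns the base index of the
 * block and thr_curcpu() selects the member for the current CPU.  For
 * example (illustrative), with ncpus == 2 a thread whose block starts
 * at index 4 uses arena 4 when running on CPU 0 and arena 5 on CPU 1.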
1.1       ad       1471:  */
                   1472: static inline arena_t *
                   1473: choose_arena(void)
                   1474: {
1.12    ! ad       1475:        unsigned ret;
1.1       ad       1476:
1.12    ! ad       1477:        ret = get_arenas_map() + thr_curcpu();
        !          1478:        if (__predict_true(arenas[ret] != NULL))
        !          1479:                return arenas[ret];
        !          1480:
        !          1481:        ret = choose_arena_hard();
        !          1482:        assert(arenas[ret] != NULL);
        !          1483:        return (arenas[ret]);
1.1       ad       1484: }
                   1485:
                   1486: /*
                    1487:  * Choose an arena based on a per-thread value (slow path; called only by
                    1488:  * choose_arena()).
                   1489:  */
1.12    ! ad       1490: static unsigned
1.1       ad       1491: choose_arena_hard(void)
                   1492: {
1.12    ! ad       1493:        unsigned ret, i, curcpu;
1.1       ad       1494:
                   1495:        assert(__isthreaded);
                   1496:
1.12    ! ad       1497:        /* Assign a block of arenas to this thread, in round-robin fashion. */
1.1       ad       1498:        malloc_mutex_lock(&arenas_mtx);
1.12    ! ad       1499:        ret = next_arena;
        !          1500:        for (i = 0; i < ncpus; i++) {
        !          1501:                if (arenas[next_arena] == NULL)
        !          1502:                        arenas_extend(next_arena);
        !          1503:                next_arena = (next_arena + 1) % narenas;
        !          1504:        }
        !          1505:        if (arenas[ret] == NULL) {
        !          1506:                /*
        !          1507:                 * Make sure that this function never returns the
        !          1508:                 * index of a NULL arena slot (arenas[0] is always
        !          1509:                 * initialized), so that choose_arena() needn't check.
1.1       ad       1510:                 */
1.12    ! ad       1511:                ret = 0;
        !          1512:        }
1.1       ad       1513:        malloc_mutex_unlock(&arenas_mtx);
1.12    ! ad       1514:
1.2       ad       1515:        set_arenas_map(ret);
1.12    ! ad       1516:        curcpu = thr_curcpu();
        !          1517:        if (arenas[ret + curcpu] != NULL)
        !          1518:                ret += curcpu;
1.1       ad       1519:
                   1520:        return (ret);
                   1521: }
                   1522:
1.7       yamt     1523: #ifndef lint
1.1       ad       1524: static inline int
                   1525: arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
                   1526: {
                   1527:
                   1528:        assert(a != NULL);
                   1529:        assert(b != NULL);
                   1530:
                   1531:        if ((uintptr_t)a < (uintptr_t)b)
                   1532:                return (-1);
                   1533:        else if (a == b)
                   1534:                return (0);
                   1535:        else
                   1536:                return (1);
                   1537: }
                   1538:
                   1539: /* Generate red-black tree code for arena chunks. */
                   1540: RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp);
1.2       ad       1541: #endif
1.1       ad       1542:
1.7       yamt     1543: #ifndef lint
1.1       ad       1544: static inline int
                   1545: arena_run_comp(arena_run_t *a, arena_run_t *b)
                   1546: {
                   1547:
                   1548:        assert(a != NULL);
                   1549:        assert(b != NULL);
                   1550:
                   1551:        if ((uintptr_t)a < (uintptr_t)b)
                   1552:                return (-1);
                   1553:        else if (a == b)
                   1554:                return (0);
                   1555:        else
                   1556:                return (1);
                   1557: }
                   1558:
                   1559: /* Generate red-black tree code for arena runs. */
                   1560: RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp);
1.2       ad       1561: #endif
1.1       ad       1562:
                   1563: static inline void *
                   1564: arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
                   1565: {
                   1566:        void *ret;
                   1567:        unsigned i, mask, bit, regind;
                   1568:
                   1569:        assert(run->magic == ARENA_RUN_MAGIC);
                   1570:        assert(run->regs_minelm < bin->regs_mask_nelms);
                   1571:
                   1572:        /*
                   1573:         * Move the first check outside the loop, so that run->regs_minelm can
                   1574:         * be updated unconditionally, without the possibility of updating it
                   1575:         * multiple times.
                   1576:         */
                   1577:        i = run->regs_minelm;
                   1578:        mask = run->regs_mask[i];
                   1579:        if (mask != 0) {
                   1580:                /* Usable allocation found. */
                   1581:                bit = ffs((int)mask) - 1;
                   1582:
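                /*
                 * Each regs_mask element tracks 1 << (SIZEOF_INT_2POW + 3)
                 * regions (32 for 4-byte ints), so with 32-bit ints a set
                 * bit 'bit' in element 'i' denotes free region number
                 * (i * 32) + bit.
                 */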
                   1583:                regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
                   1584:                ret = (void *)(((uintptr_t)run) + bin->reg0_offset
                   1585:                    + (bin->reg_size * regind));
                   1586:
                   1587:                /* Clear bit. */
                   1588:                mask ^= (1 << bit);
                   1589:                run->regs_mask[i] = mask;
                   1590:
                   1591:                return (ret);
                   1592:        }
                   1593:
                   1594:        for (i++; i < bin->regs_mask_nelms; i++) {
                   1595:                mask = run->regs_mask[i];
                   1596:                if (mask != 0) {
                   1597:                        /* Usable allocation found. */
                   1598:                        bit = ffs((int)mask) - 1;
                   1599:
                   1600:                        regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
                   1601:                        ret = (void *)(((uintptr_t)run) + bin->reg0_offset
                   1602:                            + (bin->reg_size * regind));
                   1603:
                   1604:                        /* Clear bit. */
                   1605:                        mask ^= (1 << bit);
                   1606:                        run->regs_mask[i] = mask;
                   1607:
                   1608:                        /*
                   1609:                         * Make a note that nothing before this element
                   1610:                         * contains a free region.
                   1611:                         */
                   1612:                        run->regs_minelm = i; /* Low payoff: + (mask == 0); */
                   1613:
                   1614:                        return (ret);
                   1615:                }
                   1616:        }
                   1617:        /* Not reached. */
1.7       yamt     1618:        /* LINTED */
1.1       ad       1619:        assert(0);
                   1620:        return (NULL);
                   1621: }
                   1622:
                   1623: static inline void
                   1624: arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
                   1625: {
                   1626:        /*
                    1627:         * To divide by a number D that is not a power of two we multiply
                    1628:         * by ((2^21 / D) + 1) and then right shift by 21 positions.
                   1629:         *
                   1630:         *   X / D
                   1631:         *
                   1632:         * becomes
                   1633:         *
                   1634:         *   (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
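         *
         * Worked example (illustrative; suppose QUANTUM_2POW_MIN == 4,
         * so D == 48 uses SIZE_INV(3) == (2^21 / 48) + 1 == 43691):
         * X == 96 gives (96 * 43691) >> 21 == 2, matching 96 / 48.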
                   1635:         */
                   1636: #define SIZE_INV_SHIFT 21
                   1637: #define SIZE_INV(s) (((1 << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
                   1638:        static const unsigned size_invs[] = {
                   1639:            SIZE_INV(3),
                   1640:            SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
                   1641:            SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
                    1642:            SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
                    1643:            SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
                    1644:            SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
                    1645:            SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
                    1646:            SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
                   1647: #if (QUANTUM_2POW_MIN < 4)
                   1648:            ,
                   1649:            SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
                   1650:            SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
                   1651:            SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
                   1652:            SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
                   1653:            SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
                   1654:            SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
                   1655:            SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
                   1656:            SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
                   1657: #endif
                   1658:        };
                   1659:        unsigned diff, regind, elm, bit;
                   1660:
1.7       yamt     1661:        /* LINTED */
1.1       ad       1662:        assert(run->magic == ARENA_RUN_MAGIC);
                   1663:        assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
                   1664:            >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
                   1665:
                   1666:        /*
                   1667:         * Avoid doing division with a variable divisor if possible.  Using
                   1668:         * actual division here can reduce allocator throughput by over 20%!
                   1669:         */
                   1670:        diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
                   1671:        if ((size & (size - 1)) == 0) {
                   1672:                /*
                    1673:                 * log2_table allows fast division by any power of two in
                    1674:                 * the [1..128] range.
                   1675:                 *
                   1676:                 * (x / divisor) becomes (x >> log2_table[divisor - 1]).
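                 *
                 * For example, divisor == 16 gives log2_table[15] == 4,
                 * so (x / 16) becomes (x >> 4).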
                   1677:                 */
                   1678:                static const unsigned char log2_table[] = {
                   1679:                    0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
                   1680:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
                   1681:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   1682:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
                   1683:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   1684:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   1685:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   1686:                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
                   1687:                };
                   1688:
                   1689:                if (size <= 128)
                   1690:                        regind = (diff >> log2_table[size - 1]);
                   1691:                else if (size <= 32768)
                   1692:                        regind = diff >> (8 + log2_table[(size >> 8) - 1]);
                   1693:                else {
                   1694:                        /*
                   1695:                         * The page size is too large for us to use the lookup
                   1696:                         * table.  Use real division.
                   1697:                         */
1.9       christos 1698:                        regind = (unsigned)(diff / size);
1.1       ad       1699:                }
                   1700:        } else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
                   1701:            << QUANTUM_2POW_MIN) + 2) {
                   1702:                regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
                   1703:                regind >>= SIZE_INV_SHIFT;
                   1704:        } else {
                   1705:                /*
                   1706:                 * size_invs isn't large enough to handle this size class, so
                   1707:                 * calculate regind using actual division.  This only happens
                   1708:                 * if the user increases small_max via the 'S' runtime
                   1709:                 * configuration option.
                   1710:                 */
1.9       christos 1711:                regind = (unsigned)(diff / size);
1.1       ad       1712:        }
                   1713:        assert(diff == regind * size);
                   1714:        assert(regind < bin->nregs);
                   1715:
                   1716:        elm = regind >> (SIZEOF_INT_2POW + 3);
                   1717:        if (elm < run->regs_minelm)
                   1718:                run->regs_minelm = elm;
                   1719:        bit = regind - (elm << (SIZEOF_INT_2POW + 3));
                   1720:        assert((run->regs_mask[elm] & (1 << bit)) == 0);
                   1721:        run->regs_mask[elm] |= (1 << bit);
                   1722: #undef SIZE_INV
                   1723: #undef SIZE_INV_SHIFT
                   1724: }
                   1725:
                   1726: static void
                   1727: arena_run_split(arena_t *arena, arena_run_t *run, size_t size)
                   1728: {
                   1729:        arena_chunk_t *chunk;
                   1730:        unsigned run_ind, map_offset, total_pages, need_pages, rem_pages;
                   1731:        unsigned i;
                   1732:
                   1733:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
                   1734:        run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
                   1735:            >> pagesize_2pow);
                   1736:        total_pages = chunk->map[run_ind].npages;
1.9       christos 1737:        need_pages = (unsigned)(size >> pagesize_2pow);
1.1       ad       1738:        assert(need_pages <= total_pages);
                   1739:        rem_pages = total_pages - need_pages;
                   1740:
                   1741:        /* Split enough pages from the front of run to fit allocation size. */
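        /*
         * Example (illustrative): a 3-page allocation at page index 10
         * sets map[10..12].npages = 3 with pos = 0, 1, 2; a trailing
         * free run records npages and POS_FREE only at its two ends.
         */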
                   1742:        map_offset = run_ind;
                   1743:        for (i = 0; i < need_pages; i++) {
                   1744:                chunk->map[map_offset + i].npages = need_pages;
                   1745:                chunk->map[map_offset + i].pos = i;
                   1746:        }
                   1747:
                   1748:        /* Keep track of trailing unused pages for later use. */
                   1749:        if (rem_pages > 0) {
                   1750:                /* Update map for trailing pages. */
                   1751:                map_offset += need_pages;
                   1752:                chunk->map[map_offset].npages = rem_pages;
                   1753:                chunk->map[map_offset].pos = POS_FREE;
                   1754:                chunk->map[map_offset + rem_pages - 1].npages = rem_pages;
                   1755:                chunk->map[map_offset + rem_pages - 1].pos = POS_FREE;
                   1756:        }
                   1757:
                   1758:        chunk->pages_used += need_pages;
                   1759: }
                   1760:
                   1761: static arena_chunk_t *
                   1762: arena_chunk_alloc(arena_t *arena)
                   1763: {
                   1764:        arena_chunk_t *chunk;
                   1765:
                   1766:        if (arena->spare != NULL) {
                   1767:                chunk = arena->spare;
                   1768:                arena->spare = NULL;
                   1769:
1.2       ad       1770:                /* LINTED */
1.1       ad       1771:                RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
                   1772:        } else {
                   1773:                chunk = (arena_chunk_t *)chunk_alloc(chunksize);
                   1774:                if (chunk == NULL)
                   1775:                        return (NULL);
                   1776: #ifdef MALLOC_STATS
                   1777:                arena->stats.mapped += chunksize;
                   1778: #endif
                   1779:
                   1780:                chunk->arena = arena;
                   1781:
1.2       ad       1782:                /* LINTED */
1.1       ad       1783:                RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
                   1784:
                   1785:                /*
                   1786:                 * Claim that no pages are in use, since the header is merely
                   1787:                 * overhead.
                   1788:                 */
                   1789:                chunk->pages_used = 0;
                   1790:
                   1791:                chunk->max_frun_npages = chunk_npages -
                   1792:                    arena_chunk_header_npages;
                   1793:                chunk->min_frun_ind = arena_chunk_header_npages;
                   1794:
                   1795:                /*
                   1796:                 * Initialize enough of the map to support one maximal free run.
                   1797:                 */
                   1798:                chunk->map[arena_chunk_header_npages].npages = chunk_npages -
                   1799:                    arena_chunk_header_npages;
                   1800:                chunk->map[arena_chunk_header_npages].pos = POS_FREE;
                   1801:                chunk->map[chunk_npages - 1].npages = chunk_npages -
                   1802:                    arena_chunk_header_npages;
                   1803:                chunk->map[chunk_npages - 1].pos = POS_FREE;
                   1804:        }
                   1805:
                   1806:        return (chunk);
                   1807: }
                   1808:
                   1809: static void
                   1810: arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
                   1811: {
                   1812:
                   1813:        /*
                   1814:         * Remove chunk from the chunk tree, regardless of whether this chunk
                   1815:         * will be cached, so that the arena does not use it.
                   1816:         */
1.2       ad       1817:        /* LINTED */
1.1       ad       1818:        RB_REMOVE(arena_chunk_tree_s, &chunk->arena->chunks, chunk);
                   1819:
                   1820:        if (opt_hint == false) {
                   1821:                if (arena->spare != NULL) {
                   1822:                        chunk_dealloc((void *)arena->spare, chunksize);
                   1823: #ifdef MALLOC_STATS
                   1824:                        arena->stats.mapped -= chunksize;
                   1825: #endif
                   1826:                }
                   1827:                arena->spare = chunk;
                   1828:        } else {
                   1829:                assert(arena->spare == NULL);
                   1830:                chunk_dealloc((void *)chunk, chunksize);
                   1831: #ifdef MALLOC_STATS
                   1832:                arena->stats.mapped -= chunksize;
                   1833: #endif
                   1834:        }
                   1835: }
                   1836:
                   1837: static arena_run_t *
                   1838: arena_run_alloc(arena_t *arena, size_t size)
                   1839: {
                   1840:        arena_chunk_t *chunk;
                   1841:        arena_run_t *run;
                   1842:        unsigned need_npages, limit_pages, compl_need_npages;
                   1843:
                   1844:        assert(size <= (chunksize - (arena_chunk_header_npages <<
                   1845:            pagesize_2pow)));
                   1846:        assert((size & pagesize_mask) == 0);
                   1847:
                   1848:        /*
                    1849:         * Search through the arena's chunks in address order for a free run
                    1850:         * that is large enough.  Look for the first fit.
                   1851:         */
1.9       christos 1852:        need_npages = (unsigned)(size >> pagesize_2pow);
1.1       ad       1853:        limit_pages = chunk_npages - arena_chunk_header_npages;
                   1854:        compl_need_npages = limit_pages - need_npages;
1.2       ad       1855:        /* LINTED */
1.1       ad       1856:        RB_FOREACH(chunk, arena_chunk_tree_s, &arena->chunks) {
                   1857:                /*
                   1858:                 * Avoid searching this chunk if there are not enough
                   1859:                 * contiguous free pages for there to possibly be a large
                   1860:                 * enough free run.
                   1861:                 */
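                /*
                 * pages_used <= compl_need_npages is equivalent to the
                 * chunk having at least need_npages free pages in total,
                 * and max_frun_npages is a cached upper bound on the
                 * largest free run.
                 */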
                   1862:                if (chunk->pages_used <= compl_need_npages &&
                   1863:                    need_npages <= chunk->max_frun_npages) {
                   1864:                        arena_chunk_map_t *mapelm;
                   1865:                        unsigned i;
                   1866:                        unsigned max_frun_npages = 0;
                   1867:                        unsigned min_frun_ind = chunk_npages;
                   1868:
                   1869:                        assert(chunk->min_frun_ind >=
                   1870:                            arena_chunk_header_npages);
                   1871:                        for (i = chunk->min_frun_ind; i < chunk_npages;) {
                   1872:                                mapelm = &chunk->map[i];
                   1873:                                if (mapelm->pos == POS_FREE) {
                   1874:                                        if (mapelm->npages >= need_npages) {
                   1875:                                                run = (arena_run_t *)
                   1876:                                                    ((uintptr_t)chunk + (i <<
                   1877:                                                    pagesize_2pow));
                   1878:                                                /* Update page map. */
                   1879:                                                arena_run_split(arena, run,
                   1880:                                                    size);
                   1881:                                                return (run);
                   1882:                                        }
                   1883:                                        if (mapelm->npages >
                   1884:                                            max_frun_npages) {
                   1885:                                                max_frun_npages =
                   1886:                                                    mapelm->npages;
                   1887:                                        }
                   1888:                                        if (i < min_frun_ind) {
                   1889:                                                min_frun_ind = i;
                   1890:                                                if (i < chunk->min_frun_ind)
                   1891:                                                        chunk->min_frun_ind = i;
                   1892:                                        }
                   1893:                                }
                   1894:                                i += mapelm->npages;
                   1895:                        }
                   1896:                        /*
                   1897:                         * Search failure.  Reset cached chunk->max_frun_npages.
                   1898:                         * chunk->min_frun_ind was already reset above (if
                   1899:                         * necessary).
                   1900:                         */
                   1901:                        chunk->max_frun_npages = max_frun_npages;
                   1902:                }
                   1903:        }
                   1904:
                   1905:        /*
                   1906:         * No usable runs.  Create a new chunk from which to allocate the run.
                   1907:         */
                   1908:        chunk = arena_chunk_alloc(arena);
                   1909:        if (chunk == NULL)
                   1910:                return (NULL);
                   1911:        run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
                   1912:            pagesize_2pow));
                   1913:        /* Update page map. */
                   1914:        arena_run_split(arena, run, size);
                   1915:        return (run);
                   1916: }
                   1917:
                   1918: static void
                   1919: arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size)
                   1920: {
                   1921:        arena_chunk_t *chunk;
                   1922:        unsigned run_ind, run_pages;
                   1923:
                   1924:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
                   1925:
                   1926:        run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
                   1927:            >> pagesize_2pow);
                   1928:        assert(run_ind >= arena_chunk_header_npages);
                   1929:        assert(run_ind < (chunksize >> pagesize_2pow));
1.9       christos 1930:        run_pages = (unsigned)(size >> pagesize_2pow);
1.1       ad       1931:        assert(run_pages == chunk->map[run_ind].npages);
                   1932:
                   1933:        /* Subtract pages from count of pages used in chunk. */
                   1934:        chunk->pages_used -= run_pages;
                   1935:
                   1936:        /* Mark run as deallocated. */
                   1937:        assert(chunk->map[run_ind].npages == run_pages);
                   1938:        chunk->map[run_ind].pos = POS_FREE;
                   1939:        assert(chunk->map[run_ind + run_pages - 1].npages == run_pages);
                   1940:        chunk->map[run_ind + run_pages - 1].pos = POS_FREE;
                   1941:
                   1942:        /*
                   1943:         * Tell the kernel that we don't need the data in this run, but only if
                   1944:         * requested via runtime configuration.
                   1945:         */
                   1946:        if (opt_hint)
                   1947:                madvise(run, size, MADV_FREE);
                   1948:
                   1949:        /* Try to coalesce with neighboring runs. */
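        /*
         * Example (illustrative): freeing pages [5..6] while [3..4] and
         * [7..9] are free yields a single 7-page free run spanning
         * [3..9], with npages recorded at map[3] and map[9].
         */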
                   1950:        if (run_ind > arena_chunk_header_npages &&
                   1951:            chunk->map[run_ind - 1].pos == POS_FREE) {
                   1952:                unsigned prev_npages;
                   1953:
                   1954:                /* Coalesce with previous run. */
                   1955:                prev_npages = chunk->map[run_ind - 1].npages;
                   1956:                run_ind -= prev_npages;
                   1957:                assert(chunk->map[run_ind].npages == prev_npages);
                   1958:                assert(chunk->map[run_ind].pos == POS_FREE);
                   1959:                run_pages += prev_npages;
                   1960:
                   1961:                chunk->map[run_ind].npages = run_pages;
                   1962:                assert(chunk->map[run_ind].pos == POS_FREE);
                   1963:                chunk->map[run_ind + run_pages - 1].npages = run_pages;
                   1964:                assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
                   1965:        }
                   1966:
                   1967:        if (run_ind + run_pages < chunk_npages &&
                   1968:            chunk->map[run_ind + run_pages].pos == POS_FREE) {
                   1969:                unsigned next_npages;
                   1970:
                   1971:                /* Coalesce with next run. */
                   1972:                next_npages = chunk->map[run_ind + run_pages].npages;
                   1973:                run_pages += next_npages;
                   1974:                assert(chunk->map[run_ind + run_pages - 1].npages ==
                   1975:                    next_npages);
                   1976:                assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
                   1977:
                   1978:                chunk->map[run_ind].npages = run_pages;
                   1979:                chunk->map[run_ind].pos = POS_FREE;
                   1980:                chunk->map[run_ind + run_pages - 1].npages = run_pages;
                   1981:                assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
                   1982:        }
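                                /*
                                 * Worked example (hypothetical page indices): freeing a
                                 * 2-page run at pages 6-7 when a free 3-page run ends at
                                 * page 5 and a free 4-page run starts at page 8 leaves a
                                 * single 9-page free run covering pages 3-11, with
                                 * npages == 9 recorded in both map[3] and map[11].
                                 */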
                   1983:
                   1984:        if (chunk->map[run_ind].npages > chunk->max_frun_npages)
                   1985:                chunk->max_frun_npages = chunk->map[run_ind].npages;
                   1986:        if (run_ind < chunk->min_frun_ind)
                   1987:                chunk->min_frun_ind = run_ind;
                   1988:
                   1989:        /* Deallocate chunk if it is now completely unused. */
                   1990:        if (chunk->pages_used == 0)
                   1991:                arena_chunk_dealloc(arena, chunk);
                   1992: }
                   1993:
                   1994: static arena_run_t *
                   1995: arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
                   1996: {
                   1997:        arena_run_t *run;
                   1998:        unsigned i, remainder;
                   1999:
                   2000:        /* Look for a usable run. */
1.2       ad       2001:        /* LINTED */
1.1       ad       2002:        if ((run = RB_MIN(arena_run_tree_s, &bin->runs)) != NULL) {
                   2003:                /* run is guaranteed to have available space. */
1.2       ad       2004:                /* LINTED */
1.1       ad       2005:                RB_REMOVE(arena_run_tree_s, &bin->runs, run);
                   2006: #ifdef MALLOC_STATS
                   2007:                bin->stats.reruns++;
                   2008: #endif
                   2009:                return (run);
                   2010:        }
                   2011:        /* No existing runs have any space available. */
                   2012:
                   2013:        /* Allocate a new run. */
                   2014:        run = arena_run_alloc(arena, bin->run_size);
                   2015:        if (run == NULL)
                   2016:                return (NULL);
                   2017:
                   2018:        /* Initialize run internals. */
                   2019:        run->bin = bin;
                   2020:
                   2021:        for (i = 0; i < bin->regs_mask_nelms - 1; i++)
                   2022:                run->regs_mask[i] = UINT_MAX;
                   2023:        remainder = bin->nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1);
                   2024:        if (remainder == 0)
                                        run->regs_mask[i] = UINT_MAX;
                                else {
                   2025:                /* The last element has spare bits that need to be unset. */
                   2026:                run->regs_mask[i] = (UINT_MAX >> ((1 << (SIZEOF_INT_2POW + 3))
                   2027:                    - remainder));
                   2028:        }
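                                /*
                                 * Example of the mask setup above (hypothetical values,
                                 * assuming 32-bit ints, i.e. SIZEOF_INT_2POW == 2): for
                                 * bin->nregs == 100, regs_mask_nelms == 4 and
                                 * remainder == 100 & 31 == 4, so elements 0..2 are set to
                                 * UINT_MAX and the final element becomes
                                 * UINT_MAX >> (32 - 4) == 0xf, exposing only regions
                                 * 96..99.
                                 */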
                   2029:
                   2030:        run->regs_minelm = 0;
                   2031:
                   2032:        run->nfree = bin->nregs;
                   2033: #ifdef MALLOC_DEBUG
                   2034:        run->magic = ARENA_RUN_MAGIC;
                   2035: #endif
                   2036:
                   2037: #ifdef MALLOC_STATS
                   2038:        bin->stats.nruns++;
                   2039:        bin->stats.curruns++;
                   2040:        if (bin->stats.curruns > bin->stats.highruns)
                   2041:                bin->stats.highruns = bin->stats.curruns;
                   2042: #endif
                   2043:        return (run);
                   2044: }
                   2045:
                   2046: /* bin->runcur must have space available before this function is called. */
                   2047: static inline void *
                   2048: arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
                   2049: {
                   2050:        void *ret;
                   2051:
                   2052:        assert(run->magic == ARENA_RUN_MAGIC);
                   2053:        assert(run->nfree > 0);
                   2054:
                   2055:        ret = arena_run_reg_alloc(run, bin);
                   2056:        assert(ret != NULL);
                   2057:        run->nfree--;
                   2058:
                   2059:        return (ret);
                   2060: }
                   2061:
                   2062: /* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
                   2063: static void *
                   2064: arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
                   2065: {
                   2066:
                   2067:        bin->runcur = arena_bin_nonfull_run_get(arena, bin);
                   2068:        if (bin->runcur == NULL)
                   2069:                return (NULL);
                   2070:        assert(bin->runcur->magic == ARENA_RUN_MAGIC);
                   2071:        assert(bin->runcur->nfree > 0);
                   2072:
                   2073:        return (arena_bin_malloc_easy(arena, bin, bin->runcur));
                   2074: }
                   2075:
                   2076: /*
                   2077:  * Calculate bin->run_size such that it meets the following constraints:
                   2078:  *
                   2079:  *   *) bin->run_size >= min_run_size
                   2080:  *   *) bin->run_size <= arena_maxclass
                   2081:  *   *) bin->run_size <= RUN_MAX_SMALL
                   2082:  *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
                   2083:  *
                   2084:  * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
                   2085:  * also calculated here, since these settings are all interdependent.
                   2086:  */
                   2087: static size_t
                   2088: arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
                   2089: {
                   2090:        size_t try_run_size, good_run_size;
                   2091:        unsigned good_nregs, good_mask_nelms, good_reg0_offset;
                   2092:        unsigned try_nregs, try_mask_nelms, try_reg0_offset;
                   2093:        float max_ovrhd = RUN_MAX_OVRHD;
                   2094:
                   2095:        assert(min_run_size >= pagesize);
                   2096:        assert(min_run_size <= arena_maxclass);
                   2097:        assert(min_run_size <= RUN_MAX_SMALL);
                   2098:
                   2099:        /*
                   2100:         * Calculate known-valid settings before entering the run_size
                   2101:         * expansion loop, so that the first part of the loop always copies
                   2102:         * valid settings.
                   2103:         *
                   2104:         * The do..while loop iteratively reduces the number of regions until
                   2105:         * the run header and the regions no longer overlap.  A closed formula
                   2106:         * would be quite messy, since there is an interdependency between the
                   2107:         * header's mask length and the number of regions.
                   2108:         */
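                                /*
                                 * Worked example (hypothetical, assuming pagesize == 4096,
                                 * sizeof(arena_run_t) == 40 and bin->reg_size == 64):
                                 * starting from try_run_size == 4096, the loop below
                                 * settles on try_nregs == 63, try_mask_nelms == 2 and
                                 * try_reg0_offset == 4096 - 63 * 64 == 64; the header
                                 * needs 40 + 4 == 44 bytes, which fits below reg0_offset,
                                 * for roughly 1.6% overhead.
                                 */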
                   2109:        try_run_size = min_run_size;
1.9       christos 2110:        try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) /
                   2111:            bin->reg_size) + 1); /* Counter-act try_nregs-- in loop. */
1.1       ad       2112:        do {
                   2113:                try_nregs--;
                   2114:                try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
                   2115:                    ((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
1.9       christos 2116:                try_reg0_offset = (unsigned)(try_run_size -
                   2117:                    (try_nregs * bin->reg_size));
1.1       ad       2118:        } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
                   2119:            > try_reg0_offset);
                   2120:
                   2121:        /* run_size expansion loop. */
                   2122:        do {
                   2123:                /*
                   2124:                 * Copy valid settings before trying more aggressive settings.
                   2125:                 */
                   2126:                good_run_size = try_run_size;
                   2127:                good_nregs = try_nregs;
                   2128:                good_mask_nelms = try_mask_nelms;
                   2129:                good_reg0_offset = try_reg0_offset;
                   2130:
                   2131:                /* Try more aggressive settings. */
                   2132:                try_run_size += pagesize;
1.9       christos 2133:                try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) /
                   2134:                    bin->reg_size) + 1); /* Counter-act try_nregs-- in loop. */
1.1       ad       2135:                do {
                   2136:                        try_nregs--;
                   2137:                        try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
                   2138:                            ((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ?
                   2139:                            1 : 0);
1.9       christos 2140:                        try_reg0_offset = (unsigned)(try_run_size - (try_nregs *
                   2141:                            bin->reg_size));
1.1       ad       2142:                } while (sizeof(arena_run_t) + (sizeof(unsigned) *
                   2143:                    (try_mask_nelms - 1)) > try_reg0_offset);
                   2144:        } while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL
                   2145:            && max_ovrhd > RUN_MAX_OVRHD_RELAX / ((float)(bin->reg_size << 3))
                   2146:            && ((float)(try_reg0_offset)) / ((float)(try_run_size)) >
                   2147:            max_ovrhd);
                   2148:
                   2149:        assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
                   2150:            <= good_reg0_offset);
                   2151:        assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
                   2152:
                   2153:        /* Copy final settings. */
                   2154:        bin->run_size = good_run_size;
                   2155:        bin->nregs = good_nregs;
                   2156:        bin->regs_mask_nelms = good_mask_nelms;
                   2157:        bin->reg0_offset = good_reg0_offset;
                   2158:
                   2159:        return (good_run_size);
                   2160: }
                   2161:
                   2162: static void *
                   2163: arena_malloc(arena_t *arena, size_t size)
                   2164: {
                   2165:        void *ret;
                   2166:
                   2167:        assert(arena != NULL);
                   2168:        assert(arena->magic == ARENA_MAGIC);
                   2169:        assert(size != 0);
                   2170:        assert(QUANTUM_CEILING(size) <= arena_maxclass);
                   2171:
                   2172:        if (size <= bin_maxclass) {
                   2173:                arena_bin_t *bin;
                   2174:                arena_run_t *run;
                   2175:
                   2176:                /* Small allocation. */
                   2177:
                   2178:                if (size < small_min) {
                   2179:                        /* Tiny. */
                   2180:                        size = pow2_ceil(size);
                   2181:                        bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
                   2182:                            1)))];
                   2183: #if (!defined(NDEBUG) || defined(MALLOC_STATS))
                   2184:                        /*
                   2185:                         * Bin calculation is always correct, but we may need
                   2186:                         * to fix size for the purposes of assertions and/or
                   2187:                         * stats accuracy.
                   2188:                         */
                   2189:                        if (size < (1 << TINY_MIN_2POW))
                   2190:                                size = (1 << TINY_MIN_2POW);
                   2191: #endif
                   2192:                } else if (size <= small_max) {
                   2193:                        /* Quantum-spaced. */
                   2194:                        size = QUANTUM_CEILING(size);
                   2195:                        bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
                   2196:                            - 1];
                   2197:                } else {
                   2198:                        /* Sub-page. */
                   2199:                        size = pow2_ceil(size);
                   2200:                        bin = &arena->bins[ntbins + nqbins
                   2201:                            + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
                   2202:                }
                   2203:                assert(size == bin->reg_size);
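                                        /*
                                         * Example of the bin lookup above (hypothetical,
                                         * assuming quantum == 16, i.e.
                                         * opt_quantum_2pow == 4): a 24-byte request in the
                                         * quantum-spaced range rounds up to size == 32 and
                                         * maps to bins[ntbins + (32 >> 4) - 1], i.e.
                                         * bins[ntbins + 1], whose reg_size is
                                         * quantum * 2 == 32.
                                         */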
                   2204:
                   2205:                malloc_mutex_lock(&arena->mtx);
                   2206:                if ((run = bin->runcur) != NULL && run->nfree > 0)
                   2207:                        ret = arena_bin_malloc_easy(arena, bin, run);
                   2208:                else
                   2209:                        ret = arena_bin_malloc_hard(arena, bin);
                   2210:
                   2211:                if (ret == NULL) {
                   2212:                        malloc_mutex_unlock(&arena->mtx);
                   2213:                        return (NULL);
                   2214:                }
                   2215:
                   2216: #ifdef MALLOC_STATS
                   2217:                bin->stats.nrequests++;
                   2218:                arena->stats.nmalloc_small++;
                   2219:                arena->stats.allocated_small += size;
                   2220: #endif
                   2221:        } else {
                   2222:                /* Large allocation. */
                   2223:                size = PAGE_CEILING(size);
                   2224:                malloc_mutex_lock(&arena->mtx);
                   2225:                ret = (void *)arena_run_alloc(arena, size);
                   2226:                if (ret == NULL) {
                   2227:                        malloc_mutex_unlock(&arena->mtx);
                   2228:                        return (NULL);
                   2229:                }
                   2230: #ifdef MALLOC_STATS
                   2231:                arena->stats.nmalloc_large++;
                   2232:                arena->stats.allocated_large += size;
                   2233: #endif
                   2234:        }
                   2235:
                   2236:        malloc_mutex_unlock(&arena->mtx);
                   2237:
                   2238:        if (opt_junk)
                   2239:                memset(ret, 0xa5, size);
                   2240:        else if (opt_zero)
                   2241:                memset(ret, 0, size);
                   2242:        return (ret);
                   2243: }
                   2244:
                   2245: static inline void
                   2246: arena_palloc_trim(arena_t *arena, arena_chunk_t *chunk, unsigned pageind,
                   2247:     unsigned npages)
                   2248: {
                   2249:        unsigned i;
                   2250:
                   2251:        assert(npages > 0);
                   2252:
                   2253:        /*
                   2254:         * Modify the map such that arena_run_dalloc() sees the run as
                   2255:         * separately allocated.
                   2256:         */
                   2257:        for (i = 0; i < npages; i++) {
                   2258:                chunk->map[pageind + i].npages = npages;
                   2259:                chunk->map[pageind + i].pos = i;
                   2260:        }
                   2261:        arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)chunk + (pageind <<
                   2262:            pagesize_2pow)), npages << pagesize_2pow);
                   2263: }
                   2264:
                   2265: /* Only handles large allocations that require more than page alignment. */
                   2266: static void *
                   2267: arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
                   2268: {
                   2269:        void *ret;
                   2270:        size_t offset;
                   2271:        arena_chunk_t *chunk;
                   2272:        unsigned pageind, i, npages;
                   2273:
                   2274:        assert((size & pagesize_mask) == 0);
                   2275:        assert((alignment & pagesize_mask) == 0);
                   2276:
1.9       christos 2277:        npages = (unsigned)(size >> pagesize_2pow);
1.1       ad       2278:
                   2279:        malloc_mutex_lock(&arena->mtx);
                   2280:        ret = (void *)arena_run_alloc(arena, alloc_size);
                   2281:        if (ret == NULL) {
                   2282:                malloc_mutex_unlock(&arena->mtx);
                   2283:                return (NULL);
                   2284:        }
                   2285:
                   2286:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
                   2287:
                   2288:        offset = (uintptr_t)ret & (alignment - 1);
                   2289:        assert((offset & pagesize_mask) == 0);
                   2290:        assert(offset < alloc_size);
                   2291:        if (offset == 0) {
1.9       christos 2292:                pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
1.1       ad       2293:                    pagesize_2pow);
                   2294:
                   2295:                /* Update the map for the run to be kept. */
                   2296:                for (i = 0; i < npages; i++) {
                   2297:                        chunk->map[pageind + i].npages = npages;
                   2298:                        assert(chunk->map[pageind + i].pos == i);
                   2299:                }
                   2300:
                   2301:                /* Trim trailing space. */
                   2302:                arena_palloc_trim(arena, chunk, pageind + npages,
1.9       christos 2303:                    (unsigned)((alloc_size - size) >> pagesize_2pow));
1.1       ad       2304:        } else {
                   2305:                size_t leadsize, trailsize;
                   2306:
                   2307:                leadsize = alignment - offset;
                   2308:                ret = (void *)((uintptr_t)ret + leadsize);
1.9       christos 2309:                pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
1.1       ad       2310:                    pagesize_2pow);
                   2311:
                   2312:                /* Update the map for the run to be kept. */
                   2313:                for (i = 0; i < npages; i++) {
                   2314:                        chunk->map[pageind + i].npages = npages;
                   2315:                        chunk->map[pageind + i].pos = i;
                   2316:                }
                   2317:
                   2318:                /* Trim leading space. */
1.9       christos 2319:                arena_palloc_trim(arena, chunk,
                   2320:                    (unsigned)(pageind - (leadsize >> pagesize_2pow)),
                   2321:                    (unsigned)(leadsize >> pagesize_2pow));
1.1       ad       2322:
                   2323:                trailsize = alloc_size - leadsize - size;
                   2324:                if (trailsize != 0) {
                   2325:                        /* Trim trailing space. */
                   2326:                        assert(trailsize < alloc_size);
                   2327:                        arena_palloc_trim(arena, chunk, pageind + npages,
1.9       christos 2328:                            (unsigned)(trailsize >> pagesize_2pow));
1.1       ad       2329:                }
                   2330:        }
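                                /*
                                 * Trimming example (hypothetical, assuming pagesize == 4096,
                                 * alignment == 8192, size == 4096 and alloc_size == 12288):
                                 * if the run lands at chunk offset 4096, then offset == 4096
                                 * and leadsize == 4096, so the kept run moves up to offset
                                 * 8192, and the one leading page and one trailing page are
                                 * both returned via arena_palloc_trim().
                                 */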
                   2331:
                   2332: #ifdef MALLOC_STATS
                   2333:        arena->stats.nmalloc_large++;
                   2334:        arena->stats.allocated_large += size;
                   2335: #endif
                   2336:        malloc_mutex_unlock(&arena->mtx);
                   2337:
                   2338:        if (opt_junk)
                   2339:                memset(ret, 0xa5, size);
                   2340:        else if (opt_zero)
                   2341:                memset(ret, 0, size);
                   2342:        return (ret);
                   2343: }
                   2344:
                   2345: /* Return the size of the allocation pointed to by ptr. */
                   2346: static size_t
                   2347: arena_salloc(const void *ptr)
                   2348: {
                   2349:        size_t ret;
                   2350:        arena_chunk_t *chunk;
                   2351:        arena_chunk_map_t *mapelm;
                   2352:        unsigned pageind;
                   2353:
                   2354:        assert(ptr != NULL);
                   2355:        assert(CHUNK_ADDR2BASE(ptr) != ptr);
                   2356:
                   2357:        /*
                   2358:         * No arena data structures that we query here can change in a way that
                   2359:         * affects this function, so we don't need to lock.
                   2360:         */
                   2361:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1.9       christos 2362:        pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
                   2363:            pagesize_2pow);
1.1       ad       2364:        mapelm = &chunk->map[pageind];
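                                /*
                                 * A large allocation always begins on a page boundary and
                                 * has pos == 0 in its first map element; any other pointer
                                 * must be a region within a small run, whose header page is
                                 * found by walking back pos pages.
                                 */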
1.10      simonb   2365:        if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
                   2366:            pagesize_2pow)) {
                   2367:                arena_run_t *run;
1.1       ad       2368:
                   2369:                pageind -= mapelm->pos;
                   2370:
1.10      simonb   2371:                run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
                   2372:                    pagesize_2pow));
1.1       ad       2373:                assert(run->magic == ARENA_RUN_MAGIC);
                   2374:                ret = run->bin->reg_size;
                   2375:        } else
                   2376:                ret = mapelm->npages << pagesize_2pow;
                   2377:
                   2378:        return (ret);
                   2379: }
                   2380:
                   2381: static void *
                   2382: arena_ralloc(void *ptr, size_t size, size_t oldsize)
                   2383: {
                   2384:        void *ret;
                   2385:
                   2386:        /* Avoid moving the allocation if the size class would not change. */
                   2387:        if (size < small_min) {
                   2388:                if (oldsize < small_min &&
                   2389:                    ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
                   2390:                    == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
                   2391:                        goto IN_PLACE;
                   2392:        } else if (size <= small_max) {
                   2393:                if (oldsize >= small_min && oldsize <= small_max &&
                   2394:                    (QUANTUM_CEILING(size) >> opt_quantum_2pow)
                   2395:                    == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
                   2396:                        goto IN_PLACE;
                   2397:        } else {
                   2398:                /*
                   2399:                 * We make no attempt to resize runs here, though it would be
                   2400:                 * possible to do so.
                   2401:                 */
                   2402:                if (oldsize > small_max && PAGE_CEILING(size) == oldsize)
                   2403:                        goto IN_PLACE;
                   2404:        }
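                                /*
                                 * Example (hypothetical, assuming quantum == 16 and all
                                 * sizes in the quantum-spaced range): growing a 20-byte
                                 * allocation to 30 bytes stays in place, since both round
                                 * up to the 32-byte class, whereas growing it to 40 bytes
                                 * needs the 48-byte class and falls through to the copying
                                 * path below.
                                 */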
                   2405:
                   2406:        /*
                   2407:         * If we get here, then size and oldsize are different enough that we
                   2408:         * need to use a different size class.  In that case, fall back to
                   2409:         * allocating new space and copying.
                   2410:         */
                   2411:        ret = arena_malloc(choose_arena(), size);
                   2412:        if (ret == NULL)
                   2413:                return (NULL);
                   2414:
                   2415:        /* Junk/zero-filling were already done by arena_malloc(). */
                   2416:        if (size < oldsize)
                   2417:                memcpy(ret, ptr, size);
                   2418:        else
                   2419:                memcpy(ret, ptr, oldsize);
                   2420:        idalloc(ptr);
                   2421:        return (ret);
                   2422: IN_PLACE:
                   2423:        if (opt_junk && size < oldsize)
                   2424:                memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
                   2425:        else if (opt_zero && size > oldsize)
                   2426:                memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
                   2427:        return (ptr);
                   2428: }
                   2429:
                   2430: static void
                   2431: arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
                   2432: {
                   2433:        unsigned pageind;
                   2434:        arena_chunk_map_t *mapelm;
                   2435:        size_t size;
                   2436:
                   2437:        assert(arena != NULL);
                   2438:        assert(arena->magic == ARENA_MAGIC);
                   2439:        assert(chunk->arena == arena);
                   2440:        assert(ptr != NULL);
                   2441:        assert(CHUNK_ADDR2BASE(ptr) != ptr);
                   2442:
1.9       christos 2443:        pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
                   2444:            pagesize_2pow);
1.1       ad       2445:        mapelm = &chunk->map[pageind];
1.10      simonb   2446:        if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
                   2447:            pagesize_2pow)) {
                   2448:                arena_run_t *run;
1.1       ad       2449:                arena_bin_t *bin;
                   2450:
                   2451:                /* Small allocation. */
                   2452:
                   2453:                pageind -= mapelm->pos;
                   2454:
1.10      simonb   2455:                run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
                   2456:                    pagesize_2pow));
1.1       ad       2457:                assert(run->magic == ARENA_RUN_MAGIC);
                   2458:                bin = run->bin;
                   2459:                size = bin->reg_size;
                   2460:
                   2461:                if (opt_junk)
                   2462:                        memset(ptr, 0x5a, size);
                   2463:
                   2464:                malloc_mutex_lock(&arena->mtx);
                   2465:                arena_run_reg_dalloc(run, bin, ptr, size);
                   2466:                run->nfree++;
                   2467:
                   2468:                if (run->nfree == bin->nregs) {
                   2469:                        /* Deallocate run. */
                   2470:                        if (run == bin->runcur)
                   2471:                                bin->runcur = NULL;
                   2472:                        else if (bin->nregs != 1) {
                   2473:                                /*
                   2474:                                 * This block's conditional is necessary because
                   2475:                                 * if the run only contains one region, then it
                   2476:                                 * never gets inserted into the non-full runs
                   2477:                                 * tree.
                   2478:                                 */
1.2       ad       2479:                                /* LINTED */
1.1       ad       2480:                                RB_REMOVE(arena_run_tree_s, &bin->runs, run);
                   2481:                        }
                   2482: #ifdef MALLOC_DEBUG
                   2483:                        run->magic = 0;
                   2484: #endif
                   2485:                        arena_run_dalloc(arena, run, bin->run_size);
                   2486: #ifdef MALLOC_STATS
                   2487:                        bin->stats.curruns--;
                   2488: #endif
                   2489:                } else if (run->nfree == 1 && run != bin->runcur) {
                   2490:                        /*
                   2491:                         * Make sure that bin->runcur always refers to the
                   2492:                         * lowest non-full run, if one exists.
                   2493:                         */
                   2494:                        if (bin->runcur == NULL)
                   2495:                                bin->runcur = run;
                   2496:                        else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
                   2497:                                /* Switch runcur. */
                   2498:                                if (bin->runcur->nfree > 0) {
                   2499:                                        /* Insert runcur. */
1.2       ad       2500:                                        /* LINTED */
1.1       ad       2501:                                        RB_INSERT(arena_run_tree_s, &bin->runs,
                   2502:                                            bin->runcur);
                   2503:                                }
                   2504:                                bin->runcur = run;
1.2       ad       2505:                        } else {
                   2506:                                /* LINTED */
1.1       ad       2507:                                RB_INSERT(arena_run_tree_s, &bin->runs, run);
1.2       ad       2508:                        }
1.1       ad       2509:                }
                   2510: #ifdef MALLOC_STATS
                   2511:                arena->stats.allocated_small -= size;
                   2512:                arena->stats.ndalloc_small++;
                   2513: #endif
                   2514:        } else {
                   2515:                /* Large allocation. */
                   2516:
                   2517:                size = mapelm->npages << pagesize_2pow;
                   2518:                assert((((uintptr_t)ptr) & pagesize_mask) == 0);
                   2519:
                   2520:                if (opt_junk)
                   2521:                        memset(ptr, 0x5a, size);
                   2522:
                   2523:                malloc_mutex_lock(&arena->mtx);
                   2524:                arena_run_dalloc(arena, (arena_run_t *)ptr, size);
                   2525: #ifdef MALLOC_STATS
                   2526:                arena->stats.allocated_large -= size;
                   2527:                arena->stats.ndalloc_large++;
                   2528: #endif
                   2529:        }
                   2530:
                   2531:        malloc_mutex_unlock(&arena->mtx);
                   2532: }
                   2533:
                   2534: static bool
                   2535: arena_new(arena_t *arena)
                   2536: {
                   2537:        unsigned i;
                   2538:        arena_bin_t *bin;
1.2       ad       2539:        size_t prev_run_size;
1.1       ad       2540:
                   2541:        malloc_mutex_init(&arena->mtx);
                   2542:
                   2543: #ifdef MALLOC_STATS
                   2544:        memset(&arena->stats, 0, sizeof(arena_stats_t));
                   2545: #endif
                   2546:
                   2547:        /* Initialize chunks. */
                   2548:        RB_INIT(&arena->chunks);
                   2549:        arena->spare = NULL;
                   2550:
                   2551:        /* Initialize bins. */
                   2552:        prev_run_size = pagesize;
                   2553:
                   2554:        /* (2^n)-spaced tiny bins. */
                   2555:        for (i = 0; i < ntbins; i++) {
                   2556:                bin = &arena->bins[i];
                   2557:                bin->runcur = NULL;
                   2558:                RB_INIT(&bin->runs);
                   2559:
                   2560:                bin->reg_size = (1 << (TINY_MIN_2POW + i));
                   2561:                prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
                   2562:
                   2563: #ifdef MALLOC_STATS
                   2564:                memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
                   2565: #endif
                   2566:        }
                   2567:
                   2568:        /* Quantum-spaced bins. */
                   2569:        for (; i < ntbins + nqbins; i++) {
                   2570:                bin = &arena->bins[i];
                   2571:                bin->runcur = NULL;
                   2572:                RB_INIT(&bin->runs);
                   2573:
                   2574:                bin->reg_size = quantum * (i - ntbins + 1);
1.2       ad       2575: /*
1.1       ad       2576:                pow2_size = pow2_ceil(quantum * (i - ntbins + 1));
1.2       ad       2577: */
1.1       ad       2578:                prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
                   2579:
                   2580: #ifdef MALLOC_STATS
                   2581:                memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
                   2582: #endif
                   2583:        }
                   2584:
                   2585:        /* (2^n)-spaced sub-page bins. */
                   2586:        for (; i < ntbins + nqbins + nsbins; i++) {
                   2587:                bin = &arena->bins[i];
                   2588:                bin->runcur = NULL;
                   2589:                RB_INIT(&bin->runs);
                   2590:
                   2591:                bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
                   2592:
                   2593:                prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
                   2594:
                   2595: #ifdef MALLOC_STATS
                   2596:                memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
                   2597: #endif
                   2598:        }
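                                /*
                                 * Example of the resulting size classes (hypothetical,
                                 * assuming TINY_MIN_2POW == 3, quantum == 16,
                                 * small_max == 512 and pagesize == 4096): one tiny bin of
                                 * size 8, quantum-spaced bins of 16, 32, ..., 512, and
                                 * sub-page bins of 1024 and 2048.
                                 */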
                   2599:
                   2600: #ifdef MALLOC_DEBUG
                   2601:        arena->magic = ARENA_MAGIC;
                   2602: #endif
                   2603:
                   2604:        return (false);
                   2605: }
                   2606:
                   2607: /* Create a new arena and insert it into the arenas array at index ind. */
                   2608: static arena_t *
                   2609: arenas_extend(unsigned ind)
                   2610: {
                   2611:        arena_t *ret;
                   2612:
                   2613:        /* Allocate enough space for trailing bins. */
                   2614:        ret = (arena_t *)base_alloc(sizeof(arena_t)
                   2615:            + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
                   2616:        if (ret != NULL && arena_new(ret) == false) {
                   2617:                arenas[ind] = ret;
                   2618:                return (ret);
                   2619:        }
                   2620:        /* Only reached if there is an OOM error. */
                   2621:
                   2622:        /*
                   2623:         * OOM here is quite inconvenient to propagate, since dealing with it
                   2624:         * would require a check for failure in the fast path.  Instead, punt
                   2625:         * by using arenas[0].  In practice, this is an extremely unlikely
                   2626:         * failure.
                   2627:         */
                   2628:        _malloc_message(_getprogname(),
                   2629:            ": (malloc) Error initializing arena\n", "", "");
                   2630:        if (opt_abort)
                   2631:                abort();
                   2632:
                   2633:        return (arenas[0]);
                   2634: }
                   2635:
                   2636: /*
                   2637:  * End arena.
                   2638:  */
                   2639: /******************************************************************************/
                   2640: /*
                   2641:  * Begin general internal functions.
                   2642:  */
                   2643:
                   2644: static void *
                   2645: huge_malloc(size_t size)
                   2646: {
                   2647:        void *ret;
                   2648:        size_t csize;
                   2649:        chunk_node_t *node;
                   2650:
                   2651:        /* Allocate one or more contiguous chunks for this request. */
                   2652:
                   2653:        csize = CHUNK_CEILING(size);
                   2654:        if (csize == 0) {
                   2655:                /* size is large enough to cause size_t wrap-around. */
                   2656:                return (NULL);
                   2657:        }
                   2658:
                   2659:        /* Allocate a chunk node with which to track the chunk. */
                   2660:        node = base_chunk_node_alloc();
                   2661:        if (node == NULL)
                   2662:                return (NULL);
                   2663:
                   2664:        ret = chunk_alloc(csize);
                   2665:        if (ret == NULL) {
                   2666:                base_chunk_node_dealloc(node);
                   2667:                return (NULL);
                   2668:        }
                   2669:
                   2670:        /* Insert node into huge. */
                   2671:        node->chunk = ret;
                   2672:        node->size = csize;
                   2673:
                   2674:        malloc_mutex_lock(&chunks_mtx);
                   2675:        RB_INSERT(chunk_tree_s, &huge, node);
                   2676: #ifdef MALLOC_STATS
                   2677:        huge_nmalloc++;
                   2678:        huge_allocated += csize;
                   2679: #endif
                   2680:        malloc_mutex_unlock(&chunks_mtx);
                   2681:
                   2682:        if (opt_junk)
                   2683:                memset(ret, 0xa5, csize);
                   2684:        else if (opt_zero)
                   2685:                memset(ret, 0, csize);
                   2686:
                   2687:        return (ret);
                   2688: }
                   2689:
                   2690: /* Only handles large allocations that require more than chunk alignment. */
                   2691: static void *
                   2692: huge_palloc(size_t alignment, size_t size)
                   2693: {
                   2694:        void *ret;
                   2695:        size_t alloc_size, chunk_size, offset;
                   2696:        chunk_node_t *node;
                   2697:
                   2698:        /*
                   2699:         * This allocation requires alignment that is even larger than chunk
                   2700:         * alignment.  This means that huge_malloc() isn't good enough.
                   2701:         *
                   2702:         * Allocate almost twice as many chunks as are demanded by the size or
                   2703:         * alignment, in order to ensure that the alignment can be achieved, then
                   2704:         * unmap leading and trailing chunks.
                   2705:         */
                   2706:        assert(alignment >= chunksize);
                   2707:
                   2708:        chunk_size = CHUNK_CEILING(size);
                   2709:
                   2710:        if (size >= alignment)
                   2711:                alloc_size = chunk_size + alignment - chunksize;
                   2712:        else
                   2713:                alloc_size = (alignment << 1) - chunksize;
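                                /*
                                 * Example (hypothetical, assuming chunksize == 1MB): for
                                 * alignment == 2MB and size == 1MB, alloc_size ==
                                 * (2MB << 1) - 1MB == 3MB, which guarantees that a
                                 * 2MB-aligned, 1MB-long region exists within the mapping no
                                 * matter where chunk_alloc() places it.
                                 */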
                   2714:
                   2715:        /* Allocate a chunk node with which to track the chunk. */
                   2716:        node = base_chunk_node_alloc();
                   2717:        if (node == NULL)
                   2718:                return (NULL);
                   2719:
                   2720:        ret = chunk_alloc(alloc_size);
                   2721:        if (ret == NULL) {
                   2722:                base_chunk_node_dealloc(node);
                   2723:                return (NULL);
                   2724:        }
                   2725:
                   2726:        offset = (uintptr_t)ret & (alignment - 1);
                   2727:        assert((offset & chunksize_mask) == 0);
                   2728:        assert(offset < alloc_size);
                   2729:        if (offset == 0) {
                   2730:                /* Trim trailing space. */
                   2731:                chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
                   2732:                    - chunk_size);
                   2733:        } else {
                   2734:                size_t trailsize;
                   2735:
                   2736:                /* Trim leading space. */
                   2737:                chunk_dealloc(ret, alignment - offset);
                   2738:
                   2739:                ret = (void *)((uintptr_t)ret + (alignment - offset));
                   2740:
                   2741:                trailsize = alloc_size - (alignment - offset) - chunk_size;
                   2742:                if (trailsize != 0) {
                   2743:                        /* Trim trailing space. */
                   2744:                        assert(trailsize < alloc_size);
                   2745:                        chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
                   2746:                            trailsize);
                   2747:                }
                   2748:        }
                   2749:
                   2750:        /* Insert node into huge. */
                   2751:        node->chunk = ret;
                   2752:        node->size = chunk_size;
                   2753:
                   2754:        malloc_mutex_lock(&chunks_mtx);
                   2755:        RB_INSERT(chunk_tree_s, &huge, node);
                   2756: #ifdef MALLOC_STATS
                   2757:        huge_nmalloc++;
                   2758:        huge_allocated += chunk_size;
                   2759: #endif
                   2760:        malloc_mutex_unlock(&chunks_mtx);
                   2761:
                   2762:        if (opt_junk)
                   2763:                memset(ret, 0xa5, chunk_size);
                   2764:        else if (opt_zero)
                   2765:                memset(ret, 0, chunk_size);
                   2766:
                   2767:        return (ret);
                   2768: }
                   2769:
                   2770: static void *
                   2771: huge_ralloc(void *ptr, size_t size, size_t oldsize)
                   2772: {
                   2773:        void *ret;
                   2774:
                   2775:        /* Avoid moving the allocation if the size class would not change. */
                   2776:        if (oldsize > arena_maxclass &&
                   2777:            CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
                   2778:                if (opt_junk && size < oldsize) {
                   2779:                        memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
                   2780:                            - size);
                   2781:                } else if (opt_zero && size > oldsize) {
                   2782:                        memset((void *)((uintptr_t)ptr + oldsize), 0, size
                   2783:                            - oldsize);
                   2784:                }
                   2785:                return (ptr);
                   2786:        }
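                                /*
                                 * Example (hypothetical, assuming chunksize == 1MB):
                                 * shrinking a 1.5MB huge allocation to 1.2MB keeps
                                 * CHUNK_CEILING() at 2MB, so the object stays in place and
                                 * only the junk/zero filling above applies.
                                 */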
                   2787:
1.8       yamt     2788:        if (CHUNK_ADDR2BASE(ptr) == ptr
                   2789: #ifdef USE_BRK
                   2790:            && ((uintptr_t)ptr < (uintptr_t)brk_base
                   2791:            || (uintptr_t)ptr >= (uintptr_t)brk_max)
                   2792: #endif
                   2793:            ) {
                   2794:                chunk_node_t *node, key;
                   2795:                void *newptr;
                   2796:                size_t oldcsize;
                   2797:                size_t newcsize;
                   2798:
                   2799:                newcsize = CHUNK_CEILING(size);
                   2800:                oldcsize = CHUNK_CEILING(oldsize);
                   2801:                assert(oldcsize != newcsize);
                   2802:                if (newcsize == 0) {
                   2803:                        /* size_t wrap-around */
                   2804:                        return (NULL);
                   2805:                }
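                                        /*
                                         * First try to resize the existing mapping in
                                         * place (or let the kernel relocate it): NetBSD's
                                         * mremap() preserves the contents, and MAP_ALIGNED
                                         * keeps the result chunk-aligned.
                                         */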
                   2806:                newptr = mremap(ptr, oldcsize, NULL, newcsize,
                   2807:                    MAP_ALIGNED(chunksize_2pow));
                   2808:                if (newptr != MAP_FAILED) {
                   2809:                        assert(CHUNK_ADDR2BASE(newptr) == newptr);
                   2810:
                   2811:                        /* update tree */
                   2812:                        malloc_mutex_lock(&chunks_mtx);
                   2813:                        key.chunk = __DECONST(void *, ptr);
                   2814:                        /* LINTED */
                   2815:                        node = RB_FIND(chunk_tree_s, &huge, &key);
                   2816:                        assert(node != NULL);
                   2817:                        assert(node->chunk == ptr);
                   2818:                        assert(node->size == oldcsize);
                   2819:                        node->size = newcsize;
                   2820:                        if (ptr != newptr) {
                   2821:                                RB_REMOVE(chunk_tree_s, &huge, node);
                   2822:                                node->chunk = newptr;
                   2823:                                RB_INSERT(chunk_tree_s, &huge, node);
                   2824:                        }
                   2825: #ifdef MALLOC_STATS
                   2826:                        huge_nralloc++;
                   2827:                        huge_allocated += newcsize - oldcsize;
                   2828:                        if (newcsize > oldcsize) {
                   2829:                                stats_chunks.curchunks +=
                   2830:                                    (newcsize - oldcsize) / chunksize;
                   2831:                                if (stats_chunks.curchunks >
                   2832:                                    stats_chunks.highchunks)
                   2833:                                        stats_chunks.highchunks =
                   2834:                                            stats_chunks.curchunks;
                   2835:                        } else {
                   2836:                                stats_chunks.curchunks -=
                   2837:                                    (oldcsize - newcsize) / chunksize;
                   2838:                        }
                   2839: #endif
                   2840:                        malloc_mutex_unlock(&chunks_mtx);
                   2841:
                   2842:                        if (opt_junk && size < oldsize) {
                   2843:                                memset((void *)((uintptr_t)newptr + size), 0x5a,
                   2844:                                    newcsize - size);
                   2845:                        } else if (opt_zero && size > oldsize) {
                   2846:                                memset((void *)((uintptr_t)newptr + oldsize), 0,
                   2847:                                    size - oldsize);
                   2848:                        }
                   2849:                        return (newptr);
                   2850:                }
                   2851:        }
                   2852:
1.1       ad       2853:        /*
                   2854:         * If we get here, then size and oldsize are different enough that we
                   2855:         * need to use a different size class.  In that case, fall back to
                   2856:         * allocating new space and copying.
                   2857:         */
                   2858:        ret = huge_malloc(size);
                   2859:        if (ret == NULL)
                   2860:                return (NULL);
                   2861:
                   2862:        if (CHUNK_ADDR2BASE(ptr) == ptr) {
                   2863:                /* The old allocation is a chunk. */
                   2864:                if (size < oldsize)
                   2865:                        memcpy(ret, ptr, size);
                   2866:                else
                   2867:                        memcpy(ret, ptr, oldsize);
                   2868:        } else {
                   2869:                /* The old allocation is a region. */
                   2870:                assert(oldsize < size);
                   2871:                memcpy(ret, ptr, oldsize);
                   2872:        }
                   2873:        idalloc(ptr);
                   2874:        return (ret);
                   2875: }
                   2876:
                   2877: static void
                   2878: huge_dalloc(void *ptr)
                   2879: {
                   2880:        chunk_node_t key;
                   2881:        chunk_node_t *node;
                   2882:
                   2883:        malloc_mutex_lock(&chunks_mtx);
                   2884:
                   2885:        /* Extract from tree of huge allocations. */
                   2886:        key.chunk = ptr;
1.2       ad       2887:        /* LINTED */
1.1       ad       2888:        node = RB_FIND(chunk_tree_s, &huge, &key);
                   2889:        assert(node != NULL);
                   2890:        assert(node->chunk == ptr);
1.2       ad       2891:        /* LINTED */
1.1       ad       2892:        RB_REMOVE(chunk_tree_s, &huge, node);
                   2893:
                   2894: #ifdef MALLOC_STATS
                   2895:        huge_ndalloc++;
                   2896:        huge_allocated -= node->size;
                   2897: #endif
                   2898:
                   2899:        malloc_mutex_unlock(&chunks_mtx);
                   2900:
                   2901:        /* Unmap chunk. */
                   2902: #ifdef USE_BRK
                   2903:        if (opt_junk)
                   2904:                memset(node->chunk, 0x5a, node->size);
                   2905: #endif
                   2906:        chunk_dealloc(node->chunk, node->size);
                   2907:
                   2908:        base_chunk_node_dealloc(node);
                   2909: }
                   2910:
                   2911: static void *
                   2912: imalloc(size_t size)
                   2913: {
                   2914:        void *ret;
                   2915:
                   2916:        assert(size != 0);
                   2917:
                   2918:        if (size <= arena_maxclass)
                   2919:                ret = arena_malloc(choose_arena(), size);
                   2920:        else
                   2921:                ret = huge_malloc(size);
                   2922:
                   2923:        return (ret);
                   2924: }
                   2925:
                   2926: static void *
                   2927: ipalloc(size_t alignment, size_t size)
                   2928: {
                   2929:        void *ret;
                   2930:        size_t ceil_size;
                   2931:
                   2932:        /*
                   2933:         * Round size up to the nearest multiple of alignment.
                   2934:         *
                   2935:         * This done, we can take advantage of the fact that for each small
                   2936:         * size class, every object is aligned at the smallest power of two
                   2937:         * that is non-zero in the base two representation of the size.  For
                   2938:         * example:
                   2939:         *
                   2940:         *   Size |   Base 2 | Minimum alignment
                   2941:         *   -----+----------+------------------
                   2942:         *     96 |  1100000 |  32
                   2943:         *    144 | 10100000 |  32
                   2944:         *    192 | 11000000 |  64
                   2945:         *
                   2946:         * Depending on runtime settings, it is possible that arena_malloc()
                   2947:         * will further round up to a power of two, but that never causes
                   2948:         * correctness issues.
                   2949:         */
                   2950:        ceil_size = (size + (alignment - 1)) & (-alignment);
                   2951:        /*
                   2952:         * (ceil_size < size) protects against the combination of maximal
                   2953:         * alignment and size greater than maximal alignment.
                   2954:         */
                   2955:        if (ceil_size < size) {
                   2956:                /* size_t overflow. */
                   2957:                return (NULL);
                   2958:        }
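                        	/*
                        	 * Illustrative arithmetic for the rounding above:
                        	 * alignment is a power of two, so -alignment ==
                        	 * ~(alignment - 1).  With alignment == 16 and size ==
                        	 * 100, ceil_size == (100 + 15) & ~15 == 112.  Were size
                        	 * within 15 bytes of SIZE_T_MAX, the addition would
                        	 * wrap, making ceil_size < size and triggering the
                        	 * check above.
                        	 */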
                   2959:
                   2960:        if (ceil_size <= pagesize || (alignment <= pagesize
                   2961:            && ceil_size <= arena_maxclass))
                   2962:                ret = arena_malloc(choose_arena(), ceil_size);
                   2963:        else {
                   2964:                size_t run_size;
                   2965:
                   2966:                /*
                   2967:                 * We can't achieve sub-page alignment, so round up alignment
                   2968:                 * permanently; it makes later calculations simpler.
                   2969:                 */
                   2970:                alignment = PAGE_CEILING(alignment);
                   2971:                ceil_size = PAGE_CEILING(size);
                   2972:                /*
                   2973:                 * (ceil_size < size) protects against very large sizes within
                   2974:                 * pagesize of SIZE_T_MAX.
                   2975:                 *
                   2976:                 * (ceil_size + alignment < ceil_size) protects against the
                   2977:                 * combination of maximal alignment and ceil_size large enough
                   2978:                 * to cause overflow.  This is similar to the first overflow
                   2979:                 * check above, but it needs to be repeated due to the new
                   2980:                 * ceil_size value, which may now be *equal* to maximal
                   2981:                 * alignment, whereas before we only detected overflow if the
                   2982:                 * original size was *greater* than maximal alignment.
                   2983:                 */
                   2984:                if (ceil_size < size || ceil_size + alignment < ceil_size) {
                   2985:                        /* size_t overflow. */
                   2986:                        return (NULL);
                   2987:                }
                   2988:
                   2989:                /*
                   2990:                 * Calculate the size of the over-size run that arena_palloc()
                   2991:                 * would need to allocate in order to guarantee the alignment.
                   2992:                 */
                   2993:                if (ceil_size >= alignment)
                   2994:                        run_size = ceil_size + alignment - pagesize;
                   2995:                else {
                   2996:                        /*
                   2997:                         * It is possible that (alignment << 1) will cause
                   2998:                         * overflow, but it doesn't matter because we also
                   2999:                         * subtract pagesize, which in the case of overflow
                   3000:                         * leaves us with a very large run_size.  That causes
                   3001:                         * the first conditional below to fail, which means
                   3002:                         * that the bogus run_size value never gets used for
                   3003:                         * anything important.
                   3004:                         */
                   3005:                        run_size = (alignment << 1) - pagesize;
                   3006:                }
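                        		/*
                        		 * Illustrative example (assuming pagesize == 4096):
                        		 * for ceil_size == alignment == 8192, run_size ==
                        		 * 8192 + 8192 - 4096 == 12288.  Any page-aligned
                        		 * run of that size must contain an 8192-aligned
                        		 * region of 8192 bytes, since at most alignment -
                        		 * pagesize leading bytes ever need to be trimmed to
                        		 * reach the first alignment boundary.
                        		 */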
                   3007:
                   3008:                if (run_size <= arena_maxclass) {
                   3009:                        ret = arena_palloc(choose_arena(), alignment, ceil_size,
                   3010:                            run_size);
                   3011:                } else if (alignment <= chunksize)
                   3012:                        ret = huge_malloc(ceil_size);
                   3013:                else
                   3014:                        ret = huge_palloc(alignment, ceil_size);
                   3015:        }
                   3016:
                   3017:        assert(((uintptr_t)ret & (alignment - 1)) == 0);
                   3018:        return (ret);
                   3019: }
                   3020:
                   3021: static void *
                   3022: icalloc(size_t size)
                   3023: {
                   3024:        void *ret;
                   3025:
                   3026:        if (size <= arena_maxclass) {
                   3027:                ret = arena_malloc(choose_arena(), size);
                   3028:                if (ret == NULL)
                   3029:                        return (NULL);
                   3030:                memset(ret, 0, size);
                   3031:        } else {
                   3032:                /*
                   3033:                 * The virtual memory system provides zero-filled pages, so
                   3034:                 * there is no need to do so manually, unless opt_junk is
                   3035:                 * enabled, in which case huge_malloc() fills huge allocations
                   3036:                 * with junk.
                   3037:                 */
                   3038:                ret = huge_malloc(size);
                   3039:                if (ret == NULL)
                   3040:                        return (NULL);
                   3041:
                   3042:                if (opt_junk)
                   3043:                        memset(ret, 0, size);
                   3044: #ifdef USE_BRK
                   3045:                else if ((uintptr_t)ret >= (uintptr_t)brk_base
                   3046:                    && (uintptr_t)ret < (uintptr_t)brk_max) {
                   3047:                        /*
                   3048:                         * This may be a re-used brk chunk.  Therefore, zero
                   3049:                         * the memory.
                   3050:                         */
                   3051:                        memset(ret, 0, size);
                   3052:                }
                   3053: #endif
                   3054:        }
                   3055:
                   3056:        return (ret);
                   3057: }
                   3058:
                   3059: static size_t
                   3060: isalloc(const void *ptr)
                   3061: {
                   3062:        size_t ret;
                   3063:        arena_chunk_t *chunk;
                   3064:
                   3065:        assert(ptr != NULL);
                   3066:
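                        	/*
                        	 * CHUNK_ADDR2BASE() masks off the pointer's low bits to
                        	 * find the enclosing chunk in constant time, i.e.
                        	 * (void *)((uintptr_t)ptr & ~chunksize_mask).  A pointer
                        	 * that equals its own chunk base must be a huge
                        	 * allocation, since arena regions never coincide with
                        	 * the arena_chunk_t header at the start of a chunk.
                        	 */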
                   3067:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
                   3068:        if (chunk != ptr) {
                   3069:                /* Region. */
                   3070:                assert(chunk->arena->magic == ARENA_MAGIC);
                   3071:
                   3072:                ret = arena_salloc(ptr);
                   3073:        } else {
                   3074:                chunk_node_t *node, key;
                   3075:
                   3076:                /* Chunk (huge allocation). */
                   3077:
                   3078:                malloc_mutex_lock(&chunks_mtx);
                   3079:
                   3080:                /* Extract from tree of huge allocations. */
                   3081:                key.chunk = __DECONST(void *, ptr);
1.2       ad       3082:                /* LINTED */
1.1       ad       3083:                node = RB_FIND(chunk_tree_s, &huge, &key);
                   3084:                assert(node != NULL);
                   3085:
                   3086:                ret = node->size;
                   3087:
                   3088:                malloc_mutex_unlock(&chunks_mtx);
                   3089:        }
                   3090:
                   3091:        return (ret);
                   3092: }
                   3093:
                   3094: static void *
                   3095: iralloc(void *ptr, size_t size)
                   3096: {
                   3097:        void *ret;
                   3098:        size_t oldsize;
                   3099:
                   3100:        assert(ptr != NULL);
                   3101:        assert(size != 0);
                   3102:
                   3103:        oldsize = isalloc(ptr);
                   3104:
                   3105:        if (size <= arena_maxclass)
                   3106:                ret = arena_ralloc(ptr, size, oldsize);
                   3107:        else
                   3108:                ret = huge_ralloc(ptr, size, oldsize);
                   3109:
                   3110:        return (ret);
                   3111: }
                   3112:
                   3113: static void
                   3114: idalloc(void *ptr)
                   3115: {
                   3116:        arena_chunk_t *chunk;
                   3117:
                   3118:        assert(ptr != NULL);
                   3119:
                   3120:        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
                   3121:        if (chunk != ptr) {
                   3122:                /* Region. */
                   3123:                arena_dalloc(chunk->arena, chunk, ptr);
                   3124:        } else
                   3125:                huge_dalloc(ptr);
                   3126: }
                   3127:
                   3128: static void
                   3129: malloc_print_stats(void)
                   3130: {
                   3131:
                   3132:        if (opt_print_stats) {
                   3133:                char s[UMAX2S_BUFSIZE];
                   3134:                _malloc_message("___ Begin malloc statistics ___\n", "", "",
                   3135:                    "");
                   3136:                _malloc_message("Assertions ",
                   3137: #ifdef NDEBUG
                   3138:                    "disabled",
                   3139: #else
                   3140:                    "enabled",
                   3141: #endif
                   3142:                    "\n", "");
                   3143:                _malloc_message("Boolean MALLOC_OPTIONS: ",
                   3144:                    opt_abort ? "A" : "a",
                   3145:                    opt_junk ? "J" : "j",
                   3146:                    opt_hint ? "H" : "h");
                   3147:                _malloc_message(opt_utrace ? "PU" : "Pu",
                   3148:                    opt_sysv ? "V" : "v",
                   3149:                    opt_xmalloc ? "X" : "x",
                   3150:                    opt_zero ? "Z\n" : "z\n");
                   3151:
                   3152:                _malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
                   3153:                _malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
                   3154:                _malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
                   3155:                    "\n", "");
                   3156:                _malloc_message("Quantum size: ", umax2s(quantum, s), "\n", "");
                   3157:                _malloc_message("Max small size: ", umax2s(small_max, s), "\n",
                   3158:                    "");
                   3159:
                   3160:                _malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
                   3161:                _malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");
                   3162:
                   3163: #ifdef MALLOC_STATS
                   3164:                {
                   3165:                        size_t allocated, mapped;
                   3166:                        unsigned i;
                   3167:                        arena_t *arena;
                   3168:
                   3169:                        /* Calculate and print allocated/mapped stats. */
                   3170:
                   3171:                        /* arenas. */
                   3172:                        for (i = 0, allocated = 0; i < narenas; i++) {
                   3173:                                if (arenas[i] != NULL) {
                   3174:                                        malloc_mutex_lock(&arenas[i]->mtx);
                   3175:                                        allocated +=
                   3176:                                            arenas[i]->stats.allocated_small;
                   3177:                                        allocated +=
                   3178:                                            arenas[i]->stats.allocated_large;
                   3179:                                        malloc_mutex_unlock(&arenas[i]->mtx);
                   3180:                                }
                   3181:                        }
                   3182:
                   3183:                        /* huge/base. */
                   3184:                        malloc_mutex_lock(&chunks_mtx);
                   3185:                        allocated += huge_allocated;
                   3186:                        mapped = stats_chunks.curchunks * chunksize;
                   3187:                        malloc_mutex_unlock(&chunks_mtx);
                   3188:
                   3189:                        malloc_mutex_lock(&base_mtx);
                   3190:                        mapped += base_mapped;
                   3191:                        malloc_mutex_unlock(&base_mtx);
                   3192:
                   3193:                        malloc_printf("Allocated: %zu, mapped: %zu\n",
                   3194:                            allocated, mapped);
                   3195:
                   3196:                        /* Print chunk stats. */
                   3197:                        {
                   3198:                                chunk_stats_t chunks_stats;
                   3199:
                   3200:                                malloc_mutex_lock(&chunks_mtx);
                   3201:                                chunks_stats = stats_chunks;
                   3202:                                malloc_mutex_unlock(&chunks_mtx);
                   3203:
                   3204:                                malloc_printf("chunks: nchunks   "
                   3205:                                    "highchunks    curchunks\n");
                   3206:                                malloc_printf("  %13llu%13lu%13lu\n",
                   3207:                                    chunks_stats.nchunks,
                   3208:                                    chunks_stats.highchunks,
                   3209:                                    chunks_stats.curchunks);
                   3210:                        }
                   3211:
                   3212:                        /* Print huge allocation stats. */
                   3213:                        malloc_printf(
1.8       yamt     3214:                            "huge: nmalloc      ndalloc      "
                   3215:                            "nralloc    allocated\n");
                   3216:                        malloc_printf(" %12llu %12llu %12llu %12zu\n",
                   3217:                            huge_nmalloc, huge_ndalloc, huge_nralloc,
                   3218:                            huge_allocated);
1.1       ad       3219:
                   3220:                        /* Print stats for each arena. */
                   3221:                        for (i = 0; i < narenas; i++) {
                   3222:                                arena = arenas[i];
                   3223:                                if (arena != NULL) {
                   3224:                                        malloc_printf(
1.2       ad       3225:                                            "\narenas[%u] @ %p\n", i, arena);
1.1       ad       3226:                                        malloc_mutex_lock(&arena->mtx);
                   3227:                                        stats_print(arena);
                   3228:                                        malloc_mutex_unlock(&arena->mtx);
                   3229:                                }
                   3230:                        }
                   3231:                }
                   3232: #endif /* #ifdef MALLOC_STATS */
                   3233:                _malloc_message("--- End malloc statistics ---\n", "", "", "");
                   3234:        }
                   3235: }
                   3236:
                   3237: /*
                   3238:  * FreeBSD's pthreads implementation calls malloc(3), so the malloc
                   3239:  * implementation has to take pains to avoid infinite recursion during
                   3240:  * initialization.
                   3241:  */
                   3242: static inline bool
                   3243: malloc_init(void)
                   3244: {
                   3245:
                   3246:        if (malloc_initialized == false)
                   3247:                return (malloc_init_hard());
                   3248:
                   3249:        return (false);
                   3250: }
                   3251:
                   3252: static bool
                   3253: malloc_init_hard(void)
                   3254: {
                   3255:        unsigned i, j;
1.9       christos 3256:        ssize_t linklen;
1.1       ad       3257:        char buf[PATH_MAX + 1];
1.2       ad       3258:        const char *opts = "";
1.1       ad       3259:
                   3260:        malloc_mutex_lock(&init_lock);
                   3261:        if (malloc_initialized) {
                   3262:                /*
                   3263:                 * Another thread initialized the allocator before this one
                   3264:                 * acquired init_lock.
                   3265:                 */
                   3266:                malloc_mutex_unlock(&init_lock);
                   3267:                return (false);
                   3268:        }
                   3269:
                   3270:        /* Get number of CPUs. */
                   3271:        {
                   3272:                int mib[2];
                   3273:                size_t len;
                   3274:
                   3275:                mib[0] = CTL_HW;
                   3276:                mib[1] = HW_NCPU;
                   3277:                len = sizeof(ncpus);
                   3278:                if (sysctl(mib, 2, &ncpus, &len, (void *) 0, 0) == -1) {
                   3279:                        /* Error. */
                   3280:                        ncpus = 1;
                   3281:                }
                   3282:        }
                   3283:
                   3284:        /* Get page size. */
                   3285:        {
                   3286:                long result;
                   3287:
                   3288:                result = sysconf(_SC_PAGESIZE);
                   3289:                assert(result != -1);
                   3290:                pagesize = (unsigned) result;
                   3291:
                   3292:                /*
                   3293:                 * We assume that pagesize is a power of 2 when calculating
                   3294:                 * pagesize_mask and pagesize_2pow.
                   3295:                 */
                   3296:                assert(((result - 1) & result) == 0);
                   3297:                pagesize_mask = result - 1;
                   3298:                pagesize_2pow = ffs((int)result) - 1;
                   3299:        }
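                        	/*
                        	 * Example (assuming a 4096-byte page): pagesize_mask ==
                        	 * 0xfff, and since ffs() returns the 1-based index of
                        	 * the least significant set bit, pagesize_2pow ==
                        	 * ffs(4096) - 1 == 12.
                        	 */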
                   3300:
                   3301:        for (i = 0; i < 3; i++) {
                   3302:                /* Get runtime configuration. */
                   3303:                switch (i) {
                   3304:                case 0:
                   3305:                        if ((linklen = readlink("/etc/malloc.conf", buf,
                   3306:                                                sizeof(buf) - 1)) != -1) {
                   3307:                                /*
                   3308:                                 * Use the contents of the "/etc/malloc.conf"
                   3309:                                 * symbolic link's name.
                   3310:                                 */
                   3311:                                buf[linklen] = '\0';
                   3312:                                opts = buf;
                   3313:                        } else {
                   3314:                                /* No configuration specified. */
                   3315:                                buf[0] = '\0';
                   3316:                                opts = buf;
                   3317:                        }
                   3318:                        break;
                   3319:                case 1:
                   3320:                        if (issetugid() == 0 && (opts =
                   3321:                            getenv("MALLOC_OPTIONS")) != NULL) {
                   3322:                                /*
                   3323:                                 * Do nothing; opts is already initialized to
                   3324:                                 * the value of the MALLOC_OPTIONS environment
                   3325:                                 * variable.
                   3326:                                 */
                   3327:                        } else {
                   3328:                                /* No configuration specified. */
                   3329:                                buf[0] = '\0';
                   3330:                                opts = buf;
                   3331:                        }
                   3332:                        break;
                   3333:                case 2:
                   3334:                        if (_malloc_options != NULL) {
                   3335:                            /*
                   3336:                             * Use options that were compiled into the program.
                   3337:                             */
                   3338:                            opts = _malloc_options;
                   3339:                        } else {
                   3340:                                /* No configuration specified. */
                   3341:                                buf[0] = '\0';
                   3342:                                opts = buf;
                   3343:                        }
                   3344:                        break;
                   3345:                default:
                   3346:                        /* NOTREACHED */
1.7       yamt     3347:                        /* LINTED */
1.1       ad       3348:                        assert(false);
                   3349:                }
                   3350:
                   3351:                for (j = 0; opts[j] != '\0'; j++) {
                   3352:                        switch (opts[j]) {
                   3353:                        case 'a':
                   3354:                                opt_abort = false;
                   3355:                                break;
                   3356:                        case 'A':
                   3357:                                opt_abort = true;
                   3358:                                break;
                   3359:                        case 'h':
                   3360:                                opt_hint = false;
                   3361:                                break;
                   3362:                        case 'H':
                   3363:                                opt_hint = true;
                   3364:                                break;
                   3365:                        case 'j':
                   3366:                                opt_junk = false;
                   3367:                                break;
                   3368:                        case 'J':
                   3369:                                opt_junk = true;
                   3370:                                break;
                   3371:                        case 'k':
                   3372:                                /*
                   3373:                                 * Chunks always require at least one header
                   3374:                                 * page, so chunks can never be smaller than
                   3375:                                 * two pages.
                   3376:                                 */
                   3377:                                if (opt_chunk_2pow > pagesize_2pow + 1)
                   3378:                                        opt_chunk_2pow--;
                   3379:                                break;
                   3380:                        case 'K':
                   3381:                                /*
                   3382:                                 * There must be fewer pages in a chunk than
                   3383:                                 * can be recorded by the pos field of
                   3384:                                 * arena_chunk_map_t, in order to make POS_FREE
                   3385:                                 * special.
                   3386:                                 */
                   3387:                                if (opt_chunk_2pow - pagesize_2pow
                   3388:                                    < (sizeof(uint32_t) << 3) - 1)
                   3389:                                        opt_chunk_2pow++;
                   3390:                                break;
                   3391:                        case 'n':
                   3392:                                opt_narenas_lshift--;
                   3393:                                break;
                   3394:                        case 'N':
                   3395:                                opt_narenas_lshift++;
                   3396:                                break;
                   3397:                        case 'p':
                   3398:                                opt_print_stats = false;
                   3399:                                break;
                   3400:                        case 'P':
                   3401:                                opt_print_stats = true;
                   3402:                                break;
                   3403:                        case 'q':
                   3404:                                if (opt_quantum_2pow > QUANTUM_2POW_MIN)
                   3405:                                        opt_quantum_2pow--;
                   3406:                                break;
                   3407:                        case 'Q':
                   3408:                                if (opt_quantum_2pow < pagesize_2pow - 1)
                   3409:                                        opt_quantum_2pow++;
                   3410:                                break;
                   3411:                        case 's':
                   3412:                                if (opt_small_max_2pow > QUANTUM_2POW_MIN)
                   3413:                                        opt_small_max_2pow--;
                   3414:                                break;
                   3415:                        case 'S':
                   3416:                                if (opt_small_max_2pow < pagesize_2pow - 1)
                   3417:                                        opt_small_max_2pow++;
                   3418:                                break;
                   3419:                        case 'u':
                   3420:                                opt_utrace = false;
                   3421:                                break;
                   3422:                        case 'U':
                   3423:                                opt_utrace = true;
                   3424:                                break;
                   3425:                        case 'v':
                   3426:                                opt_sysv = false;
                   3427:                                break;
                   3428:                        case 'V':
                   3429:                                opt_sysv = true;
                   3430:                                break;
                   3431:                        case 'x':
                   3432:                                opt_xmalloc = false;
                   3433:                                break;
                   3434:                        case 'X':
                   3435:                                opt_xmalloc = true;
                   3436:                                break;
                   3437:                        case 'z':
                   3438:                                opt_zero = false;
                   3439:                                break;
                   3440:                        case 'Z':
                   3441:                                opt_zero = true;
                   3442:                                break;
                   3443:                        default: {
                   3444:                                char cbuf[2];
                   3445:
                   3446:                                cbuf[0] = opts[j];
                   3447:                                cbuf[1] = '\0';
                   3448:                                _malloc_message(_getprogname(),
                   3449:                                    ": (malloc) Unsupported character in "
                   3450:                                    "malloc options: '", cbuf, "'\n");
                   3451:                        }
                   3452:                        }
                   3453:                }
                   3454:        }
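                        	/*
                        	 * Illustrative use of the three option sources parsed
                        	 * above, in increasing order of precedence (a flag
                        	 * parsed later overrides the same flag from an earlier
                        	 * source):
                        	 *
                        	 *	ln -s 'J' /etc/malloc.conf	# junk-fill, via symlink name
                        	 *	MALLOC_OPTIONS=AX ./prog	# abort/xmalloc, via environment
                        	 *					# (ignored for set-ID programs)
                        	 *
                        	 * A compiled-in _malloc_options string, when non-NULL,
                        	 * is parsed last of all.
                        	 */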
                   3455:
                   3456:        /* Take care to call atexit() only once. */
                   3457:        if (opt_print_stats) {
                   3458:                /* Print statistics at exit. */
                   3459:                atexit(malloc_print_stats);
                   3460:        }
                   3461:
                   3462:        /* Set variables according to the value of opt_small_max_2pow. */
                   3463:        if (opt_small_max_2pow < opt_quantum_2pow)
                   3464:                opt_small_max_2pow = opt_quantum_2pow;
                   3465:        small_max = (1 << opt_small_max_2pow);
                   3466:
                   3467:        /* Set bin-related variables. */
                   3468:        bin_maxclass = (pagesize >> 1);
                   3469:        assert(opt_quantum_2pow >= TINY_MIN_2POW);
1.9       christos 3470:        ntbins = (unsigned)(opt_quantum_2pow - TINY_MIN_2POW);
1.1       ad       3471:        assert(ntbins <= opt_quantum_2pow);
1.9       christos 3472:        nqbins = (unsigned)(small_max >> opt_quantum_2pow);
                   3473:        nsbins = (unsigned)(pagesize_2pow - opt_small_max_2pow - 1);
1.1       ad       3474:
                   3475:        /* Set variables according to the value of opt_quantum_2pow. */
                   3476:        quantum = (1 << opt_quantum_2pow);
                   3477:        quantum_mask = quantum - 1;
                   3478:        if (ntbins > 0)
                   3479:                small_min = (quantum >> 1) + 1;
                   3480:        else
                   3481:                small_min = 1;
                   3482:        assert(small_min <= quantum);
                   3483:
                   3484:        /* Set variables according to the value of opt_chunk_2pow. */
                   3485:        chunksize = (1LU << opt_chunk_2pow);
                   3486:        chunksize_mask = chunksize - 1;
1.9       christos 3487:        chunksize_2pow = (unsigned)opt_chunk_2pow;
                   3488:        chunk_npages = (unsigned)(chunksize >> pagesize_2pow);
1.1       ad       3489:        {
                   3490:                unsigned header_size;
                   3491:
1.9       christos 3492:                header_size = (unsigned)(sizeof(arena_chunk_t) +
                   3493:                    (sizeof(arena_chunk_map_t) * (chunk_npages - 1)));
1.1       ad       3494:                arena_chunk_header_npages = (header_size >> pagesize_2pow);
                   3495:                if ((header_size & pagesize_mask) != 0)
                   3496:                        arena_chunk_header_npages++;
                   3497:        }
                   3498:        arena_maxclass = chunksize - (arena_chunk_header_npages <<
                   3499:            pagesize_2pow);
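                        	/*
                        	 * Illustrative arithmetic (assuming 4096-byte pages and
                        	 * 1 MB chunks): chunk_npages == 256.  If the
                        	 * arena_chunk_t header plus its 256-entry page map came
                        	 * to, say, 4500 bytes, then arena_chunk_header_npages ==
                        	 * 2 (one full page plus a partial page), and
                        	 * arena_maxclass == 1048576 - 2 * 4096 == 1040384.
                        	 */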
                   3500:
                   3501:        UTRACE(0, 0, 0);
                   3502:
                   3503: #ifdef MALLOC_STATS
                   3504:        memset(&stats_chunks, 0, sizeof(chunk_stats_t));
                   3505: #endif
                   3506:
                   3507:        /* Various sanity checks that regard configuration. */
                   3508:        assert(quantum >= sizeof(void *));
                   3509:        assert(quantum <= pagesize);
                   3510:        assert(chunksize >= pagesize);
                   3511:        assert(quantum * 4 <= chunksize);
                   3512:
                   3513:        /* Initialize chunks data. */
                   3514:        malloc_mutex_init(&chunks_mtx);
                   3515:        RB_INIT(&huge);
                   3516: #ifdef USE_BRK
                   3517:        malloc_mutex_init(&brk_mtx);
                   3518:        brk_base = sbrk(0);
                   3519:        brk_prev = brk_base;
                   3520:        brk_max = brk_base;
                   3521: #endif
                   3522: #ifdef MALLOC_STATS
                   3523:        huge_nmalloc = 0;
                   3524:        huge_ndalloc = 0;
1.8       yamt     3525:        huge_nralloc = 0;
1.1       ad       3526:        huge_allocated = 0;
                   3527: #endif
                   3528:        RB_INIT(&old_chunks);
                   3529:
                   3530:        /* Initialize base allocation data structures. */
                   3531: #ifdef MALLOC_STATS
                   3532:        base_mapped = 0;
                   3533: #endif
                   3534: #ifdef USE_BRK
                   3535:        /*
                   3536:         * Allocate a base chunk here, since it doesn't actually have to be
                   3537:         * chunk-aligned.  Doing this before allocating any other chunks allows
                   3538:         * the use of space that would otherwise be wasted.
                   3539:         */
                   3540:        base_pages_alloc(0);
                   3541: #endif
                   3542:        base_chunk_nodes = NULL;
                   3543:        malloc_mutex_init(&base_mtx);
                   3544:
                   3545:        if (ncpus > 1) {
                   3546:                /*
                   3547:                 * For SMP systems, create four times as many arenas as there
                   3548:                 * are CPUs by default.
                   3549:                 */
                   3550:                opt_narenas_lshift += 2;
                   3551:        }
                   3552:
1.2       ad       3553: #ifdef NO_TLS
                   3554:        /* Initialize arena key. */
                   3555:        (void)thr_keycreate(&arenas_map_key, NULL);
                   3556: #endif
                   3557:
1.1       ad       3558:        /* Determine how many arenas to use. */
                   3559:        narenas = ncpus;
                   3560:        if (opt_narenas_lshift > 0) {
                   3561:                if ((narenas << opt_narenas_lshift) > narenas)
                   3562:                        narenas <<= opt_narenas_lshift;
                   3563:                /*
                   3564:                 * Make sure not to exceed the limits of what base_malloc()
                   3565:                 * can handle.
                   3566:                 */
                   3567:                if (narenas * sizeof(arena_t *) > chunksize)
1.9       christos 3568:                        narenas = (unsigned)(chunksize / sizeof(arena_t *));
1.1       ad       3569:        } else if (opt_narenas_lshift < 0) {
                   3570:                if ((narenas >> -opt_narenas_lshift) < narenas)
                   3571:                        narenas >>= -opt_narenas_lshift;
                   3572:        }
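                        	/*
                        	 * Illustrative overflow guard for the shifts above: with
                        	 * 32-bit unsigned narenas == 0x40000000 and a left shift
                        	 * of 2, the shift wraps to 0, the (narenas <<
                        	 * opt_narenas_lshift) > narenas test fails, and narenas
                        	 * is left unchanged rather than being zeroed.
                        	 */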
                   3573:
1.12    ! ad       3574:        /*
        !          3575:         * Make sure there are as many arena slots as there are
        !          3576:         * CPUs; depending on the number of threads, only one
        !          3577:         * arena may actually be used.
        !          3578:         */
        !          3579:        if (narenas < ncpus)
        !          3580:                narenas = ncpus;
1.1       ad       3581:        next_arena = 0;
                   3582:
                   3583:        /* Allocate and initialize arenas. */
                   3584:        arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
                   3585:        if (arenas == NULL) {
                   3586:                malloc_mutex_unlock(&init_lock);
                   3587:                return (true);
                   3588:        }
                   3589:        /*
                   3590:         * Zero the array.  In practice, this should always be pre-zeroed,
                   3591:         * since it was just mmap()ed, but let's be sure.
                   3592:         */
                   3593:        memset(arenas, 0, sizeof(arena_t *) * narenas);
                   3594:
                   3595:        /*
                   3596:         * Initialize one arena here.  The rest are lazily created in
                   3597:         * arena_choose_hard().
                   3598:         */
                   3599:        arenas_extend(0);
                   3600:        if (arenas[0] == NULL) {
                   3601:                malloc_mutex_unlock(&init_lock);
                   3602:                return (true);
                   3603:        }
                   3604:
                   3605:        malloc_mutex_init(&arenas_mtx);
                   3606:
                   3607:        malloc_initialized = true;
                   3608:        malloc_mutex_unlock(&init_lock);
                   3609:        return (false);
                   3610: }
                   3611:
                   3612: /*
                   3613:  * End general internal functions.
                   3614:  */
                   3615: /******************************************************************************/
                   3616: /*
                   3617:  * Begin malloc(3)-compatible functions.
                   3618:  */
                   3619:
                   3620: void *
                   3621: malloc(size_t size)
                   3622: {
                   3623:        void *ret;
                   3624:
                   3625:        if (malloc_init()) {
                   3626:                ret = NULL;
                   3627:                goto RETURN;
                   3628:        }
                   3629:
                   3630:        if (size == 0) {
                   3631:                if (opt_sysv == false)
                   3632:                        size = 1;
                   3633:                else {
                   3634:                        ret = NULL;
                   3635:                        goto RETURN;
                   3636:                }
                   3637:        }
                   3638:
                   3639:        ret = imalloc(size);
                   3640:
                   3641: RETURN:
                   3642:        if (ret == NULL) {
                   3643:                if (opt_xmalloc) {
                   3644:                        _malloc_message(_getprogname(),
                   3645:                            ": (malloc) Error in malloc(): out of memory\n", "",
                   3646:                            "");
                   3647:                        abort();
                   3648:                }
                   3649:                errno = ENOMEM;
                   3650:        }
                   3651:
                   3652:        UTRACE(0, size, ret);
                   3653:        return (ret);
                   3654: }
                   3655:
                   3656: int
                   3657: posix_memalign(void **memptr, size_t alignment, size_t size)
                   3658: {
                   3659:        int ret;
                   3660:        void *result;
                   3661:
                   3662:        if (malloc_init())
                   3663:                result = NULL;
                   3664:        else {
                   3665:                /* Make sure that alignment is a large enough power of 2. */
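                        		/*
                        		 * A power of two has exactly one bit set, so
                        		 * ((alignment - 1) & alignment) == 0 holds only for
                        		 * powers of two; e.g. 16 & 15 == 0, but 24 & 23 ==
                        		 * 16.  POSIX additionally requires alignment to be
                        		 * no smaller than sizeof(void *).
                        		 */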
                   3666:                if (((alignment - 1) & alignment) != 0
                   3667:                    || alignment < sizeof(void *)) {
                   3668:                        if (opt_xmalloc) {
                   3669:                                _malloc_message(_getprogname(),
                   3670:                                    ": (malloc) Error in posix_memalign(): "
                   3671:                                    "invalid alignment\n", "", "");
                   3672:                                abort();
                   3673:                        }
                   3674:                        result = NULL;
                   3675:                        ret = EINVAL;
                   3676:                        goto RETURN;
                   3677:                }
                   3678:
                   3679:                result = ipalloc(alignment, size);
                   3680:        }
                   3681:
                   3682:        if (result == NULL) {
                   3683:                if (opt_xmalloc) {
                   3684:                        _malloc_message(_getprogname(),
                   3685:                        ": (malloc) Error in posix_memalign(): out of memory\n",
                   3686:                        "", "");
                   3687:                        abort();
                   3688:                }
                   3689:                ret = ENOMEM;
                   3690:                goto RETURN;
                   3691:        }
                   3692:
                   3693:        *memptr = result;
                   3694:        ret = 0;
                   3695:
                   3696: RETURN:
                   3697:        UTRACE(0, size, result);
                   3698:        return (ret);
                   3699: }
                   3700:
                   3701: void *
                   3702: calloc(size_t num, size_t size)
                   3703: {
                   3704:        void *ret;
                   3705:        size_t num_size;
                   3706:
                   3707:        if (malloc_init()) {
                   3708:                num_size = 0;
                   3709:                ret = NULL;
                   3710:                goto RETURN;
                   3711:        }
                   3712:
                   3713:        num_size = num * size;
                   3714:        if (num_size == 0) {
                   3715:                if ((opt_sysv == false) && ((num == 0) || (size == 0)))
                   3716:                        num_size = 1;
                   3717:                else {
                   3718:                        ret = NULL;
                   3719:                        goto RETURN;
                   3720:                }
                   3721:        /*
                   3722:         * Try to avoid division here.  We know that it isn't possible to
                   3723:         * overflow during multiplication if neither operand uses any of the
                   3724:         * most significant half of the bits in a size_t.
                   3725:         */
1.2       ad       3726:        } else if ((unsigned long long)((num | size) &
                   3727:           ((unsigned long long)SIZE_T_MAX << (sizeof(size_t) << 2))) &&
                   3728:           (num_size / size != num)) {
1.1       ad       3729:                /* size_t overflow. */
                   3730:                ret = NULL;
                   3731:                goto RETURN;
                   3732:        }
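                        	/*
                        	 * Illustrative example of the check above (assuming a
                        	 * 32-bit size_t): SIZE_T_MAX << 16 == 0xffff0000, so the
                        	 * division is attempted only when num or size has a bit
                        	 * set in its upper 16 bits.  0xffff * 0xffff cannot
                        	 * overflow and skips the division entirely, while
                        	 * 0x10000 * 0x10000 wraps to 0 and is caught by
                        	 * num_size / size != num.
                        	 */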
                   3733:
                   3734:        ret = icalloc(num_size);
                   3735:
                   3736: RETURN:
                   3737:        if (ret == NULL) {
                   3738:                if (opt_xmalloc) {
                   3739:                        _malloc_message(_getprogname(),
                   3740:                            ": (malloc) Error in calloc(): out of memory\n", "",
                   3741:                            "");
                   3742:                        abort();
                   3743:                }
                   3744:                errno = ENOMEM;
                   3745:        }
                   3746:
                   3747:        UTRACE(0, num_size, ret);
                   3748:        return (ret);
                   3749: }
                   3750:
                   3751: void *
                   3752: realloc(void *ptr, size_t size)
                   3753: {
                   3754:        void *ret;
                   3755:
                   3756:        if (size == 0) {
                   3757:                if (opt_sysv == false)
                   3758:                        size = 1;
                   3759:                else {
                   3760:                        if (ptr != NULL)
                   3761:                                idalloc(ptr);
                   3762:                        ret = NULL;
                   3763:                        goto RETURN;
                   3764:                }
                   3765:        }
                   3766:
                   3767:        if (ptr != NULL) {
                   3768:                assert(malloc_initialized);
                   3769:
                   3770:                ret = iralloc(ptr, size);
                   3771:
                   3772:                if (ret == NULL) {
                   3773:                        if (opt_xmalloc) {
                   3774:                                _malloc_message(_getprogname(),
                   3775:                                    ": (malloc) Error in realloc(): out of "
                   3776:                                    "memory\n", "", "");
                   3777:                                abort();
                   3778:                        }
                   3779:                        errno = ENOMEM;
                   3780:                }
                   3781:        } else {
                   3782:                if (malloc_init())
                   3783:                        ret = NULL;
                   3784:                else
                   3785:                        ret = imalloc(size);
                   3786:
                   3787:                if (ret == NULL) {
                   3788:                        if (opt_xmalloc) {
                   3789:                                _malloc_message(_getprogname(),
                   3790:                                    ": (malloc) Error in realloc(): out of "
                   3791:                                    "memory\n", "", "");
                   3792:                                abort();
                   3793:                        }
                   3794:                        errno = ENOMEM;
                   3795:                }
                   3796:        }
                   3797:
                   3798: RETURN:
                   3799:        UTRACE(ptr, size, ret);
                   3800:        return (ret);
                   3801: }
                   3802:
                   3803: void
                   3804: free(void *ptr)
                   3805: {
                   3806:
                   3807:        UTRACE(ptr, 0, 0);
                   3808:        if (ptr != NULL) {
                   3809:                assert(malloc_initialized);
                   3810:
                   3811:                idalloc(ptr);
                   3812:        }
                   3813: }
                   3814:
                   3815: /*
                   3816:  * End malloc(3)-compatible functions.
                   3817:  */
                   3818: /******************************************************************************/
                   3819: /*
                   3820:  * Begin non-standard functions.
                   3821:  */
1.2       ad       3822: #ifndef __NetBSD__
1.1       ad       3823: size_t
                   3824: malloc_usable_size(const void *ptr)
                   3825: {
                   3826:
                   3827:        assert(ptr != NULL);
                   3828:
                   3829:        return (isalloc(ptr));
                   3830: }
1.2       ad       3831: #endif
1.1       ad       3832:
                   3833: /*
                   3834:  * End non-standard functions.
                   3835:  */
                   3836: /******************************************************************************/
                   3837: /*
                   3838:  * Begin library-private functions, used by threading libraries for protection
                   3839:  * of malloc during fork().  These functions are only called if the program is
                   3840:  * running in threaded mode, so there is no need to check whether the program
                   3841:  * is threaded here.
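                         *
                         * Holding every allocator mutex across fork() guarantees that
                         * the child inherits them unlocked.  Otherwise, another thread
                         * could own a malloc mutex at the moment of fork(); the child,
                         * with no thread left to release it, would then deadlock on its
                         * first allocation.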
                   3842:  */
                   3843:
                   3844: void
                   3845: _malloc_prefork(void)
                   3846: {
                   3847:        unsigned i;
                   3848:
                   3849:        /* Acquire all mutexes in a safe order. */
                   3850:
                   3851:        malloc_mutex_lock(&arenas_mtx);
                   3852:        for (i = 0; i < narenas; i++) {
                   3853:                if (arenas[i] != NULL)
                   3854:                        malloc_mutex_lock(&arenas[i]->mtx);
                   3855:        }
                   3856:        malloc_mutex_unlock(&arenas_mtx);
                   3857:
                   3858:        malloc_mutex_lock(&base_mtx);
                   3859:
                   3860:        malloc_mutex_lock(&chunks_mtx);
                   3861: }
                   3862:
                   3863: void
                   3864: _malloc_postfork(void)
                   3865: {
                   3866:        unsigned i;
                   3867:
                   3868:        /* Release all mutexes, now that fork() has completed. */
                   3869:
                   3870:        malloc_mutex_unlock(&chunks_mtx);
                   3871:
                   3872:        malloc_mutex_unlock(&base_mtx);
                   3873:
                   3874:        malloc_mutex_lock(&arenas_mtx);
                   3875:        for (i = 0; i < narenas; i++) {
                   3876:                if (arenas[i] != NULL)
                   3877:                        malloc_mutex_unlock(&arenas[i]->mtx);
                   3878:        }
                   3879:        malloc_mutex_unlock(&arenas_mtx);
                   3880: }
                   3881:
                   3882: /*
                   3883:  * End library-private functions.
                   3884:  */
                   3885: /******************************************************************************/
