
Annotation of src/sys/kern/subr_pool.c, Revision 1.66

1.66    ! thorpej     1: /*     $NetBSD: subr_pool.c,v 1.65 2001/11/20 06:57:04 enami Exp $     */
1.1       pk          2:
                      3: /*-
1.43      thorpej     4:  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
1.1       pk          5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
1.20      thorpej     8:  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
                      9:  * Simulation Facility, NASA Ames Research Center.
1.1       pk         10:  *
                     11:  * Redistribution and use in source and binary forms, with or without
                     12:  * modification, are permitted provided that the following conditions
                     13:  * are met:
                     14:  * 1. Redistributions of source code must retain the above copyright
                     15:  *    notice, this list of conditions and the following disclaimer.
                     16:  * 2. Redistributions in binary form must reproduce the above copyright
                     17:  *    notice, this list of conditions and the following disclaimer in the
                     18:  *    documentation and/or other materials provided with the distribution.
                     19:  * 3. All advertising materials mentioning features or use of this software
                     20:  *    must display the following acknowledgement:
1.13      christos   21:  *     This product includes software developed by the NetBSD
                     22:  *     Foundation, Inc. and its contributors.
1.1       pk         23:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     24:  *    contributors may be used to endorse or promote products derived
                     25:  *    from this software without specific prior written permission.
                     26:  *
                     27:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     28:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     29:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     30:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     31:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     32:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     33:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     34:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     35:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     36:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     37:  * POSSIBILITY OF SUCH DAMAGE.
                     38:  */
1.64      lukem      39:
                     40: #include <sys/cdefs.h>
1.66    ! thorpej    41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.65 2001/11/20 06:57:04 enami Exp $");
1.24      scottr     42:
1.25      thorpej    43: #include "opt_pool.h"
1.24      scottr     44: #include "opt_poollog.h"
1.28      thorpej    45: #include "opt_lockdebug.h"
1.1       pk         46:
                     47: #include <sys/param.h>
                     48: #include <sys/systm.h>
                     49: #include <sys/proc.h>
                     50: #include <sys/errno.h>
                     51: #include <sys/kernel.h>
                     52: #include <sys/malloc.h>
                     53: #include <sys/lock.h>
                     54: #include <sys/pool.h>
1.20      thorpej    55: #include <sys/syslog.h>
1.3       pk         56:
                     57: #include <uvm/uvm.h>
                     58:
1.1       pk         59: /*
                     60:  * Pool resource management utility.
1.3       pk         61:  *
                     62:  * Memory is allocated in pages which are split into pieces according
                     63:  * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
                     64:  * in the pool structure and the individual pool items are on a linked list
                     65:  * headed by `ph_itemlist' in each page header. The memory for building
                     66:  * the page list is either taken from the allocated pages themselves (for
                     67:  * small pool items) or taken from an internal pool of page headers (`phpool').
1.1       pk         68:  */
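/*
 * Illustrative sketch (editorial, not part of this revision): how a
 * typical client uses the interface described above.  The `struct foo'
 * type, the foo_* names and the "foopl" wait channel are hypothetical.
 * Because the pool is static, it can be initialized before malloc()
 * is available; a NULL allocator selects the default kmem back-end.
 */
#include <sys/pool.h>

struct foo {
	int	f_refcnt;
	/* ... */
};

static struct pool foo_pool;

void
foo_init(void)
{

	/* item size, no extra alignment, no item offset, no flags */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
}

struct foo *
foo_alloc(int canwait)
{

	/* PR_WAITOK may sleep; use PR_NOWAIT from interrupt context. */
	return (pool_get(&foo_pool, canwait ? PR_WAITOK : PR_NOWAIT));
}

void
foo_free(struct foo *f)
{

	pool_put(&foo_pool, f);
}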
                     69:
1.3       pk         70: /* List of all pools */
1.5       thorpej    71: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.3       pk         72:
                     73: /* Private pool for page header structures */
                     74: static struct pool phpool;
                     75:
1.62      bjh21      76: #ifdef POOL_SUBPAGE
                     77: /* Pool of subpages for use by normal pools. */
                     78: static struct pool psppool;
                     79: #endif
                     80:
1.3       pk         81: /* # of seconds to retain page after last use */
                     82: int pool_inactive_time = 10;
                     83:
                     84: /* Next candidate for drainage (see pool_drain()) */
1.23      thorpej    85: static struct pool     *drainpp;
                     86:
                     87: /* This spin lock protects both pool_head and drainpp. */
                     88: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3       pk         89:
                     90: struct pool_item_header {
                     91:        /* Page headers */
                     92:        TAILQ_ENTRY(pool_item_header)
                     93:                                ph_pagelist;    /* pool page list */
                     94:        TAILQ_HEAD(,pool_item)  ph_itemlist;    /* chunk list for this page */
                     95:        LIST_ENTRY(pool_item_header)
                     96:                                ph_hashlist;    /* Off-page page headers */
                     97:        int                     ph_nmissing;    /* # of chunks in use */
                     98:        caddr_t                 ph_page;        /* this page's address */
                     99:        struct timeval          ph_time;        /* last referenced */
                    100: };
1.61      chs       101: TAILQ_HEAD(pool_pagelist,pool_item_header);
1.3       pk        102:
1.1       pk        103: struct pool_item {
1.3       pk        104: #ifdef DIAGNOSTIC
                    105:        int pi_magic;
1.33      chs       106: #endif
1.25      thorpej   107: #define        PI_MAGIC 0xdeadbeef
1.3       pk        108:        /* Other entries use only this list entry */
                    109:        TAILQ_ENTRY(pool_item)  pi_list;
                    110: };
                    111:
1.25      thorpej   112: #define        PR_HASH_INDEX(pp,addr) \
1.66    ! thorpej   113:        (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \
        !           114:         (PR_HASHTABSIZE - 1))
1.3       pk        115:
1.53      thorpej   116: #define        POOL_NEEDS_CATCHUP(pp)                                          \
                    117:        ((pp)->pr_nitems < (pp)->pr_minitems)
                    118:
1.43      thorpej   119: /*
                    120:  * Pool cache management.
                    121:  *
                    122:  * Pool caches provide a way for constructed objects to be cached by the
                    123:  * pool subsystem.  This can lead to performance improvements by avoiding
                     124:  * needless object construction/destruction; destruction is deferred until
                     125:  * absolutely necessary.
                    126:  *
                    127:  * Caches are grouped into cache groups.  Each cache group references
                    128:  * up to 16 constructed objects.  When a cache allocates an object
                    129:  * from the pool, it calls the object's constructor and places it into
                    130:  * a cache group.  When a cache group frees an object back to the pool,
                    131:  * it first calls the object's destructor.  This allows the object to
                    132:  * persist in constructed form while freed to the cache.
                    133:  *
                    134:  * Multiple caches may exist for each pool.  This allows a single
                    135:  * object type to have multiple constructed forms.  The pool references
                    136:  * each cache, so that when a pool is drained by the pagedaemon, it can
                    137:  * drain each individual cache as well.  Each time a cache is drained,
                    138:  * the most idle cache group is freed to the pool in its entirety.
                    139:  *
                     140:  * Pool caches are laid on top of pools.  By layering them, we can avoid
                    141:  * the complexity of cache management for pools which would not benefit
                    142:  * from it.
                    143:  */
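/*
 * Illustrative sketch (editorial, not part of this revision): caching
 * constructed objects on top of the hypothetical foo_pool from the
 * sketch above.  The constructor/destructor signatures follow
 * pool_cache_init() as declared in <sys/pool.h> of this vintage;
 * foo_ctor/foo_dtor and the other foo_* names are hypothetical.
 */
static struct pool_cache foo_cache;

static int
foo_ctor(void *arg, void *object, int flags)
{
	struct foo *f = object;

	f->f_refcnt = 0;
	/* ...expensive one-time construction... */
	return (0);
}

static void
foo_dtor(void *arg, void *object)
{

	/* ...tear down whatever foo_ctor built... */
}

void
foo_cache_setup(void)
{

	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
}

struct foo *
foo_cached_alloc(void)
{

	/* Returns a constructed object, from the cache if possible. */
	return (pool_cache_get(&foo_cache, PR_WAITOK));
}

void
foo_cached_free(struct foo *f)
{

	/* The object stays constructed while it sits in the cache. */
	pool_cache_put(&foo_cache, f);
}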
                    144:
                    145: /* The cache group pool. */
                    146: static struct pool pcgpool;
                    147:
                    148: /* The pool cache group. */
                    149: #define        PCG_NOBJECTS            16
                    150: struct pool_cache_group {
                    151:        TAILQ_ENTRY(pool_cache_group)
                    152:                pcg_list;       /* link in the pool cache's group list */
                    153:        u_int   pcg_avail;      /* # available objects */
                    154:                                /* pointers to the objects */
                    155:        void    *pcg_objects[PCG_NOBJECTS];
                    156: };
1.3       pk        157:
1.43      thorpej   158: static void    pool_cache_reclaim(struct pool_cache *);
1.3       pk        159:
1.42      thorpej   160: static int     pool_catchup(struct pool *);
1.55      thorpej   161: static void    pool_prime_page(struct pool *, caddr_t,
                    162:                    struct pool_item_header *);
1.66    ! thorpej   163:
        !           164: void           *pool_allocator_alloc(struct pool *, int);
        !           165: void           pool_allocator_free(struct pool *, void *);
1.3       pk        166:
1.42      thorpej   167: static void pool_print1(struct pool *, const char *,
                    168:        void (*)(const char *, ...));
1.3       pk        169:
                    170: /*
1.52      thorpej   171:  * Pool log entry. An array of these is allocated in pool_init().
1.3       pk        172:  */
                    173: struct pool_log {
                    174:        const char      *pl_file;
                    175:        long            pl_line;
                    176:        int             pl_action;
1.25      thorpej   177: #define        PRLOG_GET       1
                    178: #define        PRLOG_PUT       2
1.3       pk        179:        void            *pl_addr;
1.1       pk        180: };
                    181:
1.3       pk        182: /* Number of entries in pool log buffers */
1.17      thorpej   183: #ifndef POOL_LOGSIZE
                    184: #define        POOL_LOGSIZE    10
                    185: #endif
                    186:
                    187: int pool_logsize = POOL_LOGSIZE;
1.1       pk        188:
1.59      thorpej   189: #ifdef POOL_DIAGNOSTIC
1.42      thorpej   190: static __inline void
                    191: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3       pk        192: {
                    193:        int n = pp->pr_curlogentry;
                    194:        struct pool_log *pl;
                    195:
1.20      thorpej   196:        if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3       pk        197:                return;
                    198:
                    199:        /*
                    200:         * Fill in the current entry. Wrap around and overwrite
                    201:         * the oldest entry if necessary.
                    202:         */
                    203:        pl = &pp->pr_log[n];
                    204:        pl->pl_file = file;
                    205:        pl->pl_line = line;
                    206:        pl->pl_action = action;
                    207:        pl->pl_addr = v;
                    208:        if (++n >= pp->pr_logsize)
                    209:                n = 0;
                    210:        pp->pr_curlogentry = n;
                    211: }
                    212:
                    213: static void
1.42      thorpej   214: pr_printlog(struct pool *pp, struct pool_item *pi,
                    215:     void (*pr)(const char *, ...))
1.3       pk        216: {
                    217:        int i = pp->pr_logsize;
                    218:        int n = pp->pr_curlogentry;
                    219:
1.20      thorpej   220:        if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3       pk        221:                return;
                    222:
                    223:        /*
                    224:         * Print all entries in this pool's log.
                    225:         */
                    226:        while (i-- > 0) {
                    227:                struct pool_log *pl = &pp->pr_log[n];
                    228:                if (pl->pl_action != 0) {
1.25      thorpej   229:                        if (pi == NULL || pi == pl->pl_addr) {
                    230:                                (*pr)("\tlog entry %d:\n", i);
                    231:                                (*pr)("\t\taction = %s, addr = %p\n",
                    232:                                    pl->pl_action == PRLOG_GET ? "get" : "put",
                    233:                                    pl->pl_addr);
                    234:                                (*pr)("\t\tfile: %s at line %lu\n",
                    235:                                    pl->pl_file, pl->pl_line);
                    236:                        }
1.3       pk        237:                }
                    238:                if (++n >= pp->pr_logsize)
                    239:                        n = 0;
                    240:        }
                    241: }
1.25      thorpej   242:
1.42      thorpej   243: static __inline void
                    244: pr_enter(struct pool *pp, const char *file, long line)
1.25      thorpej   245: {
                    246:
1.34      thorpej   247:        if (__predict_false(pp->pr_entered_file != NULL)) {
1.25      thorpej   248:                printf("pool %s: reentrancy at file %s line %ld\n",
                    249:                    pp->pr_wchan, file, line);
                    250:                printf("         previous entry at file %s line %ld\n",
                    251:                    pp->pr_entered_file, pp->pr_entered_line);
                    252:                panic("pr_enter");
                    253:        }
                    254:
                    255:        pp->pr_entered_file = file;
                    256:        pp->pr_entered_line = line;
                    257: }
                    258:
1.42      thorpej   259: static __inline void
                    260: pr_leave(struct pool *pp)
1.25      thorpej   261: {
                    262:
1.34      thorpej   263:        if (__predict_false(pp->pr_entered_file == NULL)) {
1.25      thorpej   264:                printf("pool %s not entered?\n", pp->pr_wchan);
                    265:                panic("pr_leave");
                    266:        }
                    267:
                    268:        pp->pr_entered_file = NULL;
                    269:        pp->pr_entered_line = 0;
                    270: }
                    271:
1.42      thorpej   272: static __inline void
                    273: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25      thorpej   274: {
                    275:
                    276:        if (pp->pr_entered_file != NULL)
                    277:                (*pr)("\n\tcurrently entered from file %s line %ld\n",
                    278:                    pp->pr_entered_file, pp->pr_entered_line);
                    279: }
1.3       pk        280: #else
1.25      thorpej   281: #define        pr_log(pp, v, action, file, line)
                    282: #define        pr_printlog(pp, pi, pr)
                    283: #define        pr_enter(pp, file, line)
                    284: #define        pr_leave(pp)
                    285: #define        pr_enter_check(pp, pr)
1.59      thorpej   286: #endif /* POOL_DIAGNOSTIC */
1.3       pk        287:
                    288: /*
                    289:  * Return the pool page header based on page address.
                    290:  */
1.42      thorpej   291: static __inline struct pool_item_header *
                    292: pr_find_pagehead(struct pool *pp, caddr_t page)
1.3       pk        293: {
                    294:        struct pool_item_header *ph;
                    295:
1.20      thorpej   296:        if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3       pk        297:                return ((struct pool_item_header *)(page + pp->pr_phoffset));
                    298:
                    299:        for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
                    300:             ph != NULL;
                    301:             ph = LIST_NEXT(ph, ph_hashlist)) {
                    302:                if (ph->ph_page == page)
                    303:                        return (ph);
                    304:        }
                    305:        return (NULL);
                    306: }
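/*
 * Worked example (editorial, assuming 4KB pages): with pa_pagesz ==
 * 4096, pa_pagemask == ~(4096 - 1) == 0xfffff000, so an item at
 * 0xc12345a8 lies on the page at 0xc1234000:
 *
 *	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
 *	ph = pr_find_pagehead(pp, page);
 *
 * With PR_PHINPAGE the header then sits at page + pr_phoffset;
 * otherwise PR_HASH_INDEX() hashes the page address into pr_hashtab.
 */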
                    307:
                    308: /*
                    309:  * Remove a page from the pool.
                    310:  */
1.42      thorpej   311: static __inline void
1.61      chs       312: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
                    313:      struct pool_pagelist *pq)
1.3       pk        314: {
1.61      chs       315:        int s;
1.3       pk        316:
                    317:        /*
1.7       thorpej   318:         * If the page was idle, decrement the idle page count.
1.3       pk        319:         */
1.6       thorpej   320:        if (ph->ph_nmissing == 0) {
                    321: #ifdef DIAGNOSTIC
                    322:                if (pp->pr_nidle == 0)
                    323:                        panic("pr_rmpage: nidle inconsistent");
1.20      thorpej   324:                if (pp->pr_nitems < pp->pr_itemsperpage)
                    325:                        panic("pr_rmpage: nitems inconsistent");
1.6       thorpej   326: #endif
                    327:                pp->pr_nidle--;
                    328:        }
1.7       thorpej   329:
1.20      thorpej   330:        pp->pr_nitems -= pp->pr_itemsperpage;
                    331:
1.7       thorpej   332:        /*
1.61      chs       333:         * Unlink a page from the pool and release it (or queue it for release).
1.7       thorpej   334:         */
                    335:        TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1.61      chs       336:        if (pq) {
                    337:                TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
                    338:        } else {
1.66    ! thorpej   339:                pool_allocator_free(pp, ph->ph_page);
1.61      chs       340:                if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
                    341:                        LIST_REMOVE(ph, ph_hashlist);
                    342:                        s = splhigh();
                    343:                        pool_put(&phpool, ph);
                    344:                        splx(s);
                    345:                }
                    346:        }
1.7       thorpej   347:        pp->pr_npages--;
                    348:        pp->pr_npagefree++;
1.6       thorpej   349:
1.3       pk        350:        if (pp->pr_curpage == ph) {
                    351:                /*
                    352:                 * Find a new non-empty page header, if any.
                    353:                 * Start search from the page head, to increase the
                    354:                 * chance for "high water" pages to be freed.
                    355:                 */
1.61      chs       356:                TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
1.3       pk        357:                        if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
                    358:                                break;
                    359:
                    360:                pp->pr_curpage = ph;
1.21      thorpej   361:        }
1.3       pk        362: }
                    363:
                    364: /*
                    365:  * Initialize the given pool resource structure.
                    366:  *
                    367:  * We export this routine to allow other kernel parts to declare
                    368:  * static pools that must be initialized before malloc() is available.
                    369:  */
                    370: void
1.42      thorpej   371: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.66    ! thorpej   372:     const char *wchan, struct pool_allocator *palloc)
1.3       pk        373: {
1.16      briggs    374:        int off, slack, i;
1.3       pk        375:
1.25      thorpej   376: #ifdef POOL_DIAGNOSTIC
                    377:        /*
                    378:         * Always log if POOL_DIAGNOSTIC is defined.
                    379:         */
                    380:        if (pool_logsize != 0)
                    381:                flags |= PR_LOGGING;
                    382: #endif
                    383:
1.66    ! thorpej   384: #ifdef POOL_SUBPAGE
        !           385:        /*
        !           386:         * XXX We don't provide a real `nointr' back-end
        !           387:         * yet; all sub-pages come from a kmem back-end.
        !           388:         * Maybe some day...
        !           389:         */
        !           390:        if (palloc == NULL) {
        !           391:                extern struct pool_allocator pool_allocator_kmem_subpage;
        !           392:                palloc = &pool_allocator_kmem_subpage;
        !           393:        }
1.3       pk        394:        /*
1.66    ! thorpej   395:         * We'll assume any user-specified back-end allocator
        !           396:         * will deal with sub-pages, or simply doesn't care.
1.3       pk        397:         */
1.66    ! thorpej   398: #else
        !           399:        if (palloc == NULL)
        !           400:                palloc = &pool_allocator_kmem;
        !           401: #endif /* POOL_SUBPAGE */
        !           402:        if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
        !           403:                if (palloc->pa_pagesz == 0) {
1.62      bjh21     404: #ifdef POOL_SUBPAGE
1.66    ! thorpej   405:                        if (palloc == &pool_allocator_kmem)
        !           406:                                palloc->pa_pagesz = PAGE_SIZE;
        !           407:                        else
        !           408:                                palloc->pa_pagesz = POOL_SUBPAGE;
1.62      bjh21     409: #else
1.66    ! thorpej   410:                        palloc->pa_pagesz = PAGE_SIZE;
        !           411: #endif /* POOL_SUBPAGE */
        !           412:                }
        !           413:
        !           414:                TAILQ_INIT(&palloc->pa_list);
        !           415:
        !           416:                simple_lock_init(&palloc->pa_slock);
        !           417:                palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
        !           418:                palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
        !           419:                palloc->pa_flags |= PA_INITIALIZED;
1.4       thorpej   420:        }
1.3       pk        421:
                    422:        if (align == 0)
                    423:                align = ALIGN(1);
1.14      thorpej   424:
                    425:        if (size < sizeof(struct pool_item))
                    426:                size = sizeof(struct pool_item);
1.3       pk        427:
1.35      pk        428:        size = ALIGN(size);
1.66    ! thorpej   429: #ifdef DIAGNOSTIC
        !           430:        if (size > palloc->pa_pagesz)
1.35      pk        431:                panic("pool_init: pool item size (%lu) too large",
                    432:                      (u_long)size);
1.66    ! thorpej   433: #endif
1.35      pk        434:
1.3       pk        435:        /*
                    436:         * Initialize the pool structure.
                    437:         */
                    438:        TAILQ_INIT(&pp->pr_pagelist);
1.43      thorpej   439:        TAILQ_INIT(&pp->pr_cachelist);
1.3       pk        440:        pp->pr_curpage = NULL;
                    441:        pp->pr_npages = 0;
                    442:        pp->pr_minitems = 0;
                    443:        pp->pr_minpages = 0;
                    444:        pp->pr_maxpages = UINT_MAX;
1.20      thorpej   445:        pp->pr_roflags = flags;
                    446:        pp->pr_flags = 0;
1.35      pk        447:        pp->pr_size = size;
1.3       pk        448:        pp->pr_align = align;
                    449:        pp->pr_wchan = wchan;
1.66    ! thorpej   450:        pp->pr_alloc = palloc;
1.20      thorpej   451:        pp->pr_nitems = 0;
                    452:        pp->pr_nout = 0;
                    453:        pp->pr_hardlimit = UINT_MAX;
                    454:        pp->pr_hardlimit_warning = NULL;
1.31      thorpej   455:        pp->pr_hardlimit_ratecap.tv_sec = 0;
                    456:        pp->pr_hardlimit_ratecap.tv_usec = 0;
                    457:        pp->pr_hardlimit_warning_last.tv_sec = 0;
                    458:        pp->pr_hardlimit_warning_last.tv_usec = 0;
1.3       pk        459:
                    460:        /*
                    461:         * Decide whether to put the page header off page to avoid
                    462:         * wasting too large a part of the page. Off-page page headers
                    463:         * go on a hash table, so we can match a returned item
                    464:         * with its header based on the page address.
                    465:         * We use 1/16 of the page size as the threshold (XXX: tune)
                    466:         */
1.66    ! thorpej   467:        if (pp->pr_size < palloc->pa_pagesz/16) {
1.3       pk        468:                /* Use the end of the page for the page header */
1.20      thorpej   469:                pp->pr_roflags |= PR_PHINPAGE;
1.66    ! thorpej   470:                pp->pr_phoffset = off = palloc->pa_pagesz -
        !           471:                    ALIGN(sizeof(struct pool_item_header));
1.2       pk        472:        } else {
1.3       pk        473:                /* The page header will be taken from our page header pool */
                    474:                pp->pr_phoffset = 0;
1.66    ! thorpej   475:                off = palloc->pa_pagesz;
1.16      briggs    476:                for (i = 0; i < PR_HASHTABSIZE; i++) {
                    477:                        LIST_INIT(&pp->pr_hashtab[i]);
                    478:                }
1.2       pk        479:        }
1.1       pk        480:
1.3       pk        481:        /*
                    482:         * Alignment is to take place at `ioff' within the item. This means
                    483:         * we must reserve up to `align - 1' bytes on the page to allow
                    484:         * appropriate positioning of each item.
                    485:         *
                    486:         * Silently enforce `0 <= ioff < align'.
                    487:         */
                    488:        pp->pr_itemoffset = ioff = ioff % align;
                    489:        pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43      thorpej   490:        KASSERT(pp->pr_itemsperpage != 0);
1.3       pk        491:
                    492:        /*
                    493:         * Use the slack between the chunks and the page header
                    494:         * for "cache coloring".
                    495:         */
                    496:        slack = off - pp->pr_itemsperpage * pp->pr_size;
                    497:        pp->pr_maxcolor = (slack / align) * align;
                    498:        pp->pr_curcolor = 0;
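        /*
         * Worked example (editorial, with assumed numbers): take
         * pa_pagesz == 4096, an in-page header of 40 bytes so that
         * off == 4056, pr_size == 128, align == 4 and ioff == 0.
         * Then pr_itemsperpage == 4056 / 128 == 31, slack ==
         * 4056 - 31 * 128 == 88 and pr_maxcolor == 88, so successive
         * pages can start their items at offsets 0, 4, 8, ... 88,
         * spreading the items across cache lines.
         */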
                    499:
                    500:        pp->pr_nget = 0;
                    501:        pp->pr_nfail = 0;
                    502:        pp->pr_nput = 0;
                    503:        pp->pr_npagealloc = 0;
                    504:        pp->pr_npagefree = 0;
1.1       pk        505:        pp->pr_hiwat = 0;
1.8       thorpej   506:        pp->pr_nidle = 0;
1.3       pk        507:
1.59      thorpej   508: #ifdef POOL_DIAGNOSTIC
1.25      thorpej   509:        if (flags & PR_LOGGING) {
                    510:                if (kmem_map == NULL ||
                    511:                    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
                    512:                     M_TEMP, M_NOWAIT)) == NULL)
1.20      thorpej   513:                        pp->pr_roflags &= ~PR_LOGGING;
1.3       pk        514:                pp->pr_curlogentry = 0;
                    515:                pp->pr_logsize = pool_logsize;
                    516:        }
1.59      thorpej   517: #endif
1.25      thorpej   518:
                    519:        pp->pr_entered_file = NULL;
                    520:        pp->pr_entered_line = 0;
1.3       pk        521:
1.21      thorpej   522:        simple_lock_init(&pp->pr_slock);
1.1       pk        523:
1.3       pk        524:        /*
1.43      thorpej   525:         * Initialize private page header pool and cache magazine pool if we
                    526:         * haven't done so yet.
1.23      thorpej   527:         * XXX LOCKING.
1.3       pk        528:         */
                    529:        if (phpool.pr_size == 0) {
1.62      bjh21     530: #ifdef POOL_SUBPAGE
                    531:                pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
1.66    ! thorpej   532:                    "phpool", &pool_allocator_kmem);
1.62      bjh21     533:                pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.66    ! thorpej   534:                    PR_RECURSIVE, "psppool", &pool_allocator_kmem);
1.62      bjh21     535: #else
1.3       pk        536:                pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
1.66    ! thorpej   537:                    0, "phpool", NULL);
1.62      bjh21     538: #endif
1.43      thorpej   539:                pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.66    ! thorpej   540:                    0, "pcgpool", NULL);
1.1       pk        541:        }
                    542:
1.23      thorpej   543:        /* Insert into the list of all pools. */
                    544:        simple_lock(&pool_head_slock);
                    545:        TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
                    546:        simple_unlock(&pool_head_slock);
1.66    ! thorpej   547:
        !           548:        /* Insert this into the list of pools using this allocator. */
        !           549:        simple_lock(&palloc->pa_slock);
        !           550:        TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
        !           551:        simple_unlock(&palloc->pa_slock);
1.1       pk        552: }
                    553:
                    554: /*
                     555:  * De-commission a pool resource.
                    556:  */
                    557: void
1.42      thorpej   558: pool_destroy(struct pool *pp)
1.1       pk        559: {
1.3       pk        560:        struct pool_item_header *ph;
1.43      thorpej   561:        struct pool_cache *pc;
                    562:
1.66    ! thorpej   563:        /* Locking order: pool_allocator -> pool */
        !           564:        simple_lock(&pp->pr_alloc->pa_slock);
        !           565:        TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
        !           566:        simple_unlock(&pp->pr_alloc->pa_slock);
        !           567:
1.43      thorpej   568:        /* Destroy all caches for this pool. */
                    569:        while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
                    570:                pool_cache_destroy(pc);
1.3       pk        571:
                    572: #ifdef DIAGNOSTIC
1.20      thorpej   573:        if (pp->pr_nout != 0) {
1.25      thorpej   574:                pr_printlog(pp, NULL, printf);
1.20      thorpej   575:                panic("pool_destroy: pool busy: still out: %u\n",
                    576:                    pp->pr_nout);
1.3       pk        577:        }
                    578: #endif
1.1       pk        579:
1.3       pk        580:        /* Remove all pages */
1.20      thorpej   581:        if ((pp->pr_roflags & PR_STATIC) == 0)
1.61      chs       582:                while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
                    583:                        pr_rmpage(pp, ph, NULL);
1.3       pk        584:
                    585:        /* Remove from global pool list */
1.23      thorpej   586:        simple_lock(&pool_head_slock);
1.3       pk        587:        TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.61      chs       588:        if (drainpp == pp) {
                    589:                drainpp = NULL;
                    590:        }
1.23      thorpej   591:        simple_unlock(&pool_head_slock);
1.3       pk        592:
1.59      thorpej   593: #ifdef POOL_DIAGNOSTIC
1.20      thorpej   594:        if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3       pk        595:                free(pp->pr_log, M_TEMP);
1.59      thorpej   596: #endif
1.2       pk        597:
1.20      thorpej   598:        if (pp->pr_roflags & PR_FREEHEADER)
1.3       pk        599:                free(pp, M_POOL);
1.1       pk        600: }
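/*
 * Illustrative sketch (editorial): pairing the hypothetical foo_pool
 * from the earlier sketch with a teardown path.  Every item must have
 * been returned with pool_put() first, or the DIAGNOSTIC pr_nout
 * check above will panic.
 */
void
foo_fini(void)
{

	pool_destroy(&foo_pool);
}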
                    601:
1.55      thorpej   602: static __inline struct pool_item_header *
                    603: pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
                    604: {
                    605:        struct pool_item_header *ph;
                    606:        int s;
                    607:
                    608:        LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
                    609:
                    610:        if ((pp->pr_roflags & PR_PHINPAGE) != 0)
                    611:                ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
                    612:        else {
                    613:                s = splhigh();
                    614:                ph = pool_get(&phpool, flags);
                    615:                splx(s);
                    616:        }
                    617:
                    618:        return (ph);
                    619: }
1.1       pk        620:
                    621: /*
1.3       pk        622:  * Grab an item from the pool; must be called at appropriate spl level
1.1       pk        623:  */
1.3       pk        624: void *
1.59      thorpej   625: #ifdef POOL_DIAGNOSTIC
1.42      thorpej   626: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56      sommerfe  627: #else
                    628: pool_get(struct pool *pp, int flags)
                    629: #endif
1.1       pk        630: {
                    631:        struct pool_item *pi;
1.3       pk        632:        struct pool_item_header *ph;
1.55      thorpej   633:        void *v;
1.1       pk        634:
1.2       pk        635: #ifdef DIAGNOSTIC
1.34      thorpej   636:        if (__predict_false((pp->pr_roflags & PR_STATIC) &&
                    637:                            (flags & PR_MALLOCOK))) {
1.25      thorpej   638:                pr_printlog(pp, NULL, printf);
1.2       pk        639:                panic("pool_get: static");
1.3       pk        640:        }
1.2       pk        641:
1.37      sommerfe  642:        if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
                    643:                            (flags & PR_WAITOK) != 0))
1.3       pk        644:                panic("pool_get: must have NOWAIT");
1.58      thorpej   645:
                    646: #ifdef LOCKDEBUG
                    647:        if (flags & PR_WAITOK)
                    648:                simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
1.56      sommerfe  649: #endif
1.58      thorpej   650: #endif /* DIAGNOSTIC */
1.1       pk        651:
1.21      thorpej   652:        simple_lock(&pp->pr_slock);
1.25      thorpej   653:        pr_enter(pp, file, line);
1.20      thorpej   654:
                    655:  startover:
                    656:        /*
                    657:         * Check to see if we've reached the hard limit.  If we have,
                    658:         * and we can wait, then wait until an item has been returned to
                    659:         * the pool.
                    660:         */
                    661: #ifdef DIAGNOSTIC
1.34      thorpej   662:        if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25      thorpej   663:                pr_leave(pp);
1.21      thorpej   664:                simple_unlock(&pp->pr_slock);
1.20      thorpej   665:                panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
                    666:        }
                    667: #endif
1.34      thorpej   668:        if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.29      sommerfe  669:                if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20      thorpej   670:                        /*
                    671:                         * XXX: A warning isn't logged in this case.  Should
                    672:                         * it be?
                    673:                         */
                    674:                        pp->pr_flags |= PR_WANTED;
1.25      thorpej   675:                        pr_leave(pp);
1.40      sommerfe  676:                        ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25      thorpej   677:                        pr_enter(pp, file, line);
1.20      thorpej   678:                        goto startover;
                    679:                }
1.31      thorpej   680:
                    681:                /*
                    682:                 * Log a message that the hard limit has been hit.
                    683:                 */
                    684:                if (pp->pr_hardlimit_warning != NULL &&
                    685:                    ratecheck(&pp->pr_hardlimit_warning_last,
                    686:                              &pp->pr_hardlimit_ratecap))
                    687:                        log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21      thorpej   688:
                    689:                pp->pr_nfail++;
                    690:
1.25      thorpej   691:                pr_leave(pp);
1.21      thorpej   692:                simple_unlock(&pp->pr_slock);
1.20      thorpej   693:                return (NULL);
                    694:        }
                    695:
1.3       pk        696:        /*
                    697:         * The convention we use is that if `curpage' is not NULL, then
                    698:         * it points at a non-empty bucket. In particular, `curpage'
                    699:         * never points at a page header which has PR_PHINPAGE set and
                    700:         * has no items in its bucket.
                    701:         */
1.20      thorpej   702:        if ((ph = pp->pr_curpage) == NULL) {
                    703: #ifdef DIAGNOSTIC
                    704:                if (pp->pr_nitems != 0) {
1.21      thorpej   705:                        simple_unlock(&pp->pr_slock);
1.20      thorpej   706:                        printf("pool_get: %s: curpage NULL, nitems %u\n",
                    707:                            pp->pr_wchan, pp->pr_nitems);
                    708:                        panic("pool_get: nitems inconsistent\n");
                    709:                }
                    710: #endif
                    711:
1.21      thorpej   712:                /*
                    713:                 * Call the back-end page allocator for more memory.
                    714:                 * Release the pool lock, as the back-end page allocator
                    715:                 * may block.
                    716:                 */
1.25      thorpej   717:                pr_leave(pp);
1.21      thorpej   718:                simple_unlock(&pp->pr_slock);
1.66    ! thorpej   719:                v = pool_allocator_alloc(pp, flags);
1.55      thorpej   720:                if (__predict_true(v != NULL))
                    721:                        ph = pool_alloc_item_header(pp, v, flags);
1.21      thorpej   722:                simple_lock(&pp->pr_slock);
1.25      thorpej   723:                pr_enter(pp, file, line);
1.15      pk        724:
1.55      thorpej   725:                if (__predict_false(v == NULL || ph == NULL)) {
                    726:                        if (v != NULL)
1.66    ! thorpej   727:                                pool_allocator_free(pp, v);
1.55      thorpej   728:
1.21      thorpej   729:                        /*
1.55      thorpej   730:                         * We were unable to allocate a page or item
                    731:                         * header, but we released the lock during
                    732:                         * allocation, so perhaps items were freed
                    733:                         * back to the pool.  Check for this case.
1.21      thorpej   734:                         */
                    735:                        if (pp->pr_curpage != NULL)
                    736:                                goto startover;
1.15      pk        737:
1.3       pk        738:                        if ((flags & PR_WAITOK) == 0) {
                    739:                                pp->pr_nfail++;
1.25      thorpej   740:                                pr_leave(pp);
1.21      thorpej   741:                                simple_unlock(&pp->pr_slock);
1.1       pk        742:                                return (NULL);
1.3       pk        743:                        }
                    744:
1.15      pk        745:                        /*
                    746:                         * Wait for items to be returned to this pool.
1.21      thorpej   747:                         *
1.20      thorpej   748:                         * XXX: maybe we should wake up once a second and
                    749:                         * try again?
1.15      pk        750:                         */
1.1       pk        751:                        pp->pr_flags |= PR_WANTED;
1.66    ! thorpej   752:                        /* PA_WANTED is already set on the allocator. */
1.25      thorpej   753:                        pr_leave(pp);
1.40      sommerfe  754:                        ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25      thorpej   755:                        pr_enter(pp, file, line);
1.20      thorpej   756:                        goto startover;
1.1       pk        757:                }
1.3       pk        758:
1.15      pk        759:                /* We have more memory; add it to the pool */
1.55      thorpej   760:                pool_prime_page(pp, v, ph);
1.15      pk        761:                pp->pr_npagealloc++;
                    762:
1.20      thorpej   763:                /* Start the allocation process over. */
                    764:                goto startover;
1.3       pk        765:        }
                    766:
1.34      thorpej   767:        if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
1.25      thorpej   768:                pr_leave(pp);
1.21      thorpej   769:                simple_unlock(&pp->pr_slock);
1.3       pk        770:                panic("pool_get: %s: page empty", pp->pr_wchan);
1.21      thorpej   771:        }
1.20      thorpej   772: #ifdef DIAGNOSTIC
1.34      thorpej   773:        if (__predict_false(pp->pr_nitems == 0)) {
1.25      thorpej   774:                pr_leave(pp);
1.21      thorpej   775:                simple_unlock(&pp->pr_slock);
1.20      thorpej   776:                printf("pool_get: %s: items on itemlist, nitems %u\n",
                    777:                    pp->pr_wchan, pp->pr_nitems);
                    778:                panic("pool_get: nitems inconsistent\n");
                    779:        }
1.65      enami     780: #endif
1.56      sommerfe  781:
1.65      enami     782: #ifdef POOL_DIAGNOSTIC
1.3       pk        783:        pr_log(pp, v, PRLOG_GET, file, line);
1.65      enami     784: #endif
1.3       pk        785:
1.65      enami     786: #ifdef DIAGNOSTIC
1.34      thorpej   787:        if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1.25      thorpej   788:                pr_printlog(pp, pi, printf);
1.3       pk        789:                panic("pool_get(%s): free list modified: magic=%x; page %p;"
                    790:                       " item addr %p\n",
                    791:                        pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
                    792:        }
                    793: #endif
                    794:
                    795:        /*
                    796:         * Remove from item list.
                    797:         */
                    798:        TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
1.20      thorpej   799:        pp->pr_nitems--;
                    800:        pp->pr_nout++;
1.6       thorpej   801:        if (ph->ph_nmissing == 0) {
                    802: #ifdef DIAGNOSTIC
1.34      thorpej   803:                if (__predict_false(pp->pr_nidle == 0))
1.6       thorpej   804:                        panic("pool_get: nidle inconsistent");
                    805: #endif
                    806:                pp->pr_nidle--;
                    807:        }
1.3       pk        808:        ph->ph_nmissing++;
                    809:        if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
1.21      thorpej   810: #ifdef DIAGNOSTIC
1.34      thorpej   811:                if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
1.25      thorpej   812:                        pr_leave(pp);
1.21      thorpej   813:                        simple_unlock(&pp->pr_slock);
                    814:                        panic("pool_get: %s: nmissing inconsistent",
                    815:                            pp->pr_wchan);
                    816:                }
                    817: #endif
1.3       pk        818:                /*
                    819:                 * Find a new non-empty page header, if any.
                    820:                 * Start search from the page head, to increase
                    821:                 * the chance for "high water" pages to be freed.
                    822:                 *
1.21      thorpej   823:                 * Migrate empty pages to the end of the list.  This
                    824:                 * will speed the update of curpage as pages become
                    825:                 * idle.  Empty pages intermingled with idle pages
                     827:         * are no big deal.  As soon as a page becomes un-empty,
                    827:                 * it will move back to the head of the list.
1.3       pk        828:                 */
                    829:                TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1.21      thorpej   830:                TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
1.61      chs       831:                TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
1.3       pk        832:                        if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
                    833:                                break;
                    834:
                    835:                pp->pr_curpage = ph;
1.1       pk        836:        }
1.3       pk        837:
                    838:        pp->pr_nget++;
1.20      thorpej   839:
                    840:        /*
                    841:         * If we have a low water mark and we are now below that low
                    842:         * water mark, add more items to the pool.
                    843:         */
1.53      thorpej   844:        if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20      thorpej   845:                /*
                    846:                 * XXX: Should we log a warning?  Should we set up a timeout
                    847:                 * to try again in a second or so?  The latter could break
                    848:                 * a caller's assumptions about interrupt protection, etc.
                    849:                 */
                    850:        }
                    851:
1.25      thorpej   852:        pr_leave(pp);
1.21      thorpej   853:        simple_unlock(&pp->pr_slock);
1.1       pk        854:        return (v);
                    855: }
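/*
 * Illustrative sketch (editorial): the low water mark consulted by
 * POOL_NEEDS_CATCHUP() is normally set once at initialization time,
 * e.g. with pool_setlowat() from <sys/pool.h>, so that allocations
 * from interrupt context are less likely to find the pool empty:
 */
void
foo_set_reserve(void)
{

	pool_setlowat(&foo_pool, 16);	/* keep at least 16 items primed */
}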
                    856:
                    857: /*
1.43      thorpej   858:  * Internal version of pool_put().  Pool is already locked/entered.
1.1       pk        859:  */
1.43      thorpej   860: static void
1.56      sommerfe  861: pool_do_put(struct pool *pp, void *v)
1.1       pk        862: {
                    863:        struct pool_item *pi = v;
1.3       pk        864:        struct pool_item_header *ph;
                    865:        caddr_t page;
1.21      thorpej   866:        int s;
1.3       pk        867:
1.61      chs       868:        LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
                    869:
1.66    ! thorpej   870:        page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1.1       pk        871:
1.30      thorpej   872: #ifdef DIAGNOSTIC
1.34      thorpej   873:        if (__predict_false(pp->pr_nout == 0)) {
1.30      thorpej   874:                printf("pool %s: putting with none out\n",
                    875:                    pp->pr_wchan);
                    876:                panic("pool_put");
                    877:        }
                    878: #endif
1.3       pk        879:
1.34      thorpej   880:        if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25      thorpej   881:                pr_printlog(pp, NULL, printf);
1.3       pk        882:                panic("pool_put: %s: page header missing", pp->pr_wchan);
                    883:        }
1.28      thorpej   884:
                    885: #ifdef LOCKDEBUG
                    886:        /*
                    887:         * Check if we're freeing a locked simple lock.
                    888:         */
                    889:        simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
                    890: #endif
1.3       pk        891:
                    892:        /*
                    893:         * Return to item list.
                    894:         */
1.2       pk        895: #ifdef DIAGNOSTIC
1.3       pk        896:        pi->pi_magic = PI_MAGIC;
                    897: #endif
1.32      chs       898: #ifdef DEBUG
                    899:        {
                    900:                int i, *ip = v;
                    901:
                    902:                for (i = 0; i < pp->pr_size / sizeof(int); i++) {
                    903:                        *ip++ = PI_MAGIC;
                    904:                }
                    905:        }
                    906: #endif
                    907:
1.3       pk        908:        TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
                    909:        ph->ph_nmissing--;
                    910:        pp->pr_nput++;
1.20      thorpej   911:        pp->pr_nitems++;
                    912:        pp->pr_nout--;
1.3       pk        913:
                    914:        /* Cancel "pool empty" condition if it exists */
                    915:        if (pp->pr_curpage == NULL)
                    916:                pp->pr_curpage = ph;
                    917:
                    918:        if (pp->pr_flags & PR_WANTED) {
                    919:                pp->pr_flags &= ~PR_WANTED;
1.15      pk        920:                if (ph->ph_nmissing == 0)
                    921:                        pp->pr_nidle++;
1.3       pk        922:                wakeup((caddr_t)pp);
                    923:                return;
                    924:        }
                    925:
                    926:        /*
1.21      thorpej   927:         * If this page is now complete, do one of two things:
                    928:         *
                    929:         *      (1) If we have more pages than the page high water
                    930:         *          mark, free the page back to the system.
                    931:         *
                     932:         *      (2) Move it to the end of the page list, so that
                     933:         *          we minimize our chances of fragmenting the
                     934:         *          pool.  Idle pages migrate to the end of the
                     935:         *          list (along with completely empty pages, so
                     936:         *          that we find un-empty pages more quickly when
                     937:         *          we update curpage) where the pagedaemon can
                     938:         *          more easily sweep them up when pages are scarce.
1.3       pk        939:         */
                    940:        if (ph->ph_nmissing == 0) {
1.6       thorpej   941:                pp->pr_nidle++;
1.3       pk        942:                if (pp->pr_npages > pp->pr_maxpages) {
1.61      chs       943:                        pr_rmpage(pp, ph, NULL);
1.3       pk        944:                } else {
                    945:                        TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
                    946:                        TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
                    947:
1.21      thorpej   948:                        /*
                    949:                         * Update the timestamp on the page.  A page must
                    950:                         * be idle for some period of time before it can
                    951:                         * be reclaimed by the pagedaemon.  This minimizes
                    952:                         * ping-pong'ing for memory.
                    953:                         */
                    954:                        s = splclock();
                    955:                        ph->ph_time = mono_time;
                    956:                        splx(s);
                    957:
                    958:                        /*
                    959:                         * Update the current page pointer.  Just look for
                    960:                         * the first page with any free items.
                    961:                         *
                    962:                         * XXX: Maybe we want an option to look for the
                    963:                         * page with the fewest available items, to minimize
                    964:                         * fragmentation?
                    965:                         */
1.61      chs       966:                        TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
1.3       pk        967:                                if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
                    968:                                        break;
1.1       pk        969:
1.3       pk        970:                        pp->pr_curpage = ph;
1.1       pk        971:                }
                    972:        }
1.21      thorpej   973:        /*
                    974:         * If the page has just become un-empty, move it to the head of
                    975:         * the list, and make it the current page.  The next allocation
                    976:         * will get the item from this page, instead of further fragmenting
                    977:         * the pool.
                    978:         */
                    979:        else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
                    980:                TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
                    981:                TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
                    982:                pp->pr_curpage = ph;
                    983:        }
1.43      thorpej   984: }
                    985:
                    986: /*
                    987:  * Return resource to the pool; must be called at appropriate spl level
                    988:  */
1.59      thorpej   989: #ifdef POOL_DIAGNOSTIC
1.43      thorpej   990: void
                    991: _pool_put(struct pool *pp, void *v, const char *file, long line)
                    992: {
                    993:
                    994:        simple_lock(&pp->pr_slock);
                    995:        pr_enter(pp, file, line);
                    996:
1.56      sommerfe  997:        pr_log(pp, v, PRLOG_PUT, file, line);
                    998:
                    999:        pool_do_put(pp, v);
1.21      thorpej  1000:
1.25      thorpej  1001:        pr_leave(pp);
1.21      thorpej  1002:        simple_unlock(&pp->pr_slock);
1.1       pk       1003: }
1.57      sommerfe 1004: #undef pool_put
1.59      thorpej  1005: #endif /* POOL_DIAGNOSTIC */
1.1       pk       1006:
1.56      sommerfe 1007: void
                   1008: pool_put(struct pool *pp, void *v)
                   1009: {
                   1010:
                   1011:        simple_lock(&pp->pr_slock);
                   1012:
                   1013:        pool_do_put(pp, v);
                   1014:
                   1015:        simple_unlock(&pp->pr_slock);
                   1016: }
1.57      sommerfe 1017:
1.59      thorpej  1018: #ifdef POOL_DIAGNOSTIC
1.57      sommerfe 1019: #define                pool_put(h, v)  _pool_put((h), (v), __FILE__, __LINE__)
1.56      sommerfe 1020: #endif
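
/*
 * Illustrative sketch, not part of this file: a typical consumer of the
 * get/put interface above.  The pool "example_pool" and struct
 * example_item are hypothetical, and the pool is assumed to have been
 * initialized elsewhere with pool_init().
 */
struct example_item {
        int     ei_value;
};

extern struct pool example_pool;

void
example_item_cycle(void)
{
        struct example_item *ei;
        int s;

        /* Pools used from interrupt context are accessed at splvm(). */
        s = splvm();
        ei = pool_get(&example_pool, PR_NOWAIT);
        if (ei != NULL) {
                ei->ei_value = 42;
                /*
                 * With POOL_DIAGNOSTIC defined, pool_put() is the macro
                 * above and expands to _pool_put(&example_pool, ei,
                 * __FILE__, __LINE__), recording the caller in the
                 * pool's operation log.
                 */
                pool_put(&example_pool, ei);
        }
        splx(s);
}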
                   1021:
1.1       pk       1022: /*
1.55      thorpej  1023:  * Add N items to the pool.
                   1024:  */
                   1025: int
                   1026: pool_prime(struct pool *pp, int n)
                   1027: {
                   1028:        struct pool_item_header *ph;
                   1029:        caddr_t cp;
                   1030:        int newpages, error = 0;
                   1031:
                   1032:        simple_lock(&pp->pr_slock);
                   1033:
                   1034:        newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
                   1035:
                   1036:        while (newpages-- > 0) {
                   1037:                simple_unlock(&pp->pr_slock);
1.66    ! thorpej  1038:                cp = pool_allocator_alloc(pp, PR_NOWAIT);
1.55      thorpej  1039:                if (__predict_true(cp != NULL))
                   1040:                        ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
                   1041:                simple_lock(&pp->pr_slock);
                   1042:
                   1043:                if (__predict_false(cp == NULL || ph == NULL)) {
                   1044:                        error = ENOMEM;
                   1045:                        if (cp != NULL)
1.66    ! thorpej  1046:                                pool_allocator_free(pp, cp);
1.55      thorpej  1047:                        break;
                   1048:                }
                   1049:
                   1050:                pool_prime_page(pp, cp, ph);
                   1051:                pp->pr_npagealloc++;
                   1052:                pp->pr_minpages++;
                   1053:        }
                   1054:
                   1055:        if (pp->pr_minpages >= pp->pr_maxpages)
                   1056:                pp->pr_maxpages = pp->pr_minpages + 1;  /* XXX */
                   1057:
                   1058:        simple_unlock(&pp->pr_slock);
                   1059:        return (error);
                   1060: }
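
/*
 * Worked example of the page computation above (hypothetical numbers):
 * with pr_itemsperpage == 42, pool_prime(pp, 100) yields
 * newpages == roundup(100, 42) / 42 == 126 / 42 == 3, so three pages
 * are allocated up front and pr_minpages becomes 3.
 */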
                   1061:
                   1062: /*
1.3       pk       1063:  * Add a page worth of items to the pool.
1.21      thorpej  1064:  *
                   1065:  * Note, we must be called with the pool descriptor LOCKED.
1.3       pk       1066:  */
1.55      thorpej  1067: static void
                   1068: pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1.3       pk       1069: {
                   1070:        struct pool_item *pi;
                   1071:        caddr_t cp = storage;
                   1072:        unsigned int align = pp->pr_align;
                   1073:        unsigned int ioff = pp->pr_itemoffset;
1.55      thorpej  1074:        int n;
1.36      pk       1075:
1.66    ! thorpej  1076: #ifdef DIAGNOSTIC
        !          1077:        if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36      pk       1078:                panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66    ! thorpej  1079: #endif
1.3       pk       1080:
1.55      thorpej  1081:        if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.3       pk       1082:                LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1.55      thorpej  1083:                    ph, ph_hashlist);
1.3       pk       1084:
                   1085:        /*
                   1086:         * Insert page header.
                   1087:         */
                   1088:        TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
                   1089:        TAILQ_INIT(&ph->ph_itemlist);
                   1090:        ph->ph_page = storage;
                   1091:        ph->ph_nmissing = 0;
1.21      thorpej  1092:        memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1.3       pk       1093:
1.6       thorpej  1094:        pp->pr_nidle++;
                   1095:
1.3       pk       1096:        /*
                   1097:         * Color this page.
                   1098:         */
                   1099:        cp = (caddr_t)(cp + pp->pr_curcolor);
                   1100:        if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
                   1101:                pp->pr_curcolor = 0;
                   1102:
                   1103:        /*
                   1104:         * Adjust storage to apply alignment to `pr_itemoffset' in each item.
                   1105:         */
                   1106:        if (ioff != 0)
                   1107:                cp = (caddr_t)(cp + (align - ioff));
                   1108:
                   1109:        /*
                   1110:         * Insert remaining chunks on the bucket list.
                   1111:         */
                   1112:        n = pp->pr_itemsperpage;
1.20      thorpej  1113:        pp->pr_nitems += n;
1.3       pk       1114:
                   1115:        while (n--) {
                   1116:                pi = (struct pool_item *)cp;
                   1117:
                   1118:                /* Insert on page list */
                   1119:                TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
                   1120: #ifdef DIAGNOSTIC
                   1121:                pi->pi_magic = PI_MAGIC;
                   1122: #endif
                   1123:                cp = (caddr_t)(cp + pp->pr_size);
                   1124:        }
                   1125:
                   1126:        /*
                   1127:         * If the pool was depleted, point at the new page.
                   1128:         */
                   1129:        if (pp->pr_curpage == NULL)
                   1130:                pp->pr_curpage = ph;
                   1131:
                   1132:        if (++pp->pr_npages > pp->pr_hiwat)
                   1133:                pp->pr_hiwat = pp->pr_npages;
                   1134: }
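
/*
 * Worked example of the coloring above (hypothetical values): with
 * pr_align == 64 and pr_maxcolor == 192, successive pages start their
 * item area at offsets 0, 64, 128, 192, 0, ... into the page, so the
 * frequently-referenced first items of different pages fall on
 * different cache lines instead of all colliding at page offset 0.
 */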
                   1135:
1.20      thorpej  1136: /*
1.52      thorpej  1137:  * Used by pool_get() when nitems drops below the low water mark.  Adds
                   1138:  * enough pages to the pool to bring nitems back up to that mark.
1.20      thorpej  1139:  *
1.21      thorpej  1140:  * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20      thorpej  1141:  *
                   1142:  * Note 2, this doesn't work with static pools.
                   1143:  *
                   1144:  * Note 3, we must be called with the pool already locked, and we return
                   1145:  * with it locked.
                   1146:  */
                   1147: static int
1.42      thorpej  1148: pool_catchup(struct pool *pp)
1.20      thorpej  1149: {
1.55      thorpej  1150:        struct pool_item_header *ph;
1.20      thorpej  1151:        caddr_t cp;
                   1152:        int error = 0;
                   1153:
                   1154:        if (pp->pr_roflags & PR_STATIC) {
                   1155:                /*
                   1156:                 * We dropped below the low water mark, and this is not a
                   1157:                 * good thing.  Log a warning.
1.21      thorpej  1158:                 *
                   1159:                 * XXX: rate-limit this?
1.20      thorpej  1160:                 */
                   1161:                printf("WARNING: static pool `%s' dropped below low water "
                   1162:                    "mark\n", pp->pr_wchan);
                   1163:                return (0);
                   1164:        }
                   1165:
1.54      thorpej  1166:        while (POOL_NEEDS_CATCHUP(pp)) {
1.20      thorpej  1167:                /*
1.21      thorpej  1168:                 * Call the page back-end allocator for more memory.
                   1169:                 *
                   1170:                 * XXX: We never wait, so should we bother unlocking
                   1171:                 * the pool descriptor?
1.20      thorpej  1172:                 */
1.21      thorpej  1173:                simple_unlock(&pp->pr_slock);
1.66    ! thorpej  1174:                cp = pool_allocator_alloc(pp, PR_NOWAIT);
1.55      thorpej  1175:                if (__predict_true(cp != NULL))
                   1176:                        ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1.21      thorpej  1177:                simple_lock(&pp->pr_slock);
1.55      thorpej  1178:                if (__predict_false(cp == NULL || ph == NULL)) {
                   1179:                        if (cp != NULL)
1.66    ! thorpej  1180:                                pool_allocator_free(pp, cp);
1.20      thorpej  1181:                        error = ENOMEM;
                   1182:                        break;
                   1183:                }
1.55      thorpej  1184:                pool_prime_page(pp, cp, ph);
1.26      thorpej  1185:                pp->pr_npagealloc++;
1.20      thorpej  1186:        }
                   1187:
                   1188:        return (error);
                   1189: }
                   1190:
1.3       pk       1191: void
1.42      thorpej  1192: pool_setlowat(struct pool *pp, int n)
1.3       pk       1193: {
1.20      thorpej  1194:        int error;
1.15      pk       1195:
1.21      thorpej  1196:        simple_lock(&pp->pr_slock);
                   1197:
1.3       pk       1198:        pp->pr_minitems = n;
1.15      pk       1199:        pp->pr_minpages = (n == 0)
                   1200:                ? 0
1.18      thorpej  1201:                : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20      thorpej  1202:
                   1203:        /* Make sure we're caught up with the newly-set low water mark. */
1.53      thorpej  1204:        if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) {
1.20      thorpej  1205:                /*
                   1206:                 * XXX: Should we log a warning?  Should we set up a timeout
                   1207:                 * to try again in a second or so?  The latter could break
                   1208:                 * a caller's assumptions about interrupt protection, etc.
                   1209:                 */
                   1210:        }
1.21      thorpej  1211:
                   1212:        simple_unlock(&pp->pr_slock);
1.3       pk       1213: }
                   1214:
                   1215: void
1.42      thorpej  1216: pool_sethiwat(struct pool *pp, int n)
1.3       pk       1217: {
1.15      pk       1218:
1.21      thorpej  1219:        simple_lock(&pp->pr_slock);
                   1220:
1.15      pk       1221:        pp->pr_maxpages = (n == 0)
                   1222:                ? 0
1.18      thorpej  1223:                : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21      thorpej  1224:
                   1225:        simple_unlock(&pp->pr_slock);
1.3       pk       1226: }
                   1227:
1.20      thorpej  1228: void
1.42      thorpej  1229: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20      thorpej  1230: {
                   1231:
1.21      thorpej  1232:        simple_lock(&pp->pr_slock);
1.20      thorpej  1233:
                   1234:        pp->pr_hardlimit = n;
                   1235:        pp->pr_hardlimit_warning = warnmess;
1.31      thorpej  1236:        pp->pr_hardlimit_ratecap.tv_sec = ratecap;
                   1237:        pp->pr_hardlimit_warning_last.tv_sec = 0;
                   1238:        pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20      thorpej  1239:
                   1240:        /*
1.21      thorpej  1241:         * In-line version of pool_sethiwat(), because we don't want to
                   1242:         * release the lock.
1.20      thorpej  1243:         */
                   1244:        pp->pr_maxpages = (n == 0)
                   1245:                ? 0
                   1246:                : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21      thorpej  1247:
                   1248:        simple_unlock(&pp->pr_slock);
1.20      thorpej  1249: }
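
/*
 * Illustrative sketch, not part of this file: a subsystem typically
 * applies the knobs above right after pool_init().  The pool, the
 * numbers, and the warning text are all hypothetical, assuming the
 * seven-argument pool_init() this revision exports (a NULL allocator
 * selects the default).
 */
static struct pool example_pl;

void
example_pl_setup(void)
{

        pool_init(&example_pl, 256, 0, 0, 0, "examplepl", NULL);
        pool_setlowat(&example_pl, 16);         /* keep ~16 items ready */
        pool_sethiwat(&example_pl, 1024);       /* reclaim pages above this */
        pool_sethardlimit(&example_pl, 4096,
            "WARNING: examplepl limit reached", 60);
}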
1.3       pk       1250:
                   1251: /*
                   1252:  * Release all complete pages that have not been used recently.
                   1253:  */
1.66    ! thorpej  1254: int
1.59      thorpej  1255: #ifdef POOL_DIAGNOSTIC
1.42      thorpej  1256: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56      sommerfe 1257: #else
                   1258: pool_reclaim(struct pool *pp)
                   1259: #endif
1.3       pk       1260: {
                   1261:        struct pool_item_header *ph, *phnext;
1.43      thorpej  1262:        struct pool_cache *pc;
1.21      thorpej  1263:        struct timeval curtime;
1.61      chs      1264:        struct pool_pagelist pq;
1.21      thorpej  1265:        int s;
1.3       pk       1266:
1.20      thorpej  1267:        if (pp->pr_roflags & PR_STATIC)
1.66    ! thorpej  1268:                return (0);
1.3       pk       1269:
1.21      thorpej  1270:        if (simple_lock_try(&pp->pr_slock) == 0)
1.66    ! thorpej  1271:                return (0);
1.25      thorpej  1272:        pr_enter(pp, file, line);
1.61      chs      1273:        TAILQ_INIT(&pq);
1.3       pk       1274:
1.43      thorpej  1275:        /*
                   1276:         * Reclaim items from the pool's caches.
                   1277:         */
1.61      chs      1278:        TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1.43      thorpej  1279:                pool_cache_reclaim(pc);
                   1280:
1.21      thorpej  1281:        s = splclock();
                   1282:        curtime = mono_time;
                   1283:        splx(s);
                   1284:
1.3       pk       1285:        for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
                   1286:                phnext = TAILQ_NEXT(ph, ph_pagelist);
                   1287:
                   1288:                /* Check our minimum page claim */
                   1289:                if (pp->pr_npages <= pp->pr_minpages)
                   1290:                        break;
                   1291:
                   1292:                if (ph->ph_nmissing == 0) {
                   1293:                        struct timeval diff;
                   1294:                        timersub(&curtime, &ph->ph_time, &diff);
                   1295:                        if (diff.tv_sec < pool_inactive_time)
                   1296:                                continue;
1.21      thorpej  1297:
                   1298:                        /*
                   1299:                         * If freeing this page would put us below
                   1300:                         * the low water mark, stop now.
                   1301:                         */
                   1302:                        if ((pp->pr_nitems - pp->pr_itemsperpage) <
                   1303:                            pp->pr_minitems)
                   1304:                                break;
                   1305:
1.61      chs      1306:                        pr_rmpage(pp, ph, &pq);
1.3       pk       1307:                }
                   1308:        }
                   1309:
1.25      thorpej  1310:        pr_leave(pp);
1.21      thorpej  1311:        simple_unlock(&pp->pr_slock);
1.66    ! thorpej  1312:        if (TAILQ_EMPTY(&pq))
        !          1313:                return (0);
        !          1314:
1.61      chs      1315:        while ((ph = TAILQ_FIRST(&pq)) != NULL) {
                   1316:                TAILQ_REMOVE(&pq, ph, ph_pagelist);
1.66    ! thorpej  1317:                pool_allocator_free(pp, ph->ph_page);
1.61      chs      1318:                if (pp->pr_roflags & PR_PHINPAGE) {
                   1319:                        continue;
                   1320:                }
                   1321:                LIST_REMOVE(ph, ph_hashlist);
                   1322:                s = splhigh();
                   1323:                pool_put(&phpool, ph);
                   1324:                splx(s);
                   1325:        }
1.66    ! thorpej  1326:
        !          1327:        return (1);
1.3       pk       1328: }
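
/*
 * Design note on the function above: pages to be released are first
 * collected on the local "pq" list while pr_slock is held, and only
 * handed back to the allocator after the lock is dropped, presumably
 * so that pool_allocator_free() and the phpool pool_put() call, both
 * of which take other locks, never run underneath this pool's lock.
 */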
                   1329:
                   1330: /*
                   1331:  * Drain pools, one at a time.
1.21      thorpej  1332:  *
                   1333:  * Note, we must never be called from an interrupt context.
1.3       pk       1334:  */
                   1335: void
1.42      thorpej  1336: pool_drain(void *arg)
1.3       pk       1337: {
                   1338:        struct pool *pp;
1.23      thorpej  1339:        int s;
1.3       pk       1340:
1.61      chs      1341:        pp = NULL;
1.49      thorpej  1342:        s = splvm();
1.23      thorpej  1343:        simple_lock(&pool_head_slock);
1.61      chs      1344:        if (drainpp == NULL) {
                   1345:                drainpp = TAILQ_FIRST(&pool_head);
                   1346:        }
                   1347:        if (drainpp) {
                   1348:                pp = drainpp;
                   1349:                drainpp = TAILQ_NEXT(pp, pr_poollist);
                   1350:        }
                   1351:        simple_unlock(&pool_head_slock);
1.63      chs      1352:        if (pp != NULL)
                                        pool_reclaim(pp);
1.61      chs      1353:        splx(s);
1.3       pk       1354: }
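
/*
 * Illustrative sketch, not part of this file: pool_drain() reclaims
 * from a single pool per call, resuming where it left off via
 * "drainpp", so a caller under memory pressure simply invokes it
 * repeatedly.  This wrapper and its iteration count are hypothetical.
 */
void
example_drain_some(int n)
{

        while (n-- > 0)
                pool_drain(NULL);
}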
                   1355:
                   1356: /*
                   1357:  * Diagnostic helpers.
                   1358:  */
                   1359: void
1.42      thorpej  1360: pool_print(struct pool *pp, const char *modif)
1.21      thorpej  1361: {
                   1362:        int s;
                   1363:
1.49      thorpej  1364:        s = splvm();
1.25      thorpej  1365:        if (simple_lock_try(&pp->pr_slock) == 0) {
                   1366:                printf("pool %s is locked; try again later\n",
                   1367:                    pp->pr_wchan);
                   1368:                splx(s);
                   1369:                return;
                   1370:        }
                   1371:        pool_print1(pp, modif, printf);
1.21      thorpej  1372:        simple_unlock(&pp->pr_slock);
                   1373:        splx(s);
                   1374: }
                   1375:
1.25      thorpej  1376: void
1.42      thorpej  1377: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25      thorpej  1378: {
                   1379:        int didlock = 0;
                   1380:
                   1381:        if (pp == NULL) {
                   1382:                (*pr)("Must specify a pool to print.\n");
                   1383:                return;
                   1384:        }
                   1385:
                   1386:        /*
                   1387:         * Called from DDB; interrupts should be blocked, and all
                   1388:         * other processors should be paused.  We can skip locking
                   1389:         * the pool in this case.
                   1390:         *
                   1391:         * We do a simple_lock_try() just to print the lock
                   1392:         * status, however.
                   1393:         */
                   1394:
                   1395:        if (simple_lock_try(&pp->pr_slock) == 0)
                   1396:                (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
                   1397:        else
                   1398:                didlock = 1;
                   1399:
                   1400:        pool_print1(pp, modif, pr);
                   1401:
                   1402:        if (didlock)
                   1403:                simple_unlock(&pp->pr_slock);
                   1404: }
                   1405:
1.21      thorpej  1406: static void
1.42      thorpej  1407: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3       pk       1408: {
1.25      thorpej  1409:        struct pool_item_header *ph;
1.44      thorpej  1410:        struct pool_cache *pc;
                   1411:        struct pool_cache_group *pcg;
1.25      thorpej  1412: #ifdef DIAGNOSTIC
                   1413:        struct pool_item *pi;
                   1414: #endif
1.44      thorpej  1415:        int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25      thorpej  1416:        char c;
                   1417:
                   1418:        while ((c = *modif++) != '\0') {
                   1419:                if (c == 'l')
                   1420:                        print_log = 1;
                   1421:                if (c == 'p')
                   1422:                        print_pagelist = 1;
1.44      thorpej  1423:                if (c == 'c')
                   1424:                        print_cache = 1;
                   1426:        }
                   1427:
                   1428:        (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
                   1429:            pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
                   1430:            pp->pr_roflags);
1.66    ! thorpej  1431:        (*pr)("\talloc %p\n", pp->pr_alloc);
1.25      thorpej  1432:        (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
                   1433:            pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
                   1434:        (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
                   1435:            pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
                   1436:
                   1437:        (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
                   1438:            pp->pr_nget, pp->pr_nfail, pp->pr_nput);
                   1439:        (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
                   1440:            pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
                   1441:
                   1442:        if (print_pagelist == 0)
                   1443:                goto skip_pagelist;
                   1444:
                   1445:        if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
                   1446:                (*pr)("\n\tpage list:\n");
                   1447:        for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
                   1448:                (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
                   1449:                    ph->ph_page, ph->ph_nmissing,
                   1450:                    (u_long)ph->ph_time.tv_sec,
                   1451:                    (u_long)ph->ph_time.tv_usec);
                   1452: #ifdef DIAGNOSTIC
1.61      chs      1453:                TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.25      thorpej  1454:                        if (pi->pi_magic != PI_MAGIC) {
                   1455:                                (*pr)("\t\t\titem %p, magic 0x%x\n",
                   1456:                                    pi, pi->pi_magic);
                   1457:                        }
                   1458:                }
                   1459: #endif
                   1460:        }
                   1461:        if (pp->pr_curpage == NULL)
                   1462:                (*pr)("\tno current page\n");
                   1463:        else
                   1464:                (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
                   1465:
                   1466:  skip_pagelist:
                   1467:
                   1468:        if (print_log == 0)
                   1469:                goto skip_log;
                   1470:
                   1471:        (*pr)("\n");
                   1472:        if ((pp->pr_roflags & PR_LOGGING) == 0)
                   1473:                (*pr)("\tno log\n");
                   1474:        else
                   1475:                pr_printlog(pp, NULL, pr);
1.3       pk       1476:
1.25      thorpej  1477:  skip_log:
1.44      thorpej  1478:
                   1479:        if (print_cache == 0)
                   1480:                goto skip_cache;
                   1481:
1.61      chs      1482:        TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.44      thorpej  1483:                (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
                   1484:                    pc->pc_allocfrom, pc->pc_freeto);
1.48      thorpej  1485:                (*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
                   1486:                    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.61      chs      1487:                TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.44      thorpej  1488:                        (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
                   1489:                        for (i = 0; i < PCG_NOBJECTS; i++)
                   1490:                                (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
                   1491:                }
                   1492:        }
                   1493:
                   1494:  skip_cache:
1.3       pk       1495:
1.25      thorpej  1496:        pr_enter_check(pp, pr);
1.3       pk       1497: }
                   1498:
                   1499: int
1.42      thorpej  1500: pool_chk(struct pool *pp, const char *label)
1.3       pk       1501: {
                   1502:        struct pool_item_header *ph;
                   1503:        int r = 0;
                   1504:
1.21      thorpej  1505:        simple_lock(&pp->pr_slock);
1.3       pk       1506:
1.61      chs      1507:        TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
1.3       pk       1508:                struct pool_item *pi;
                   1509:                int n;
                   1510:                caddr_t page;
                   1511:
1.66    ! thorpej  1512:                page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1.20      thorpej  1513:                if (page != ph->ph_page &&
                   1514:                    (pp->pr_roflags & PR_PHINPAGE) != 0) {
1.3       pk       1515:                        if (label != NULL)
                   1516:                                printf("%s: ", label);
1.16      briggs   1517:                        printf("pool(%p:%s): page inconsistency: page %p;"
                   1518:                               " at page head addr %p (p %p)\n", pp,
1.3       pk       1519:                                pp->pr_wchan, ph->ph_page,
                   1520:                                ph, page);
                   1521:                        r++;
                   1522:                        goto out;
                   1523:                }
                   1524:
                   1525:                for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
                   1526:                     pi != NULL;
                   1527:                     pi = TAILQ_NEXT(pi,pi_list), n++) {
                   1528:
                   1529: #ifdef DIAGNOSTIC
                   1530:                        if (pi->pi_magic != PI_MAGIC) {
                   1531:                                if (label != NULL)
                   1532:                                        printf("%s: ", label);
                   1533:                                printf("pool(%s): free list modified: magic=%x;"
                   1534:                                       " page %p; item ordinal %d;"
                   1535:                                       " addr %p (p %p)\n",
                   1536:                                        pp->pr_wchan, pi->pi_magic, ph->ph_page,
                   1537:                                        n, pi, page);
                   1538:                                panic("pool");
                   1539:                        }
                   1540: #endif
1.66    ! thorpej  1541:                        page =
        !          1542:                            (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1.3       pk       1543:                        if (page == ph->ph_page)
                   1544:                                continue;
                   1545:
                   1546:                        if (label != NULL)
                   1547:                                printf("%s: ", label);
1.16      briggs   1548:                        printf("pool(%p:%s): page inconsistency: page %p;"
                   1549:                               " item ordinal %d; addr %p (p %p)\n", pp,
1.3       pk       1550:                                pp->pr_wchan, ph->ph_page,
                   1551:                                n, pi, page);
                   1552:                        r++;
                   1553:                        goto out;
                   1554:                }
                   1555:        }
                   1556: out:
1.21      thorpej  1557:        simple_unlock(&pp->pr_slock);
1.3       pk       1558:        return (r);
1.43      thorpej  1559: }
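
/*
 * Worked example of the masking in pool_chk() above (hypothetical
 * addresses, assuming pa_pagemask == ~(pa_pagesz - 1)): with
 * pa_pagesz == 4096, pa_pagemask is 0xfffff000 on a 32-bit machine.
 * An item at 0xc0a4d3c0 masks to the page address 0xc0a4d000; if its
 * header claims ph_page == 0xc0a4c000 instead, the item is threaded
 * onto the wrong page's free list and pool_chk() reports the
 * inconsistency.
 */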
                   1560:
                   1561: /*
                   1562:  * pool_cache_init:
                   1563:  *
                   1564:  *     Initialize a pool cache.
                   1565:  *
                   1566:  *     NOTE: If the pool must be protected from interrupts, we expect
                   1567:  *     to be called at the appropriate interrupt priority level.
                   1568:  */
                   1569: void
                   1570: pool_cache_init(struct pool_cache *pc, struct pool *pp,
                   1571:     int (*ctor)(void *, void *, int),
                   1572:     void (*dtor)(void *, void *),
                   1573:     void *arg)
                   1574: {
                   1575:
                   1576:        TAILQ_INIT(&pc->pc_grouplist);
                   1577:        simple_lock_init(&pc->pc_slock);
                   1578:
                   1579:        pc->pc_allocfrom = NULL;
                   1580:        pc->pc_freeto = NULL;
                   1581:        pc->pc_pool = pp;
                   1582:
                   1583:        pc->pc_ctor = ctor;
                   1584:        pc->pc_dtor = dtor;
                   1585:        pc->pc_arg  = arg;
                   1586:
1.48      thorpej  1587:        pc->pc_hits   = 0;
                   1588:        pc->pc_misses = 0;
                   1589:
                   1590:        pc->pc_ngroups = 0;
                   1591:
                   1592:        pc->pc_nitems = 0;
                   1593:
1.43      thorpej  1594:        simple_lock(&pp->pr_slock);
                   1595:        TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
                   1596:        simple_unlock(&pp->pr_slock);
                   1597: }
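
/*
 * Illustrative sketch, not part of this file: layering a pool_cache
 * over a pool so that objects keep their constructed state between
 * uses.  Every name here is hypothetical.
 */
struct example_buf {
        struct simplelock eb_lock;
        char              eb_data[128];
};

static struct pool       example_bufpool;
static struct pool_cache example_bufcache;

static int
example_buf_ctor(void *arg, void *object, int flags)
{
        struct example_buf *eb = object;

        /* Runs only when the cache must fall back to pool_get(). */
        simple_lock_init(&eb->eb_lock);
        return (0);
}

static void
example_buf_dtor(void *arg, void *object)
{

        /* Runs only when an object is released back to the pool. */
}

void
example_bufcache_setup(void)
{

        pool_init(&example_bufpool, sizeof(struct example_buf), 0, 0, 0,
            "exbufpl", NULL);
        pool_cache_init(&example_bufcache, &example_bufpool,
            example_buf_ctor, example_buf_dtor, NULL);
}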
                   1598:
                   1599: /*
                   1600:  * pool_cache_destroy:
                   1601:  *
                   1602:  *     Destroy a pool cache.
                   1603:  */
                   1604: void
                   1605: pool_cache_destroy(struct pool_cache *pc)
                   1606: {
                   1607:        struct pool *pp = pc->pc_pool;
                   1608:
                   1609:        /* First, invalidate the entire cache. */
                   1610:        pool_cache_invalidate(pc);
                   1611:
                   1612:        /* ...and remove it from the pool's cache list. */
                   1613:        simple_lock(&pp->pr_slock);
                   1614:        TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
                   1615:        simple_unlock(&pp->pr_slock);
                   1616: }
                   1617:
                   1618: static __inline void *
                   1619: pcg_get(struct pool_cache_group *pcg)
                   1620: {
                   1621:        void *object;
                   1622:        u_int idx;
                   1623:
                   1624:        KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45      thorpej  1625:        KASSERT(pcg->pcg_avail != 0);
1.43      thorpej  1626:        idx = --pcg->pcg_avail;
                   1627:
                   1628:        KASSERT(pcg->pcg_objects[idx] != NULL);
                   1629:        object = pcg->pcg_objects[idx];
                   1630:        pcg->pcg_objects[idx] = NULL;
                   1631:
                   1632:        return (object);
                   1633: }
                   1634:
                   1635: static __inline void
                   1636: pcg_put(struct pool_cache_group *pcg, void *object)
                   1637: {
                   1638:        u_int idx;
                   1639:
                   1640:        KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
                   1641:        idx = pcg->pcg_avail++;
                   1642:
                   1643:        KASSERT(pcg->pcg_objects[idx] == NULL);
                   1644:        pcg->pcg_objects[idx] = object;
                   1645: }
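
/*
 * Note on the pair above: each group is a fixed array of PCG_NOBJECTS
 * slots used as a stack, with pcg_avail as the stack depth.  E.g. with
 * pcg_avail == 3, pcg_objects[0..2] hold cached objects; pcg_put()
 * stores into pcg_objects[3], and pcg_get() hands back pcg_objects[2]
 * first, so the most recently cached (and likely still cache-warm)
 * object is reused first.
 */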
                   1646:
                   1647: /*
                   1648:  * pool_cache_get:
                   1649:  *
                   1650:  *     Get an object from a pool cache.
                   1651:  */
                   1652: void *
                   1653: pool_cache_get(struct pool_cache *pc, int flags)
                   1654: {
                   1655:        struct pool_cache_group *pcg;
                   1656:        void *object;
1.58      thorpej  1657:
                   1658: #ifdef LOCKDEBUG
                   1659:        if (flags & PR_WAITOK)
                   1660:                simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
                   1661: #endif
1.43      thorpej  1662:
                   1663:        simple_lock(&pc->pc_slock);
                   1664:
                   1665:        if ((pcg = pc->pc_allocfrom) == NULL) {
1.61      chs      1666:                TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43      thorpej  1667:                        if (pcg->pcg_avail != 0) {
                   1668:                                pc->pc_allocfrom = pcg;
                   1669:                                goto have_group;
                   1670:                        }
                   1671:                }
                   1672:
                   1673:                /*
                   1674:                 * No groups with any available objects.  Allocate
                   1675:                 * a new object, construct it, and return it to
                   1676:                 * the caller.  We will allocate a group, if necessary,
                   1677:                 * when the object is freed back to the cache.
                   1678:                 */
1.48      thorpej  1679:                pc->pc_misses++;
1.43      thorpej  1680:                simple_unlock(&pc->pc_slock);
                   1681:                object = pool_get(pc->pc_pool, flags);
                   1682:                if (object != NULL && pc->pc_ctor != NULL) {
                   1683:                        if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
                   1684:                                pool_put(pc->pc_pool, object);
                   1685:                                return (NULL);
                   1686:                        }
                   1687:                }
                   1688:                return (object);
                   1689:        }
                   1690:
                   1691:  have_group:
1.48      thorpej  1692:        pc->pc_hits++;
                   1693:        pc->pc_nitems--;
1.43      thorpej  1694:        object = pcg_get(pcg);
                   1695:
                   1696:        if (pcg->pcg_avail == 0)
                   1697:                pc->pc_allocfrom = NULL;
1.45      thorpej  1698:
1.43      thorpej  1699:        simple_unlock(&pc->pc_slock);
                   1700:
                   1701:        return (object);
                   1702: }
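
/*
 * Continuing the hypothetical example_bufcache sketch above: a cache
 * hit returns a still-constructed object, so the ctor only runs on the
 * miss path that falls through to pool_get().
 */
void
example_bufcache_use(void)
{
        struct example_buf *eb;

        if ((eb = pool_cache_get(&example_bufcache, PR_WAITOK)) == NULL)
                return;
        /* ... eb->eb_lock is already initialized; use the buffer ... */
        pool_cache_put(&example_bufcache, eb);
}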
                   1703:
                   1704: /*
                   1705:  * pool_cache_put:
                   1706:  *
                   1707:  *     Put an object back to the pool cache.
                   1708:  */
                   1709: void
                   1710: pool_cache_put(struct pool_cache *pc, void *object)
                   1711: {
                   1712:        struct pool_cache_group *pcg;
1.60      thorpej  1713:        int s;
1.43      thorpej  1714:
                   1715:        simple_lock(&pc->pc_slock);
                   1716:
                   1717:        if ((pcg = pc->pc_freeto) == NULL) {
1.61      chs      1718:                TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43      thorpej  1719:                        if (pcg->pcg_avail != PCG_NOBJECTS) {
                   1720:                                pc->pc_freeto = pcg;
                   1721:                                goto have_group;
                   1722:                        }
                   1723:                }
                   1724:
                   1725:                /*
                   1726:                 * No group has room for the object.  Attempt to
1.47      thorpej  1727:                 * allocate a new group.
1.43      thorpej  1728:                 */
1.47      thorpej  1729:                simple_unlock(&pc->pc_slock);
1.60      thorpej  1730:                s = splvm();
1.43      thorpej  1731:                pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60      thorpej  1732:                splx(s);
1.43      thorpej  1733:                if (pcg != NULL) {
                   1734:                        memset(pcg, 0, sizeof(*pcg));
1.47      thorpej  1735:                        simple_lock(&pc->pc_slock);
1.48      thorpej  1736:                        pc->pc_ngroups++;
1.43      thorpej  1737:                        TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1.47      thorpej  1738:                        if (pc->pc_freeto == NULL)
                   1739:                                pc->pc_freeto = pcg;
1.43      thorpej  1740:                        goto have_group;
                   1741:                }
                   1742:
                   1743:                /*
                   1744:                 * Unable to allocate a cache group; destruct the object
                   1745:                 * and free it back to the pool.
                   1746:                 */
1.51      thorpej  1747:                pool_cache_destruct_object(pc, object);
1.43      thorpej  1748:                return;
                   1749:        }
                   1750:
                   1751:  have_group:
1.48      thorpej  1752:        pc->pc_nitems++;
1.43      thorpej  1753:        pcg_put(pcg, object);
                   1754:
                   1755:        if (pcg->pcg_avail == PCG_NOBJECTS)
                   1756:                pc->pc_freeto = NULL;
                   1757:
                   1758:        simple_unlock(&pc->pc_slock);
1.51      thorpej  1759: }
                   1760:
                   1761: /*
                   1762:  * pool_cache_destruct_object:
                   1763:  *
                   1764:  *     Force destruction of an object and its release back into
                   1765:  *     the pool.
                   1766:  */
                   1767: void
                   1768: pool_cache_destruct_object(struct pool_cache *pc, void *object)
                   1769: {
                   1770:
                   1771:        if (pc->pc_dtor != NULL)
                   1772:                (*pc->pc_dtor)(pc->pc_arg, object);
                   1773:        pool_put(pc->pc_pool, object);
1.43      thorpej  1774: }
                   1775:
                   1776: /*
                   1777:  * pool_cache_do_invalidate:
                   1778:  *
                   1779:  *     This internal function implements pool_cache_invalidate() and
                   1780:  *     pool_cache_reclaim().
                   1781:  */
                   1782: static void
                   1783: pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1.56      sommerfe 1784:     void (*putit)(struct pool *, void *))
1.43      thorpej  1785: {
                   1786:        struct pool_cache_group *pcg, *npcg;
                   1787:        void *object;
1.60      thorpej  1788:        int s;
1.43      thorpej  1789:
                   1790:        for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
                   1791:             pcg = npcg) {
                   1792:                npcg = TAILQ_NEXT(pcg, pcg_list);
                   1793:                while (pcg->pcg_avail != 0) {
1.48      thorpej  1794:                        pc->pc_nitems--;
1.43      thorpej  1795:                        object = pcg_get(pcg);
1.45      thorpej  1796:                        if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
                   1797:                                pc->pc_allocfrom = NULL;
1.43      thorpej  1798:                        if (pc->pc_dtor != NULL)
                   1799:                                (*pc->pc_dtor)(pc->pc_arg, object);
1.56      sommerfe 1800:                        (*putit)(pc->pc_pool, object);
1.43      thorpej  1801:                }
                   1802:                if (free_groups) {
1.48      thorpej  1803:                        pc->pc_ngroups--;
1.43      thorpej  1804:                        TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1.46      thorpej  1805:                        if (pc->pc_freeto == pcg)
                   1806:                                pc->pc_freeto = NULL;
1.60      thorpej  1807:                        s = splvm();
1.43      thorpej  1808:                        pool_put(&pcgpool, pcg);
1.60      thorpej  1809:                        splx(s);
1.43      thorpej  1810:                }
                   1811:        }
                   1812: }
                   1813:
                   1814: /*
                   1815:  * pool_cache_invalidate:
                   1816:  *
                   1817:  *     Invalidate a pool cache (destruct and release all of the
                   1818:  *     cached objects).
                   1819:  */
                   1820: void
                   1821: pool_cache_invalidate(struct pool_cache *pc)
                   1822: {
                   1823:
                   1824:        simple_lock(&pc->pc_slock);
1.56      sommerfe 1825:        pool_cache_do_invalidate(pc, 0, pool_put);
1.43      thorpej  1826:        simple_unlock(&pc->pc_slock);
                   1827: }
                   1828:
                   1829: /*
                   1830:  * pool_cache_reclaim:
                   1831:  *
                   1832:  *     Reclaim a pool cache for pool_reclaim().
                   1833:  */
                   1834: static void
                   1835: pool_cache_reclaim(struct pool_cache *pc)
                   1836: {
                   1837:
1.47      thorpej  1838:        simple_lock(&pc->pc_slock);
1.43      thorpej  1839:        pool_cache_do_invalidate(pc, 1, pool_do_put);
                   1840:        simple_unlock(&pc->pc_slock);
1.3       pk       1841: }
1.66    ! thorpej  1842:
        !          1843: /*
        !          1844:  * Pool backend allocators.
        !          1845:  *
        !          1846:  * Each pool has a backend allocator that handles allocation, deallocation,
        !          1847:  * and any additional draining that might be needed.
        !          1848:  *
        !          1849:  * We provide two standard allocators:
        !          1850:  *
        !          1851:  *     pool_allocator_kmem - the default when no allocator is specified
        !          1852:  *
        !          1853:  *     pool_allocator_nointr - used for pools that will not be accessed
        !          1854:  *     in interrupt context.
        !          1855:  */
        !          1856: void   *pool_page_alloc(struct pool *, int);
        !          1857: void   pool_page_free(struct pool *, void *);
        !          1858:
        !          1859: struct pool_allocator pool_allocator_kmem = {
        !          1860:        pool_page_alloc, pool_page_free, 0,
        !          1861: };
        !          1862:
        !          1863: void   *pool_page_alloc_nointr(struct pool *, int);
        !          1864: void   pool_page_free_nointr(struct pool *, void *);
        !          1865:
        !          1866: struct pool_allocator pool_allocator_nointr = {
        !          1867:        pool_page_alloc_nointr, pool_page_free_nointr, 0,
        !          1868: };
        !          1869:
        !          1870: #ifdef POOL_SUBPAGE
        !          1871: void   *pool_subpage_alloc(struct pool *, int);
        !          1872: void   pool_subpage_free(struct pool *, void *);
        !          1873:
        !          1874: struct pool_allocator pool_allocator_kmem_subpage = {
        !          1875:        pool_subpage_alloc, pool_subpage_free, 0,
        !          1876: };
        !          1877: #endif /* POOL_SUBPAGE */
        !          1878:
        !          1879: /*
        !          1880:  * We have at least three different resources for the same allocation and
        !          1881:  * each resource can be depleted.  First, we have the ready elements in the
        !          1882:  * pool.  Then we have the resource (typically a vm_map) for this allocator.
        !          1883:  * Finally, we have physical memory.  Waiting for any of these can be
        !          1884:  * unnecessary when any other is freed, but the kernel doesn't support
        !          1885:  * sleeping on multiple wait channels, so we have to employ another strategy.
        !          1886:  *
        !          1887:  * The caller sleeps on the pool (so that it can be awakened when an item
        !          1888:  * is returned to the pool), but we set PA_WANT on the allocator.  When a
        !          1889:  * page is returned to the allocator and PA_WANT is set, pool_allocator_free
        !          1890:  * will wake up all sleeping pools belonging to this allocator.
        !          1891:  *
        !          1892:  * XXX Thundering herd.
        !          1893:  */
        !          1894: void *
        !          1895: pool_allocator_alloc(struct pool *org, int flags)
        !          1896: {
        !          1897:        struct pool_allocator *pa = org->pr_alloc;
        !          1898:        struct pool *pp, *start;
        !          1899:        int s, freed;
        !          1900:        void *res;
        !          1901:
        !          1902:        do {
        !          1903:                if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
        !          1904:                        return (res);
        !          1905:                if ((flags & PR_WAITOK) == 0)
        !          1906:                        break;
        !          1907:
        !          1908:                /*
        !          1909:                 * Drain all pools, except "org", that use this
        !          1910:                 * allocator.  We do this to reclaim VA space.
        !          1911:                 * pa_alloc is responsible for waiting for
        !          1912:                 * physical memory.
        !          1913:                 *
        !          1914:                 * XXX We risk looping forever if someone calls
        !          1915:                 * pool_destroy() on "start".  But there is no
        !          1916:                 * other way to have potentially sleeping pool_reclaim,
        !          1917:                 * non-sleeping locks on pool_allocator, and some
        !          1918:                 * stirring of drained pools in the allocator.
        !          1919:                 */
        !          1920:                freed = 0;
        !          1921:
        !          1922:                s = splvm();
        !          1923:                simple_lock(&pa->pa_slock);
        !          1924:                pp = start = TAILQ_FIRST(&pa->pa_list);
        !          1925:                do {
        !          1926:                        TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
        !          1927:                        TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
        !          1928:                        if (pp == org)
        !          1929:                                continue;
        !          1930:                        simple_unlock(&pa->pa_slock);
        !          1931:                        freed = pool_reclaim(pp);
        !          1932:                        simple_lock(&pa->pa_slock);
        !          1933:                } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
        !          1934:                         freed == 0);
        !          1935:
        !          1936:                if (freed == 0) {
        !          1937:                        /*
        !          1938:                         * We set PA_WANT here; the caller will most likely
        !          1939:                         * sleep waiting for pages (if not, this won't hurt
        !          1940:                         * that much), and there is no way to set this in
        !          1941:                         * the caller without violating locking order.
        !          1942:                         */
        !          1943:                        pa->pa_flags |= PA_WANT;
        !          1944:                }
        !          1945:                simple_unlock(&pa->pa_slock);
        !          1946:                splx(s);
        !          1947:        } while (freed);
        !          1948:        return (NULL);
        !          1949: }
        !          1950:
        !          1951: void
        !          1952: pool_allocator_free(struct pool *pp, void *v)
        !          1953: {
        !          1954:        struct pool_allocator *pa = pp->pr_alloc;
        !          1955:        int s;
        !          1956:
        !          1957:        (*pa->pa_free)(pp, v);
        !          1958:
        !          1959:        s = splvm();
        !          1960:        simple_lock(&pa->pa_slock);
        !          1961:        if ((pa->pa_flags & PA_WANT) == 0) {
        !          1962:                simple_unlock(&pa->pa_slock);
        !          1963:                splx(s);
        !          1964:                return;
        !          1965:        }
        !          1966:
        !          1967:        TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
        !          1968:                simple_lock(&pp->pr_slock);
        !          1969:                if ((pp->pr_flags & PR_WANTED) != 0) {
        !          1970:                        pp->pr_flags &= ~PR_WANTED;
        !          1971:                        wakeup(pp);
        !          1972:                }
                                        simple_unlock(&pp->pr_slock);
        !          1973:        }
        !          1974:        pa->pa_flags &= ~PA_WANT;
        !          1975:        simple_unlock(&pa->pa_slock);
        !          1976:        splx(s);
        !          1977: }
        !          1978:
        !          1979: void *
        !          1980: pool_page_alloc(struct pool *pp, int flags)
        !          1981: {
        !          1982:        boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
        !          1983:
        !          1984:        return ((void *) uvm_km_alloc_poolpage(waitok));
        !          1985: }
        !          1986:
        !          1987: void
        !          1988: pool_page_free(struct pool *pp, void *v)
        !          1989: {
        !          1990:
        !          1991:        uvm_km_free_poolpage((vaddr_t) v);
        !          1992: }
        !          1993:
        !          1994: #ifdef POOL_SUBPAGE
        !          1995: /* Sub-page allocator, for machines with large hardware pages. */
        !          1996: void *
        !          1997: pool_subpage_alloc(struct pool *pp, int flags)
        !          1998: {
        !          1999:
        !          2000:        return (pool_get(&psppool, flags));
        !          2001: }
        !          2002:
        !          2003: void
        !          2004: pool_subpage_free(struct pool *pp, void *v)
        !          2005: {
        !          2006:
        !          2007:        pool_put(&psppool, v);
        !          2008: }
        !          2009:
        !          2010: /* We don't provide a real nointr allocator.  Maybe later. */
        !          2011: void *
        !          2012: pool_page_alloc_nointr(struct pool *pp, int flags)
        !          2013: {
        !          2014:
        !          2015:        return (pool_subpage_alloc(pp, flags));
        !          2016: }
        !          2017:
        !          2018: void
        !          2019: pool_page_free_nointr(struct pool *pp, void *v)
        !          2020: {
        !          2021:
        !          2022:        pool_subpage_free(pp, v);
        !          2023: }
        !          2024: #else
        !          2025: void *
        !          2026: pool_page_alloc_nointr(struct pool *pp, int flags)
        !          2027: {
        !          2028:        boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
        !          2029:
        !          2030:        return ((void *) uvm_km_alloc_poolpage1(kernel_map,
        !          2031:            uvm.kernel_object, waitok));
        !          2032: }
        !          2033:
        !          2034: void
        !          2035: pool_page_free_nointr(struct pool *pp, void *v)
        !          2036: {
        !          2037:
        !          2038:        uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
        !          2039: }
        !          2040: #endif /* POOL_SUBPAGE */
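
/*
 * Illustrative sketch, not part of this file: a subsystem that owns a
 * kernel submap can plug in a private allocator instead of the
 * defaults above.  "example_map" and both functions are hypothetical;
 * passing a NULL uvm_object to uvm_km_alloc_poolpage1() is assumed to
 * request unmanaged kernel pages, as other callers in the tree do.  A
 * pool selects this allocator via the last argument of pool_init().
 */
extern struct vm_map *example_map;

void    *example_page_alloc(struct pool *, int);
void    example_page_free(struct pool *, void *);

struct pool_allocator example_allocator = {
        example_page_alloc, example_page_free, 0,
};

void *
example_page_alloc(struct pool *pp, int flags)
{
        boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

        return ((void *) uvm_km_alloc_poolpage1(example_map, NULL, waitok));
}

void
example_page_free(struct pool *pp, void *v)
{

        uvm_km_free_poolpage1(example_map, (vaddr_t) v);
}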
