Annotation of src/sys/kern/subr_pool.c, Revision 1.94

1.94    ! simonb      1: /*     $NetBSD: subr_pool.c,v 1.93 2004/03/08 22:48:09 dbj Exp $       */
1.1       pk          2:
                      3: /*-
1.43      thorpej     4:  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
1.1       pk          5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
1.20      thorpej     8:  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
                      9:  * Simulation Facility, NASA Ames Research Center.
1.1       pk         10:  *
                     11:  * Redistribution and use in source and binary forms, with or without
                     12:  * modification, are permitted provided that the following conditions
                     13:  * are met:
                     14:  * 1. Redistributions of source code must retain the above copyright
                     15:  *    notice, this list of conditions and the following disclaimer.
                     16:  * 2. Redistributions in binary form must reproduce the above copyright
                     17:  *    notice, this list of conditions and the following disclaimer in the
                     18:  *    documentation and/or other materials provided with the distribution.
                     19:  * 3. All advertising materials mentioning features or use of this software
                     20:  *    must display the following acknowledgement:
1.13      christos   21:  *     This product includes software developed by the NetBSD
                     22:  *     Foundation, Inc. and its contributors.
1.1       pk         23:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     24:  *    contributors may be used to endorse or promote products derived
                     25:  *    from this software without specific prior written permission.
                     26:  *
                     27:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     28:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     29:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     30:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     31:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     32:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     33:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     34:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     35:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     36:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     37:  * POSSIBILITY OF SUCH DAMAGE.
                     38:  */
1.64      lukem      39:
                     40: #include <sys/cdefs.h>
1.94    ! simonb     41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.93 2004/03/08 22:48:09 dbj Exp $");
1.24      scottr     42:
1.25      thorpej    43: #include "opt_pool.h"
1.24      scottr     44: #include "opt_poollog.h"
1.28      thorpej    45: #include "opt_lockdebug.h"
1.1       pk         46:
                     47: #include <sys/param.h>
                     48: #include <sys/systm.h>
                     49: #include <sys/proc.h>
                     50: #include <sys/errno.h>
                     51: #include <sys/kernel.h>
                     52: #include <sys/malloc.h>
                     53: #include <sys/lock.h>
                     54: #include <sys/pool.h>
1.20      thorpej    55: #include <sys/syslog.h>
1.3       pk         56:
                     57: #include <uvm/uvm.h>
                     58:
1.1       pk         59: /*
                     60:  * Pool resource management utility.
1.3       pk         61:  *
1.88      chs        62:  * Memory is allocated in pages which are split into pieces according to
                     63:  * the pool item size. Each page is kept on one of three lists in the
                     64:  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
                     65:  * for empty, full and partially-full pages respectively. The individual
                     66:  * pool items are on a linked list headed by `ph_itemlist' in each page
                     67:  * header. The memory for building the page list is either taken from
                     68:  * the allocated pages themselves (for small pool items) or taken from
                     69:  * an internal pool of page headers (`phpool').
1.1       pk         70:  */
                     71:
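/*
 * Illustrative sketch (not compiled): typical use of the pool interface
 * described above, as declared in <sys/pool.h>.  The "foo" structure and
 * names below are assumptions for illustration only; real callers must
 * also run at the spl level appropriate for their pool.
 */
#if 0
struct foo {
	int	f_refcnt;
	/* ... */
};

static struct pool foo_pool;

void
foo_init(void)
{

	/* One pool per object type; items are sizeof(struct foo) bytes. */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
}

struct foo *
foo_alloc(int canwait)
{

	/* PR_WAITOK may sleep; PR_NOWAIT returns NULL when no memory. */
	return (pool_get(&foo_pool, canwait ? PR_WAITOK : PR_NOWAIT));
}

void
foo_free(struct foo *f)
{

	pool_put(&foo_pool, f);		/* item goes back onto its page */
}
#endif /* 0 */
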
1.3       pk         72: /* List of all pools */
1.5       thorpej    73: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.3       pk         74:
                     75: /* Private pool for page header structures */
                     76: static struct pool phpool;
                     77:
1.62      bjh21      78: #ifdef POOL_SUBPAGE
                     79: /* Pool of subpages for use by normal pools. */
                     80: static struct pool psppool;
                     81: #endif
                     82:
1.3       pk         83: /* # of seconds to retain page after last use */
                     84: int pool_inactive_time = 10;
                     85:
                     86: /* Next candidate for drainage (see pool_drain()) */
1.23      thorpej    87: static struct pool     *drainpp;
                     88:
                     89: /* This spin lock protects both pool_head and drainpp. */
                     90: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3       pk         91:
                     92: struct pool_item_header {
                     93:        /* Page headers */
1.88      chs        94:        LIST_ENTRY(pool_item_header)
1.3       pk         95:                                ph_pagelist;    /* pool page list */
                     96:        TAILQ_HEAD(,pool_item)  ph_itemlist;    /* chunk list for this page */
1.88      chs        97:        SPLAY_ENTRY(pool_item_header)
                     98:                                ph_node;        /* Off-page page headers */
1.79      thorpej    99:        unsigned int            ph_nmissing;    /* # of chunks in use */
1.3       pk        100:        caddr_t                 ph_page;        /* this page's address */
                    101:        struct timeval          ph_time;        /* last referenced */
                    102: };
                    103:
1.1       pk        104: struct pool_item {
1.3       pk        105: #ifdef DIAGNOSTIC
1.82      thorpej   106:        u_int pi_magic;
1.33      chs       107: #endif
1.82      thorpej   108: #define        PI_MAGIC 0xdeadbeefU
1.3       pk        109:        /* Other entries use only this list entry */
                    110:        TAILQ_ENTRY(pool_item)  pi_list;
                    111: };
                    112:
1.53      thorpej   113: #define        POOL_NEEDS_CATCHUP(pp)                                          \
                    114:        ((pp)->pr_nitems < (pp)->pr_minitems)
                    115:
1.43      thorpej   116: /*
                    117:  * Pool cache management.
                    118:  *
                    119:  * Pool caches provide a way for constructed objects to be cached by the
                    120:  * pool subsystem.  This can lead to performance improvements by avoiding
                     121:  * needless object construction/destruction; construction and destruction
                     122:  * are deferred until absolutely necessary.
                    123:  *
                    124:  * Caches are grouped into cache groups.  Each cache group references
                    125:  * up to 16 constructed objects.  When a cache allocates an object
                    126:  * from the pool, it calls the object's constructor and places it into
                    127:  * a cache group.  When a cache group frees an object back to the pool,
                    128:  * it first calls the object's destructor.  This allows the object to
                    129:  * persist in constructed form while freed to the cache.
                    130:  *
                    131:  * Multiple caches may exist for each pool.  This allows a single
                    132:  * object type to have multiple constructed forms.  The pool references
                    133:  * each cache, so that when a pool is drained by the pagedaemon, it can
                    134:  * drain each individual cache as well.  Each time a cache is drained,
                    135:  * the most idle cache group is freed to the pool in its entirety.
                    136:  *
                     137:  * Pool caches are laid on top of pools.  By layering them, we can avoid
                    138:  * the complexity of cache management for pools which would not benefit
                    139:  * from it.
                    140:  */
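
/*
 * Illustrative sketch (not compiled): layering a pool cache on top of a
 * pool so that objects survive in constructed form, per the description
 * above.  The "bar" structure, its constructor/destructor and all names
 * are assumptions for illustration; the pool_cache_*() calls follow the
 * interface declared in <sys/pool.h>.
 */
#if 0
struct bar {
	struct simplelock b_slock;
	/* ... */
};

static struct pool bar_pool;
static struct pool_cache bar_cache;

static int
bar_ctor(void *arg, void *object, int flags)
{
	struct bar *b = object;

	/* Expensive setup done only when a brand-new item is allocated. */
	simple_lock_init(&b->b_slock);
	return (0);			/* 0 == success */
}

static void
bar_dtor(void *arg, void *object)
{

	/* Undo bar_ctor(); called only when the object leaves the cache. */
}

void
bar_init(void)
{

	pool_init(&bar_pool, sizeof(struct bar), 0, 0, 0, "barpl", NULL);
	pool_cache_init(&bar_cache, &bar_pool, bar_ctor, bar_dtor, NULL);
}

/*
 * Constructed objects are then obtained and released with
 * pool_cache_get(&bar_cache, PR_WAITOK) and pool_cache_put(&bar_cache, b).
 */
#endif /* 0 */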
                    141:
                    142: /* The cache group pool. */
                    143: static struct pool pcgpool;
1.3       pk        144:
1.43      thorpej   145: static void    pool_cache_reclaim(struct pool_cache *);
1.3       pk        146:
1.42      thorpej   147: static int     pool_catchup(struct pool *);
1.55      thorpej   148: static void    pool_prime_page(struct pool *, caddr_t,
                    149:                    struct pool_item_header *);
1.88      chs       150: static void    pool_update_curpage(struct pool *);
1.66      thorpej   151:
                    152: void           *pool_allocator_alloc(struct pool *, int);
                    153: void           pool_allocator_free(struct pool *, void *);
1.3       pk        154:
1.88      chs       155: static void pool_print_pagelist(struct pool_pagelist *,
                    156:        void (*)(const char *, ...));
1.42      thorpej   157: static void pool_print1(struct pool *, const char *,
                    158:        void (*)(const char *, ...));
1.3       pk        159:
1.88      chs       160: static int pool_chk_page(struct pool *, const char *,
                    161:                         struct pool_item_header *);
                    162:
1.3       pk        163: /*
1.52      thorpej   164:  * Pool log entry. An array of these is allocated in pool_init().
1.3       pk        165:  */
                    166: struct pool_log {
                    167:        const char      *pl_file;
                    168:        long            pl_line;
                    169:        int             pl_action;
1.25      thorpej   170: #define        PRLOG_GET       1
                    171: #define        PRLOG_PUT       2
1.3       pk        172:        void            *pl_addr;
1.1       pk        173: };
                    174:
1.86      matt      175: #ifdef POOL_DIAGNOSTIC
1.3       pk        176: /* Number of entries in pool log buffers */
1.17      thorpej   177: #ifndef POOL_LOGSIZE
                    178: #define        POOL_LOGSIZE    10
                    179: #endif
                    180:
                    181: int pool_logsize = POOL_LOGSIZE;
1.1       pk        182:
1.42      thorpej   183: static __inline void
                    184: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3       pk        185: {
                    186:        int n = pp->pr_curlogentry;
                    187:        struct pool_log *pl;
                    188:
1.20      thorpej   189:        if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3       pk        190:                return;
                    191:
                    192:        /*
                    193:         * Fill in the current entry. Wrap around and overwrite
                    194:         * the oldest entry if necessary.
                    195:         */
                    196:        pl = &pp->pr_log[n];
                    197:        pl->pl_file = file;
                    198:        pl->pl_line = line;
                    199:        pl->pl_action = action;
                    200:        pl->pl_addr = v;
                    201:        if (++n >= pp->pr_logsize)
                    202:                n = 0;
                    203:        pp->pr_curlogentry = n;
                    204: }
                    205:
                    206: static void
1.42      thorpej   207: pr_printlog(struct pool *pp, struct pool_item *pi,
                    208:     void (*pr)(const char *, ...))
1.3       pk        209: {
                    210:        int i = pp->pr_logsize;
                    211:        int n = pp->pr_curlogentry;
                    212:
1.20      thorpej   213:        if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3       pk        214:                return;
                    215:
                    216:        /*
                    217:         * Print all entries in this pool's log.
                    218:         */
                    219:        while (i-- > 0) {
                    220:                struct pool_log *pl = &pp->pr_log[n];
                    221:                if (pl->pl_action != 0) {
1.25      thorpej   222:                        if (pi == NULL || pi == pl->pl_addr) {
                    223:                                (*pr)("\tlog entry %d:\n", i);
                    224:                                (*pr)("\t\taction = %s, addr = %p\n",
                    225:                                    pl->pl_action == PRLOG_GET ? "get" : "put",
                    226:                                    pl->pl_addr);
                    227:                                (*pr)("\t\tfile: %s at line %lu\n",
                    228:                                    pl->pl_file, pl->pl_line);
                    229:                        }
1.3       pk        230:                }
                    231:                if (++n >= pp->pr_logsize)
                    232:                        n = 0;
                    233:        }
                    234: }
1.25      thorpej   235:
1.42      thorpej   236: static __inline void
                    237: pr_enter(struct pool *pp, const char *file, long line)
1.25      thorpej   238: {
                    239:
1.34      thorpej   240:        if (__predict_false(pp->pr_entered_file != NULL)) {
1.25      thorpej   241:                printf("pool %s: reentrancy at file %s line %ld\n",
                    242:                    pp->pr_wchan, file, line);
                    243:                printf("         previous entry at file %s line %ld\n",
                    244:                    pp->pr_entered_file, pp->pr_entered_line);
                    245:                panic("pr_enter");
                    246:        }
                    247:
                    248:        pp->pr_entered_file = file;
                    249:        pp->pr_entered_line = line;
                    250: }
                    251:
1.42      thorpej   252: static __inline void
                    253: pr_leave(struct pool *pp)
1.25      thorpej   254: {
                    255:
1.34      thorpej   256:        if (__predict_false(pp->pr_entered_file == NULL)) {
1.25      thorpej   257:                printf("pool %s not entered?\n", pp->pr_wchan);
                    258:                panic("pr_leave");
                    259:        }
                    260:
                    261:        pp->pr_entered_file = NULL;
                    262:        pp->pr_entered_line = 0;
                    263: }
                    264:
1.42      thorpej   265: static __inline void
                    266: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25      thorpej   267: {
                    268:
                    269:        if (pp->pr_entered_file != NULL)
                    270:                (*pr)("\n\tcurrently entered from file %s line %ld\n",
                    271:                    pp->pr_entered_file, pp->pr_entered_line);
                    272: }
1.3       pk        273: #else
1.25      thorpej   274: #define        pr_log(pp, v, action, file, line)
                    275: #define        pr_printlog(pp, pi, pr)
                    276: #define        pr_enter(pp, file, line)
                    277: #define        pr_leave(pp)
                    278: #define        pr_enter_check(pp, pr)
1.59      thorpej   279: #endif /* POOL_DIAGNOSTIC */
1.3       pk        280:
1.88      chs       281: static __inline int
                    282: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
                    283: {
                    284:        if (a->ph_page < b->ph_page)
                    285:                return (-1);
                    286:        else if (a->ph_page > b->ph_page)
                    287:                return (1);
                    288:        else
                    289:                return (0);
                    290: }
                    291:
                    292: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
                    293: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
                    294:
1.3       pk        295: /*
                    296:  * Return the pool page header based on page address.
                    297:  */
1.42      thorpej   298: static __inline struct pool_item_header *
                    299: pr_find_pagehead(struct pool *pp, caddr_t page)
1.3       pk        300: {
1.88      chs       301:        struct pool_item_header *ph, tmp;
1.3       pk        302:
1.20      thorpej   303:        if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3       pk        304:                return ((struct pool_item_header *)(page + pp->pr_phoffset));
                    305:
1.88      chs       306:        tmp.ph_page = page;
                    307:        ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
                    308:        return ph;
1.3       pk        309: }
                    310:
                    311: /*
                    312:  * Remove a page from the pool.
                    313:  */
1.42      thorpej   314: static __inline void
1.61      chs       315: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
                    316:      struct pool_pagelist *pq)
1.3       pk        317: {
1.61      chs       318:        int s;
1.3       pk        319:
1.91      yamt      320:        LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);
                    321:
1.3       pk        322:        /*
1.7       thorpej   323:         * If the page was idle, decrement the idle page count.
1.3       pk        324:         */
1.6       thorpej   325:        if (ph->ph_nmissing == 0) {
                    326: #ifdef DIAGNOSTIC
                    327:                if (pp->pr_nidle == 0)
                    328:                        panic("pr_rmpage: nidle inconsistent");
1.20      thorpej   329:                if (pp->pr_nitems < pp->pr_itemsperpage)
                    330:                        panic("pr_rmpage: nitems inconsistent");
1.6       thorpej   331: #endif
                    332:                pp->pr_nidle--;
                    333:        }
1.7       thorpej   334:
1.20      thorpej   335:        pp->pr_nitems -= pp->pr_itemsperpage;
                    336:
1.7       thorpej   337:        /*
1.61      chs       338:         * Unlink a page from the pool and release it (or queue it for release).
1.7       thorpej   339:         */
1.88      chs       340:        LIST_REMOVE(ph, ph_pagelist);
1.91      yamt      341:        if ((pp->pr_roflags & PR_PHINPAGE) == 0)
                    342:                SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.61      chs       343:        if (pq) {
1.88      chs       344:                LIST_INSERT_HEAD(pq, ph, ph_pagelist);
1.61      chs       345:        } else {
1.66      thorpej   346:                pool_allocator_free(pp, ph->ph_page);
1.61      chs       347:                if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
1.85      pk        348:                        s = splvm();
1.61      chs       349:                        pool_put(&phpool, ph);
                    350:                        splx(s);
                    351:                }
                    352:        }
1.7       thorpej   353:        pp->pr_npages--;
                    354:        pp->pr_npagefree++;
1.6       thorpej   355:
1.88      chs       356:        pool_update_curpage(pp);
1.3       pk        357: }
                    358:
                    359: /*
1.94    ! simonb    360:  * Initialize all the pools listed in the "pools" link set.
        !           361:  */
        !           362: void
        !           363: link_pool_init(void)
        !           364: {
        !           365:        __link_set_decl(pools, struct link_pool_init);
        !           366:        struct link_pool_init * const *pi;
        !           367:
        !           368:        __link_set_foreach(pi, pools)
        !           369:                pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
        !           370:                    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
        !           371:                    (*pi)->palloc);
        !           372: }
        !           373:
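/*
 * Illustrative sketch (not compiled): a pool declared through the "pools"
 * link set, so that link_pool_init() above sets it up at boot.  This
 * assumes the POOL_INIT() declaration macro from <sys/pool.h>, which
 * records the same fields that link_pool_init() passes to pool_init();
 * the "baz" names are for illustration only.
 */
#if 0
POOL_INIT(baz_pool, sizeof(struct baz), 0, 0, 0, "bazpl", NULL);

/* Elsewhere, baz_pool can then be used directly with pool_get()/pool_put(). */
#endif /* 0 */
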
        !           374: /*
1.3       pk        375:  * Initialize the given pool resource structure.
                    376:  *
                    377:  * We export this routine to allow other kernel parts to declare
                    378:  * static pools that must be initialized before malloc() is available.
                    379:  */
                    380: void
1.42      thorpej   381: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.66      thorpej   382:     const char *wchan, struct pool_allocator *palloc)
1.3       pk        383: {
1.88      chs       384:        int off, slack;
1.92      enami     385:        size_t trysize, phsize;
1.93      dbj       386:        int s;
1.3       pk        387:
1.25      thorpej   388: #ifdef POOL_DIAGNOSTIC
                    389:        /*
                    390:         * Always log if POOL_DIAGNOSTIC is defined.
                    391:         */
                    392:        if (pool_logsize != 0)
                    393:                flags |= PR_LOGGING;
                    394: #endif
                    395:
1.66      thorpej   396: #ifdef POOL_SUBPAGE
                    397:        /*
                    398:         * XXX We don't provide a real `nointr' back-end
                    399:         * yet; all sub-pages come from a kmem back-end.
                     400:         * Maybe some day...
                    401:         */
                    402:        if (palloc == NULL) {
                    403:                extern struct pool_allocator pool_allocator_kmem_subpage;
                    404:                palloc = &pool_allocator_kmem_subpage;
                    405:        }
1.3       pk        406:        /*
1.66      thorpej   407:         * We'll assume any user-specified back-end allocator
                     408:         * will deal with sub-pages, or simply doesn't care.
1.3       pk        409:         */
1.66      thorpej   410: #else
                    411:        if (palloc == NULL)
                    412:                palloc = &pool_allocator_kmem;
                    413: #endif /* POOL_SUBPAGE */
                    414:        if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
                    415:                if (palloc->pa_pagesz == 0) {
1.62      bjh21     416: #ifdef POOL_SUBPAGE
1.66      thorpej   417:                        if (palloc == &pool_allocator_kmem)
                    418:                                palloc->pa_pagesz = PAGE_SIZE;
                    419:                        else
                    420:                                palloc->pa_pagesz = POOL_SUBPAGE;
1.62      bjh21     421: #else
1.66      thorpej   422:                        palloc->pa_pagesz = PAGE_SIZE;
                    423: #endif /* POOL_SUBPAGE */
                    424:                }
                    425:
                    426:                TAILQ_INIT(&palloc->pa_list);
                    427:
                    428:                simple_lock_init(&palloc->pa_slock);
                    429:                palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
                    430:                palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
                    431:                palloc->pa_flags |= PA_INITIALIZED;
1.4       thorpej   432:        }
1.3       pk        433:
                    434:        if (align == 0)
                    435:                align = ALIGN(1);
1.14      thorpej   436:
                    437:        if (size < sizeof(struct pool_item))
                    438:                size = sizeof(struct pool_item);
1.3       pk        439:
1.78      thorpej   440:        size = roundup(size, align);
1.66      thorpej   441: #ifdef DIAGNOSTIC
                    442:        if (size > palloc->pa_pagesz)
1.35      pk        443:                panic("pool_init: pool item size (%lu) too large",
                    444:                      (u_long)size);
1.66      thorpej   445: #endif
1.35      pk        446:
1.3       pk        447:        /*
                    448:         * Initialize the pool structure.
                    449:         */
1.88      chs       450:        LIST_INIT(&pp->pr_emptypages);
                    451:        LIST_INIT(&pp->pr_fullpages);
                    452:        LIST_INIT(&pp->pr_partpages);
1.43      thorpej   453:        TAILQ_INIT(&pp->pr_cachelist);
1.3       pk        454:        pp->pr_curpage = NULL;
                    455:        pp->pr_npages = 0;
                    456:        pp->pr_minitems = 0;
                    457:        pp->pr_minpages = 0;
                    458:        pp->pr_maxpages = UINT_MAX;
1.20      thorpej   459:        pp->pr_roflags = flags;
                    460:        pp->pr_flags = 0;
1.35      pk        461:        pp->pr_size = size;
1.3       pk        462:        pp->pr_align = align;
                    463:        pp->pr_wchan = wchan;
1.66      thorpej   464:        pp->pr_alloc = palloc;
1.20      thorpej   465:        pp->pr_nitems = 0;
                    466:        pp->pr_nout = 0;
                    467:        pp->pr_hardlimit = UINT_MAX;
                    468:        pp->pr_hardlimit_warning = NULL;
1.31      thorpej   469:        pp->pr_hardlimit_ratecap.tv_sec = 0;
                    470:        pp->pr_hardlimit_ratecap.tv_usec = 0;
                    471:        pp->pr_hardlimit_warning_last.tv_sec = 0;
                    472:        pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68      thorpej   473:        pp->pr_drain_hook = NULL;
                    474:        pp->pr_drain_hook_arg = NULL;
1.3       pk        475:
                    476:        /*
                    477:         * Decide whether to put the page header off page to avoid
1.92      enami     478:         * wasting too large a part of the page when the item is too big.
                     479:         * Off-page page headers go on a splay tree, so we can match
                     480:         * a returned item with its header based on the page address.
                     481:         * We use 1/16 of the page size and about 8 times the item
                     482:         * size as the threshold (XXX: tune)
                    483:         *
                    484:         * However, we'll put the header into the page if we can put
                    485:         * it without wasting any items.
                    486:         *
                    487:         * Silently enforce `0 <= ioff < align'.
1.3       pk        488:         */
1.92      enami     489:        pp->pr_itemoffset = ioff %= align;
                    490:        /* See the comment below about reserved bytes. */
                    491:        trysize = palloc->pa_pagesz - ((align - ioff) % align);
                    492:        phsize = ALIGN(sizeof(struct pool_item_header));
                    493:        if (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
                    494:            trysize / pp->pr_size == (trysize - phsize) / pp->pr_size) {
1.3       pk        495:                /* Use the end of the page for the page header */
1.20      thorpej   496:                pp->pr_roflags |= PR_PHINPAGE;
1.92      enami     497:                pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2       pk        498:        } else {
1.3       pk        499:                /* The page header will be taken from our page header pool */
                    500:                pp->pr_phoffset = 0;
1.66      thorpej   501:                off = palloc->pa_pagesz;
1.88      chs       502:                SPLAY_INIT(&pp->pr_phtree);
1.2       pk        503:        }
1.1       pk        504:
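	/*
	 * Worked example (illustrative figures): with 4 KB pages and an
	 * aligned page header of roughly 48 bytes, a 64-byte item satisfies
	 * 64 < MIN(4096 / 16, 48 * 8) = MIN(256, 384), so the header is
	 * kept at the end of the page (PR_PHINPAGE).  A 1 KB item fails
	 * that test, and since 4096 / 1024 != (4096 - 48) / 1024 an in-page
	 * header would also cost an item, so its header comes from phpool
	 * instead.
	 */
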
1.3       pk        505:        /*
                    506:         * Alignment is to take place at `ioff' within the item. This means
                    507:         * we must reserve up to `align - 1' bytes on the page to allow
                    508:         * appropriate positioning of each item.
                    509:         */
                    510:        pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43      thorpej   511:        KASSERT(pp->pr_itemsperpage != 0);
1.3       pk        512:
                    513:        /*
                    514:         * Use the slack between the chunks and the page header
                    515:         * for "cache coloring".
                    516:         */
                    517:        slack = off - pp->pr_itemsperpage * pp->pr_size;
                    518:        pp->pr_maxcolor = (slack / align) * align;
                    519:        pp->pr_curcolor = 0;
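	/*
	 * Illustrative note: with, say, 96 bytes of slack and a 16-byte
	 * alignment, pr_maxcolor becomes 96, and successive pages start
	 * their items at offsets 0, 16, 32, ... before wrapping, so equal
	 * items on different pages land in different cache lines/sets.
	 */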
                    520:
                    521:        pp->pr_nget = 0;
                    522:        pp->pr_nfail = 0;
                    523:        pp->pr_nput = 0;
                    524:        pp->pr_npagealloc = 0;
                    525:        pp->pr_npagefree = 0;
1.1       pk        526:        pp->pr_hiwat = 0;
1.8       thorpej   527:        pp->pr_nidle = 0;
1.3       pk        528:
1.59      thorpej   529: #ifdef POOL_DIAGNOSTIC
1.25      thorpej   530:        if (flags & PR_LOGGING) {
                    531:                if (kmem_map == NULL ||
                    532:                    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
                    533:                     M_TEMP, M_NOWAIT)) == NULL)
1.20      thorpej   534:                        pp->pr_roflags &= ~PR_LOGGING;
1.3       pk        535:                pp->pr_curlogentry = 0;
                    536:                pp->pr_logsize = pool_logsize;
                    537:        }
1.59      thorpej   538: #endif
1.25      thorpej   539:
                    540:        pp->pr_entered_file = NULL;
                    541:        pp->pr_entered_line = 0;
1.3       pk        542:
1.21      thorpej   543:        simple_lock_init(&pp->pr_slock);
1.1       pk        544:
1.3       pk        545:        /*
1.43      thorpej   546:         * Initialize private page header pool and cache magazine pool if we
                    547:         * haven't done so yet.
1.23      thorpej   548:         * XXX LOCKING.
1.3       pk        549:         */
                    550:        if (phpool.pr_size == 0) {
1.62      bjh21     551: #ifdef POOL_SUBPAGE
                    552:                pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
1.66      thorpej   553:                    "phpool", &pool_allocator_kmem);
1.62      bjh21     554:                pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.66      thorpej   555:                    PR_RECURSIVE, "psppool", &pool_allocator_kmem);
1.62      bjh21     556: #else
1.3       pk        557:                pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
1.66      thorpej   558:                    0, "phpool", NULL);
1.62      bjh21     559: #endif
1.43      thorpej   560:                pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.66      thorpej   561:                    0, "pcgpool", NULL);
1.1       pk        562:        }
                    563:
1.23      thorpej   564:        /* Insert into the list of all pools. */
                    565:        simple_lock(&pool_head_slock);
                    566:        TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
                    567:        simple_unlock(&pool_head_slock);
1.66      thorpej   568:
                    569:        /* Insert this into the list of pools using this allocator. */
1.93      dbj       570:        s = splvm();
1.66      thorpej   571:        simple_lock(&palloc->pa_slock);
                    572:        TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
                    573:        simple_unlock(&palloc->pa_slock);
1.93      dbj       574:        splx(s);
1.1       pk        575: }
                    576:
                    577: /*
                     578:  * De-commission a pool resource.
                    579:  */
                    580: void
1.42      thorpej   581: pool_destroy(struct pool *pp)
1.1       pk        582: {
1.3       pk        583:        struct pool_item_header *ph;
1.43      thorpej   584:        struct pool_cache *pc;
1.93      dbj       585:        int s;
1.43      thorpej   586:
1.66      thorpej   587:        /* Locking order: pool_allocator -> pool */
1.93      dbj       588:        s = splvm();
1.66      thorpej   589:        simple_lock(&pp->pr_alloc->pa_slock);
                    590:        TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
                    591:        simple_unlock(&pp->pr_alloc->pa_slock);
1.93      dbj       592:        splx(s);
1.66      thorpej   593:
1.43      thorpej   594:        /* Destroy all caches for this pool. */
                    595:        while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
                    596:                pool_cache_destroy(pc);
1.3       pk        597:
                    598: #ifdef DIAGNOSTIC
1.20      thorpej   599:        if (pp->pr_nout != 0) {
1.25      thorpej   600:                pr_printlog(pp, NULL, printf);
1.80      provos    601:                panic("pool_destroy: pool busy: still out: %u",
1.20      thorpej   602:                    pp->pr_nout);
1.3       pk        603:        }
                    604: #endif
1.1       pk        605:
1.3       pk        606:        /* Remove all pages */
1.88      chs       607:        while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.70      thorpej   608:                pr_rmpage(pp, ph, NULL);
1.88      chs       609:        KASSERT(LIST_EMPTY(&pp->pr_fullpages));
                    610:        KASSERT(LIST_EMPTY(&pp->pr_partpages));
1.3       pk        611:
                    612:        /* Remove from global pool list */
1.23      thorpej   613:        simple_lock(&pool_head_slock);
1.3       pk        614:        TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.61      chs       615:        if (drainpp == pp) {
                    616:                drainpp = NULL;
                    617:        }
1.23      thorpej   618:        simple_unlock(&pool_head_slock);
1.3       pk        619:
1.59      thorpej   620: #ifdef POOL_DIAGNOSTIC
1.20      thorpej   621:        if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3       pk        622:                free(pp->pr_log, M_TEMP);
1.59      thorpej   623: #endif
1.1       pk        624: }
                    625:
1.68      thorpej   626: void
                    627: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
                    628: {
                    629:
                    630:        /* XXX no locking -- must be used just after pool_init() */
                    631: #ifdef DIAGNOSTIC
                    632:        if (pp->pr_drain_hook != NULL)
                    633:                panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
                    634: #endif
                    635:        pp->pr_drain_hook = fn;
                    636:        pp->pr_drain_hook_arg = arg;
                    637: }
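
/*
 * Illustrative sketch (not compiled): registering a drain hook right after
 * pool_init(), as required by the XXX note above.  When the pool hits its
 * hard limit (see pool_get()) or is reclaimed, the hook is called with the
 * pool unlocked and is expected to release items back to it.  The "mumble"
 * names are assumptions for illustration only.
 */
#if 0
struct mumble {
	int	m_dummy;
};

static struct pool mumble_pool;

static void
mumble_drain(void *arg, int flags)
{

	/* Free cached "mumble" objects back to mumble_pool here. */
}

void
mumble_init(void)
{

	pool_init(&mumble_pool, sizeof(struct mumble), 0, 0, 0,
	    "mumblpl", NULL);
	pool_set_drain_hook(&mumble_pool, mumble_drain, NULL);
}
#endif /* 0 */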
                    638:
1.88      chs       639: static struct pool_item_header *
1.55      thorpej   640: pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
                    641: {
                    642:        struct pool_item_header *ph;
                    643:        int s;
                    644:
                    645:        LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
                    646:
                    647:        if ((pp->pr_roflags & PR_PHINPAGE) != 0)
                    648:                ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
                    649:        else {
1.85      pk        650:                s = splvm();
1.55      thorpej   651:                ph = pool_get(&phpool, flags);
                    652:                splx(s);
                    653:        }
                    654:
                    655:        return (ph);
                    656: }
1.1       pk        657:
                    658: /*
1.3       pk        659:  * Grab an item from the pool; must be called at appropriate spl level
1.1       pk        660:  */
1.3       pk        661: void *
1.59      thorpej   662: #ifdef POOL_DIAGNOSTIC
1.42      thorpej   663: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56      sommerfe  664: #else
                    665: pool_get(struct pool *pp, int flags)
                    666: #endif
1.1       pk        667: {
                    668:        struct pool_item *pi;
1.3       pk        669:        struct pool_item_header *ph;
1.55      thorpej   670:        void *v;
1.1       pk        671:
1.2       pk        672: #ifdef DIAGNOSTIC
1.84      thorpej   673:        if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37      sommerfe  674:                            (flags & PR_WAITOK) != 0))
1.77      matt      675:                panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58      thorpej   676:
                    677: #ifdef LOCKDEBUG
                    678:        if (flags & PR_WAITOK)
                    679:                simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
1.56      sommerfe  680: #endif
1.58      thorpej   681: #endif /* DIAGNOSTIC */
1.1       pk        682:
1.21      thorpej   683:        simple_lock(&pp->pr_slock);
1.25      thorpej   684:        pr_enter(pp, file, line);
1.20      thorpej   685:
                    686:  startover:
                    687:        /*
                    688:         * Check to see if we've reached the hard limit.  If we have,
                    689:         * and we can wait, then wait until an item has been returned to
                    690:         * the pool.
                    691:         */
                    692: #ifdef DIAGNOSTIC
1.34      thorpej   693:        if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25      thorpej   694:                pr_leave(pp);
1.21      thorpej   695:                simple_unlock(&pp->pr_slock);
1.20      thorpej   696:                panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
                    697:        }
                    698: #endif
1.34      thorpej   699:        if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68      thorpej   700:                if (pp->pr_drain_hook != NULL) {
                    701:                        /*
                    702:                         * Since the drain hook is going to free things
                    703:                         * back to the pool, unlock, call the hook, re-lock,
                    704:                         * and check the hardlimit condition again.
                    705:                         */
                    706:                        pr_leave(pp);
                    707:                        simple_unlock(&pp->pr_slock);
                    708:                        (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
                    709:                        simple_lock(&pp->pr_slock);
                    710:                        pr_enter(pp, file, line);
                    711:                        if (pp->pr_nout < pp->pr_hardlimit)
                    712:                                goto startover;
                    713:                }
                    714:
1.29      sommerfe  715:                if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20      thorpej   716:                        /*
                    717:                         * XXX: A warning isn't logged in this case.  Should
                    718:                         * it be?
                    719:                         */
                    720:                        pp->pr_flags |= PR_WANTED;
1.25      thorpej   721:                        pr_leave(pp);
1.40      sommerfe  722:                        ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25      thorpej   723:                        pr_enter(pp, file, line);
1.20      thorpej   724:                        goto startover;
                    725:                }
1.31      thorpej   726:
                    727:                /*
                    728:                 * Log a message that the hard limit has been hit.
                    729:                 */
                    730:                if (pp->pr_hardlimit_warning != NULL &&
                    731:                    ratecheck(&pp->pr_hardlimit_warning_last,
                    732:                              &pp->pr_hardlimit_ratecap))
                    733:                        log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21      thorpej   734:
                    735:                pp->pr_nfail++;
                    736:
1.25      thorpej   737:                pr_leave(pp);
1.21      thorpej   738:                simple_unlock(&pp->pr_slock);
1.20      thorpej   739:                return (NULL);
                    740:        }
                    741:
1.3       pk        742:        /*
                    743:         * The convention we use is that if `curpage' is not NULL, then
                    744:         * it points at a non-empty bucket. In particular, `curpage'
                    745:         * never points at a page header which has PR_PHINPAGE set and
                    746:         * has no items in its bucket.
                    747:         */
1.20      thorpej   748:        if ((ph = pp->pr_curpage) == NULL) {
                    749: #ifdef DIAGNOSTIC
                    750:                if (pp->pr_nitems != 0) {
1.21      thorpej   751:                        simple_unlock(&pp->pr_slock);
1.20      thorpej   752:                        printf("pool_get: %s: curpage NULL, nitems %u\n",
                    753:                            pp->pr_wchan, pp->pr_nitems);
1.80      provos    754:                        panic("pool_get: nitems inconsistent");
1.20      thorpej   755:                }
                    756: #endif
                    757:
1.21      thorpej   758:                /*
                    759:                 * Call the back-end page allocator for more memory.
                    760:                 * Release the pool lock, as the back-end page allocator
                    761:                 * may block.
                    762:                 */
1.25      thorpej   763:                pr_leave(pp);
1.21      thorpej   764:                simple_unlock(&pp->pr_slock);
1.66      thorpej   765:                v = pool_allocator_alloc(pp, flags);
1.55      thorpej   766:                if (__predict_true(v != NULL))
                    767:                        ph = pool_alloc_item_header(pp, v, flags);
1.15      pk        768:
1.55      thorpej   769:                if (__predict_false(v == NULL || ph == NULL)) {
                    770:                        if (v != NULL)
1.66      thorpej   771:                                pool_allocator_free(pp, v);
1.55      thorpej   772:
1.91      yamt      773:                        simple_lock(&pp->pr_slock);
                    774:                        pr_enter(pp, file, line);
                    775:
1.21      thorpej   776:                        /*
1.55      thorpej   777:                         * We were unable to allocate a page or item
                    778:                         * header, but we released the lock during
                    779:                         * allocation, so perhaps items were freed
                    780:                         * back to the pool.  Check for this case.
1.21      thorpej   781:                         */
                    782:                        if (pp->pr_curpage != NULL)
                    783:                                goto startover;
1.15      pk        784:
1.3       pk        785:                        if ((flags & PR_WAITOK) == 0) {
                    786:                                pp->pr_nfail++;
1.25      thorpej   787:                                pr_leave(pp);
1.21      thorpej   788:                                simple_unlock(&pp->pr_slock);
1.1       pk        789:                                return (NULL);
1.3       pk        790:                        }
                    791:
1.15      pk        792:                        /*
                    793:                         * Wait for items to be returned to this pool.
1.21      thorpej   794:                         *
1.20      thorpej   795:                         * XXX: maybe we should wake up once a second and
                    796:                         * try again?
1.15      pk        797:                         */
1.1       pk        798:                        pp->pr_flags |= PR_WANTED;
1.66      thorpej   799:                        /* PA_WANTED is already set on the allocator. */
1.25      thorpej   800:                        pr_leave(pp);
1.40      sommerfe  801:                        ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25      thorpej   802:                        pr_enter(pp, file, line);
1.20      thorpej   803:                        goto startover;
1.1       pk        804:                }
1.3       pk        805:
1.15      pk        806:                /* We have more memory; add it to the pool */
1.91      yamt      807:                simple_lock(&pp->pr_slock);
                    808:                pr_enter(pp, file, line);
1.55      thorpej   809:                pool_prime_page(pp, v, ph);
1.15      pk        810:                pp->pr_npagealloc++;
                    811:
1.20      thorpej   812:                /* Start the allocation process over. */
                    813:                goto startover;
1.3       pk        814:        }
1.34      thorpej   815:        if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
1.25      thorpej   816:                pr_leave(pp);
1.21      thorpej   817:                simple_unlock(&pp->pr_slock);
1.3       pk        818:                panic("pool_get: %s: page empty", pp->pr_wchan);
1.21      thorpej   819:        }
1.20      thorpej   820: #ifdef DIAGNOSTIC
1.34      thorpej   821:        if (__predict_false(pp->pr_nitems == 0)) {
1.25      thorpej   822:                pr_leave(pp);
1.21      thorpej   823:                simple_unlock(&pp->pr_slock);
1.20      thorpej   824:                printf("pool_get: %s: items on itemlist, nitems %u\n",
                    825:                    pp->pr_wchan, pp->pr_nitems);
1.80      provos    826:                panic("pool_get: nitems inconsistent");
1.20      thorpej   827:        }
1.65      enami     828: #endif
1.56      sommerfe  829:
1.65      enami     830: #ifdef POOL_DIAGNOSTIC
1.3       pk        831:        pr_log(pp, v, PRLOG_GET, file, line);
1.65      enami     832: #endif
1.3       pk        833:
1.65      enami     834: #ifdef DIAGNOSTIC
1.34      thorpej   835:        if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1.25      thorpej   836:                pr_printlog(pp, pi, printf);
1.3       pk        837:                panic("pool_get(%s): free list modified: magic=%x; page %p;"
                    838:                       " item addr %p\n",
                    839:                        pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
                    840:        }
                    841: #endif
                    842:
                    843:        /*
                    844:         * Remove from item list.
                    845:         */
                    846:        TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
1.20      thorpej   847:        pp->pr_nitems--;
                    848:        pp->pr_nout++;
1.6       thorpej   849:        if (ph->ph_nmissing == 0) {
                    850: #ifdef DIAGNOSTIC
1.34      thorpej   851:                if (__predict_false(pp->pr_nidle == 0))
1.6       thorpej   852:                        panic("pool_get: nidle inconsistent");
                    853: #endif
                    854:                pp->pr_nidle--;
1.88      chs       855:
                    856:                /*
                    857:                 * This page was previously empty.  Move it to the list of
                    858:                 * partially-full pages.  This page is already curpage.
                    859:                 */
                    860:                LIST_REMOVE(ph, ph_pagelist);
                    861:                LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6       thorpej   862:        }
1.3       pk        863:        ph->ph_nmissing++;
1.88      chs       864:        if (TAILQ_EMPTY(&ph->ph_itemlist)) {
1.21      thorpej   865: #ifdef DIAGNOSTIC
1.34      thorpej   866:                if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
1.25      thorpej   867:                        pr_leave(pp);
1.21      thorpej   868:                        simple_unlock(&pp->pr_slock);
                    869:                        panic("pool_get: %s: nmissing inconsistent",
                    870:                            pp->pr_wchan);
                    871:                }
                    872: #endif
1.3       pk        873:                /*
1.88      chs       874:                 * This page is now full.  Move it to the full list
                    875:                 * and select a new current page.
1.3       pk        876:                 */
1.88      chs       877:                LIST_REMOVE(ph, ph_pagelist);
                    878:                LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
                    879:                pool_update_curpage(pp);
1.1       pk        880:        }
1.3       pk        881:
                    882:        pp->pr_nget++;
1.20      thorpej   883:
                    884:        /*
                    885:         * If we have a low water mark and we are now below that low
                    886:         * water mark, add more items to the pool.
                    887:         */
1.53      thorpej   888:        if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20      thorpej   889:                /*
                    890:                 * XXX: Should we log a warning?  Should we set up a timeout
                    891:                 * to try again in a second or so?  The latter could break
                    892:                 * a caller's assumptions about interrupt protection, etc.
                    893:                 */
                    894:        }
                    895:
1.25      thorpej   896:        pr_leave(pp);
1.21      thorpej   897:        simple_unlock(&pp->pr_slock);
1.1       pk        898:        return (v);
                    899: }
                    900:
                    901: /*
1.43      thorpej   902:  * Internal version of pool_put().  Pool is already locked/entered.
1.1       pk        903:  */
1.43      thorpej   904: static void
1.56      sommerfe  905: pool_do_put(struct pool *pp, void *v)
1.1       pk        906: {
                    907:        struct pool_item *pi = v;
1.3       pk        908:        struct pool_item_header *ph;
                    909:        caddr_t page;
1.21      thorpej   910:        int s;
1.3       pk        911:
1.61      chs       912:        LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
                    913:
1.66      thorpej   914:        page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1.1       pk        915:
1.30      thorpej   916: #ifdef DIAGNOSTIC
1.34      thorpej   917:        if (__predict_false(pp->pr_nout == 0)) {
1.30      thorpej   918:                printf("pool %s: putting with none out\n",
                    919:                    pp->pr_wchan);
                    920:                panic("pool_put");
                    921:        }
                    922: #endif
1.3       pk        923:
1.34      thorpej   924:        if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25      thorpej   925:                pr_printlog(pp, NULL, printf);
1.3       pk        926:                panic("pool_put: %s: page header missing", pp->pr_wchan);
                    927:        }
1.28      thorpej   928:
                    929: #ifdef LOCKDEBUG
                    930:        /*
                    931:         * Check if we're freeing a locked simple lock.
                    932:         */
                    933:        simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
                    934: #endif
1.3       pk        935:
                    936:        /*
                    937:         * Return to item list.
                    938:         */
1.2       pk        939: #ifdef DIAGNOSTIC
1.3       pk        940:        pi->pi_magic = PI_MAGIC;
                    941: #endif
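	/*
	 * Under DEBUG, additionally poison the whole item with PI_MAGIC so
	 * that any use-after-free reads back 0xdeadbeef instead of stale
	 * data.
	 */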
1.32      chs       942: #ifdef DEBUG
                    943:        {
                    944:                int i, *ip = v;
                    945:
                    946:                for (i = 0; i < pp->pr_size / sizeof(int); i++) {
                    947:                        *ip++ = PI_MAGIC;
                    948:                }
                    949:        }
                    950: #endif
                    951:
1.3       pk        952:        TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.79      thorpej   953:        KDASSERT(ph->ph_nmissing != 0);
1.3       pk        954:        ph->ph_nmissing--;
                    955:        pp->pr_nput++;
1.20      thorpej   956:        pp->pr_nitems++;
                    957:        pp->pr_nout--;
1.3       pk        958:
                    959:        /* Cancel "pool empty" condition if it exists */
                    960:        if (pp->pr_curpage == NULL)
                    961:                pp->pr_curpage = ph;
                    962:
                    963:        if (pp->pr_flags & PR_WANTED) {
                    964:                pp->pr_flags &= ~PR_WANTED;
1.15      pk        965:                if (ph->ph_nmissing == 0)
                    966:                        pp->pr_nidle++;
1.3       pk        967:                wakeup((caddr_t)pp);
                    968:                return;
                    969:        }
                    970:
                    971:        /*
1.88      chs       972:         * If this page is now empty, do one of two things:
1.21      thorpej   973:         *
1.88      chs       974:         *      (1) If we have more pages than the page high water mark,
1.90      thorpej   975:         *          or if we are flagged as immediately freeing back idle
                    976:         *          pages, free the page back to the system.  ONLY CONSIDER
                    977:         *          FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
                    978:         *          CLAIM.
1.21      thorpej   979:         *
1.88      chs       980:         *      (2) Otherwise, move the page to the empty page list.
                    981:         *
                    982:         * Either way, select a new current page (so we use a partially-full
                    983:         * page if one is available).
1.3       pk        984:         */
                    985:        if (ph->ph_nmissing == 0) {
1.6       thorpej   986:                pp->pr_nidle++;
1.90      thorpej   987:                if (pp->pr_npages > pp->pr_minpages &&
                    988:                    (pp->pr_npages > pp->pr_maxpages ||
                    989:                     (pp->pr_roflags & PR_IMMEDRELEASE) != 0 ||
                    990:                     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
1.91      yamt      991:                        simple_unlock(&pp->pr_slock);
1.61      chs       992:                        pr_rmpage(pp, ph, NULL);
1.91      yamt      993:                        simple_lock(&pp->pr_slock);
1.3       pk        994:                } else {
1.88      chs       995:                        LIST_REMOVE(ph, ph_pagelist);
                    996:                        LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3       pk        997:
1.21      thorpej   998:                        /*
                    999:                         * Update the timestamp on the page.  A page must
                   1000:                         * be idle for some period of time before it can
                   1001:                         * be reclaimed by the pagedaemon.  This minimizes
                   1002:                         * ping-pong'ing for memory.
                   1003:                         */
                   1004:                        s = splclock();
                   1005:                        ph->ph_time = mono_time;
                   1006:                        splx(s);
1.1       pk       1007:                }
1.88      chs      1008:                pool_update_curpage(pp);
1.1       pk       1009:        }
1.88      chs      1010:
1.21      thorpej  1011:        /*
1.88      chs      1012:         * If the page was previously completely full, move it to the
                   1013:         * partially-full list and make it the current page.  The next
                   1014:         * allocation will get the item from this page, instead of
                   1015:         * further fragmenting the pool.
1.21      thorpej  1016:         */
                   1017:        else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88      chs      1018:                LIST_REMOVE(ph, ph_pagelist);
                   1019:                LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21      thorpej  1020:                pp->pr_curpage = ph;
                   1021:        }
1.43      thorpej  1022: }
                   1023:
                   1024: /*
                   1025:  * Return resource to the pool; must be called at appropriate spl level
                   1026:  */
1.59      thorpej  1027: #ifdef POOL_DIAGNOSTIC
1.43      thorpej  1028: void
                   1029: _pool_put(struct pool *pp, void *v, const char *file, long line)
                   1030: {
                   1031:
                   1032:        simple_lock(&pp->pr_slock);
                   1033:        pr_enter(pp, file, line);
                   1034:
1.56      sommerfe 1035:        pr_log(pp, v, PRLOG_PUT, file, line);
                   1036:
                   1037:        pool_do_put(pp, v);
1.21      thorpej  1038:
1.25      thorpej  1039:        pr_leave(pp);
1.21      thorpej  1040:        simple_unlock(&pp->pr_slock);
1.1       pk       1041: }
1.57      sommerfe 1042: #undef pool_put
1.59      thorpej  1043: #endif /* POOL_DIAGNOSTIC */
1.1       pk       1044:
1.56      sommerfe 1045: void
                   1046: pool_put(struct pool *pp, void *v)
                   1047: {
                   1048:
                   1049:        simple_lock(&pp->pr_slock);
                   1050:
                   1051:        pool_do_put(pp, v);
                   1052:
                   1053:        simple_unlock(&pp->pr_slock);
                   1054: }
1.57      sommerfe 1055:
1.59      thorpej  1056: #ifdef POOL_DIAGNOSTIC
1.57      sommerfe 1057: #define                pool_put(h, v)  _pool_put((h), (v), __FILE__, __LINE__)
1.56      sommerfe 1058: #endif
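/*
 * Illustrative usage sketch (not from the original source; struct foo,
 * foo_pool and "foopl" are hypothetical names): a subsystem that never
 * touches its pool from interrupt context can pair pool_get() and
 * pool_put() like this:
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &pool_allocator_nointr);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_NOWAIT);
 *	if (f == NULL)
 *		return (ENOMEM);
 *	...
 *	pool_put(&foo_pool, f);
 */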
1.74      thorpej  1059:
                   1060: /*
                   1061:  * Add N items to the pool.
                   1062:  */
                   1063: int
                   1064: pool_prime(struct pool *pp, int n)
                   1065: {
1.83      scw      1066:        struct pool_item_header *ph = NULL;
1.74      thorpej  1067:        caddr_t cp;
1.75      simonb   1068:        int newpages;
1.74      thorpej  1069:
                   1070:        simple_lock(&pp->pr_slock);
                   1071:
                   1072:        newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
                   1073:
                   1074:        while (newpages-- > 0) {
                   1075:                simple_unlock(&pp->pr_slock);
                   1076:                cp = pool_allocator_alloc(pp, PR_NOWAIT);
                   1077:                if (__predict_true(cp != NULL))
                   1078:                        ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
                   1079:
                   1080:                if (__predict_false(cp == NULL || ph == NULL)) {
                   1081:                        if (cp != NULL)
                   1082:                                pool_allocator_free(pp, cp);
1.91      yamt     1083:                        simple_lock(&pp->pr_slock);
1.74      thorpej  1084:                        break;
                   1085:                }
                   1086:
1.91      yamt     1087:                simple_lock(&pp->pr_slock);
1.74      thorpej  1088:                pool_prime_page(pp, cp, ph);
                   1089:                pp->pr_npagealloc++;
                   1090:                pp->pr_minpages++;
                   1091:        }
                   1092:
                   1093:        if (pp->pr_minpages >= pp->pr_maxpages)
                   1094:                pp->pr_maxpages = pp->pr_minpages + 1;  /* XXX */
                   1095:
                   1096:        simple_unlock(&pp->pr_slock);
                   1097:        return (0);
                   1098: }
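/*
 * Illustrative sketch (hypothetical foo_pool as above): pool_prime() is
 * typically called once after pool_init() to pre-allocate pages for an
 * expected steady-state number of items, so that early PR_NOWAIT
 * allocations do not fail; the item count is rounded up to whole pages:
 *
 *	pool_prime(&foo_pool, 64);
 */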
1.55      thorpej  1099:
                   1100: /*
1.3       pk       1101:  * Add a page worth of items to the pool.
1.21      thorpej  1102:  *
                   1103:  * Note, we must be called with the pool descriptor LOCKED.
1.3       pk       1104:  */
1.55      thorpej  1105: static void
                   1106: pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1.3       pk       1107: {
                   1108:        struct pool_item *pi;
                   1109:        caddr_t cp = storage;
                   1110:        unsigned int align = pp->pr_align;
                   1111:        unsigned int ioff = pp->pr_itemoffset;
1.55      thorpej  1112:        int n;
1.89      yamt     1113:        int s;
1.36      pk       1114:
1.91      yamt     1115:        LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
                   1116:
1.66      thorpej  1117: #ifdef DIAGNOSTIC
                   1118:        if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36      pk       1119:                panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66      thorpej  1120: #endif
1.3       pk       1121:
                   1122:        /*
                   1123:         * Insert page header.
                   1124:         */
1.88      chs      1125:        LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3       pk       1126:        TAILQ_INIT(&ph->ph_itemlist);
                   1127:        ph->ph_page = storage;
                   1128:        ph->ph_nmissing = 0;
1.89      yamt     1129:        s = splclock();
                   1130:        ph->ph_time = mono_time;
                   1131:        splx(s);
1.88      chs      1132:        if ((pp->pr_roflags & PR_PHINPAGE) == 0)
                   1133:                SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3       pk       1134:
1.6       thorpej  1135:        pp->pr_nidle++;
                   1136:
1.3       pk       1137:        /*
                   1138:         * Color this page.
                   1139:         */
                   1140:        cp = (caddr_t)(cp + pp->pr_curcolor);
                   1141:        if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
                   1142:                pp->pr_curcolor = 0;
                   1143:
                   1144:        /*
                    1145:         * Adjust storage to apply alignment to `pr_itemoffset' in each item.
                   1146:         */
                   1147:        if (ioff != 0)
                   1148:                cp = (caddr_t)(cp + (align - ioff));
                   1149:
                   1150:        /*
                   1151:         * Insert remaining chunks on the bucket list.
                   1152:         */
                   1153:        n = pp->pr_itemsperpage;
1.20      thorpej  1154:        pp->pr_nitems += n;
1.3       pk       1155:
                   1156:        while (n--) {
                   1157:                pi = (struct pool_item *)cp;
1.78      thorpej  1158:
                   1159:                KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3       pk       1160:
                   1161:                /* Insert on page list */
                   1162:                TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
                   1163: #ifdef DIAGNOSTIC
                   1164:                pi->pi_magic = PI_MAGIC;
                   1165: #endif
                   1166:                cp = (caddr_t)(cp + pp->pr_size);
                   1167:        }
                   1168:
                   1169:        /*
                   1170:         * If the pool was depleted, point at the new page.
                   1171:         */
                   1172:        if (pp->pr_curpage == NULL)
                   1173:                pp->pr_curpage = ph;
                   1174:
                   1175:        if (++pp->pr_npages > pp->pr_hiwat)
                   1176:                pp->pr_hiwat = pp->pr_npages;
                   1177: }
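/*
 * Worked example (illustrative only; the numbers are assumptions, not
 * taken from any real pool): with pa_pagesz = 4096, pr_size = 384,
 * pr_align = 64, pr_itemoffset = 0 and the page header kept off-page,
 * pool_init() ends up with pr_itemsperpage = 4096 / 384 = 10, leaving
 * 4096 - 10 * 384 = 256 bytes of slack per page.  pool_prime_page()
 * cycles pr_curcolor through that slack in pr_align-sized steps
 * (0, 64, 128, ...), so successive pages start their item arrays at
 * different cache-line offsets, and item i within a page begins at
 * storage + color + i * pr_size.
 */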
                   1178:
1.20      thorpej  1179: /*
1.52      thorpej  1180:  * Used by pool_get() when nitems drops below the low water mark.  This
1.88      chs      1181:  * brings pr_nitems back up to the low water mark.
1.20      thorpej  1182:  *
1.21      thorpej  1183:  * Note 1, we never wait for memory here; we let the caller decide what to do.
1.20      thorpej  1184:  *
1.73      thorpej  1185:  * Note 2, we must be called with the pool already locked, and we return
1.20      thorpej  1186:  * with it locked.
                   1187:  */
                   1188: static int
1.42      thorpej  1189: pool_catchup(struct pool *pp)
1.20      thorpej  1190: {
1.83      scw      1191:        struct pool_item_header *ph = NULL;
1.20      thorpej  1192:        caddr_t cp;
                   1193:        int error = 0;
                   1194:
1.54      thorpej  1195:        while (POOL_NEEDS_CATCHUP(pp)) {
1.20      thorpej  1196:                /*
1.21      thorpej  1197:                 * Call the page back-end allocator for more memory.
                   1198:                 *
                   1199:                 * XXX: We never wait, so should we bother unlocking
                   1200:                 * the pool descriptor?
1.20      thorpej  1201:                 */
1.21      thorpej  1202:                simple_unlock(&pp->pr_slock);
1.66      thorpej  1203:                cp = pool_allocator_alloc(pp, PR_NOWAIT);
1.55      thorpej  1204:                if (__predict_true(cp != NULL))
                   1205:                        ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
                   1206:                if (__predict_false(cp == NULL || ph == NULL)) {
                   1207:                        if (cp != NULL)
1.66      thorpej  1208:                                pool_allocator_free(pp, cp);
1.20      thorpej  1209:                        error = ENOMEM;
1.91      yamt     1210:                        simple_lock(&pp->pr_slock);
1.20      thorpej  1211:                        break;
                   1212:                }
1.91      yamt     1213:                simple_lock(&pp->pr_slock);
1.55      thorpej  1214:                pool_prime_page(pp, cp, ph);
1.26      thorpej  1215:                pp->pr_npagealloc++;
1.20      thorpej  1216:        }
                   1217:
                   1218:        return (error);
                   1219: }
                   1220:
1.88      chs      1221: static void
                   1222: pool_update_curpage(struct pool *pp)
                   1223: {
                   1224:
                   1225:        pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
                   1226:        if (pp->pr_curpage == NULL) {
                   1227:                pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
                   1228:        }
                   1229: }
                   1230:
1.3       pk       1231: void
1.42      thorpej  1232: pool_setlowat(struct pool *pp, int n)
1.3       pk       1233: {
1.15      pk       1234:
1.21      thorpej  1235:        simple_lock(&pp->pr_slock);
                   1236:
1.3       pk       1237:        pp->pr_minitems = n;
1.15      pk       1238:        pp->pr_minpages = (n == 0)
                   1239:                ? 0
1.18      thorpej  1240:                : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20      thorpej  1241:
                   1242:        /* Make sure we're caught up with the newly-set low water mark. */
1.75      simonb   1243:        if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20      thorpej  1244:                /*
                   1245:                 * XXX: Should we log a warning?  Should we set up a timeout
                   1246:                 * to try again in a second or so?  The latter could break
                   1247:                 * a caller's assumptions about interrupt protection, etc.
                   1248:                 */
                   1249:        }
1.21      thorpej  1250:
                   1251:        simple_unlock(&pp->pr_slock);
1.3       pk       1252: }
                   1253:
                   1254: void
1.42      thorpej  1255: pool_sethiwat(struct pool *pp, int n)
1.3       pk       1256: {
1.15      pk       1257:
1.21      thorpej  1258:        simple_lock(&pp->pr_slock);
                   1259:
1.15      pk       1260:        pp->pr_maxpages = (n == 0)
                   1261:                ? 0
1.18      thorpej  1262:                : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21      thorpej  1263:
                   1264:        simple_unlock(&pp->pr_slock);
1.3       pk       1265: }
                   1266:
1.20      thorpej  1267: void
1.42      thorpej  1268: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20      thorpej  1269: {
                   1270:
1.21      thorpej  1271:        simple_lock(&pp->pr_slock);
1.20      thorpej  1272:
                   1273:        pp->pr_hardlimit = n;
                   1274:        pp->pr_hardlimit_warning = warnmess;
1.31      thorpej  1275:        pp->pr_hardlimit_ratecap.tv_sec = ratecap;
                   1276:        pp->pr_hardlimit_warning_last.tv_sec = 0;
                   1277:        pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20      thorpej  1278:
                   1279:        /*
1.21      thorpej  1280:         * In-line version of pool_sethiwat(), because we don't want to
                   1281:         * release the lock.
1.20      thorpej  1282:         */
                   1283:        pp->pr_maxpages = (n == 0)
                   1284:                ? 0
                   1285:                : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21      thorpej  1286:
                   1287:        simple_unlock(&pp->pr_slock);
1.20      thorpej  1288: }
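/*
 * Illustrative sketch (hypothetical foo_pool): the watermark and hard
 * limit knobs above are usually set right after pool_init(), e.g. to
 * keep at least 16 items ready, start releasing idle pages beyond 256
 * items worth, refuse to grow past 1024 items, and rate-limit the
 * warning to once per 60 seconds:
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 256);
 *	pool_sethardlimit(&foo_pool, 1024, "foo_pool limit reached", 60);
 */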
1.3       pk       1289:
                   1290: /*
                   1291:  * Release all complete pages that have not been used recently.
                   1292:  */
1.66      thorpej  1293: int
1.59      thorpej  1294: #ifdef POOL_DIAGNOSTIC
1.42      thorpej  1295: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56      sommerfe 1296: #else
                   1297: pool_reclaim(struct pool *pp)
                   1298: #endif
1.3       pk       1299: {
                   1300:        struct pool_item_header *ph, *phnext;
1.43      thorpej  1301:        struct pool_cache *pc;
1.21      thorpej  1302:        struct timeval curtime;
1.61      chs      1303:        struct pool_pagelist pq;
1.88      chs      1304:        struct timeval diff;
1.21      thorpej  1305:        int s;
1.3       pk       1306:
1.68      thorpej  1307:        if (pp->pr_drain_hook != NULL) {
                   1308:                /*
                   1309:                 * The drain hook must be called with the pool unlocked.
                   1310:                 */
                   1311:                (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
                   1312:        }
                   1313:
1.21      thorpej  1314:        if (simple_lock_try(&pp->pr_slock) == 0)
1.66      thorpej  1315:                return (0);
1.25      thorpej  1316:        pr_enter(pp, file, line);
1.68      thorpej  1317:
1.88      chs      1318:        LIST_INIT(&pq);
1.3       pk       1319:
1.43      thorpej  1320:        /*
                   1321:         * Reclaim items from the pool's caches.
                   1322:         */
1.61      chs      1323:        TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1.43      thorpej  1324:                pool_cache_reclaim(pc);
                   1325:
1.21      thorpej  1326:        s = splclock();
                   1327:        curtime = mono_time;
                   1328:        splx(s);
                   1329:
1.88      chs      1330:        for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
                   1331:                phnext = LIST_NEXT(ph, ph_pagelist);
1.3       pk       1332:
                   1333:                /* Check our minimum page claim */
                   1334:                if (pp->pr_npages <= pp->pr_minpages)
                   1335:                        break;
                   1336:
1.88      chs      1337:                KASSERT(ph->ph_nmissing == 0);
                   1338:                timersub(&curtime, &ph->ph_time, &diff);
                   1339:                if (diff.tv_sec < pool_inactive_time)
                   1340:                        continue;
1.21      thorpej  1341:
1.88      chs      1342:                /*
                   1343:                 * If freeing this page would put us below
                   1344:                 * the low water mark, stop now.
                   1345:                 */
                   1346:                if ((pp->pr_nitems - pp->pr_itemsperpage) <
                   1347:                    pp->pr_minitems)
                   1348:                        break;
1.21      thorpej  1349:
1.88      chs      1350:                pr_rmpage(pp, ph, &pq);
1.3       pk       1351:        }
                   1352:
1.25      thorpej  1353:        pr_leave(pp);
1.21      thorpej  1354:        simple_unlock(&pp->pr_slock);
1.88      chs      1355:        if (LIST_EMPTY(&pq))
1.66      thorpej  1356:                return (0);
                   1357:
1.88      chs      1358:        while ((ph = LIST_FIRST(&pq)) != NULL) {
                   1359:                LIST_REMOVE(ph, ph_pagelist);
1.66      thorpej  1360:                pool_allocator_free(pp, ph->ph_page);
1.61      chs      1361:                if (pp->pr_roflags & PR_PHINPAGE) {
                   1362:                        continue;
                   1363:                }
1.85      pk       1364:                s = splvm();
1.61      chs      1365:                pool_put(&phpool, ph);
                   1366:                splx(s);
                   1367:        }
1.66      thorpej  1368:
                   1369:        return (1);
1.3       pk       1370: }
                   1371:
                   1372: /*
                   1373:  * Drain pools, one at a time.
1.21      thorpej  1374:  *
                   1375:  * Note, we must never be called from an interrupt context.
1.3       pk       1376:  */
                   1377: void
1.42      thorpej  1378: pool_drain(void *arg)
1.3       pk       1379: {
                   1380:        struct pool *pp;
1.23      thorpej  1381:        int s;
1.3       pk       1382:
1.61      chs      1383:        pp = NULL;
1.49      thorpej  1384:        s = splvm();
1.23      thorpej  1385:        simple_lock(&pool_head_slock);
1.61      chs      1386:        if (drainpp == NULL) {
                   1387:                drainpp = TAILQ_FIRST(&pool_head);
                   1388:        }
                   1389:        if (drainpp) {
                   1390:                pp = drainpp;
                   1391:                drainpp = TAILQ_NEXT(pp, pr_poollist);
                   1392:        }
                   1393:        simple_unlock(&pool_head_slock);
1.63      chs      1394:        if (pp != NULL) pool_reclaim(pp);
1.61      chs      1395:        splx(s);
1.3       pk       1396: }
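/*
 * Illustrative note (the caller named here is an assumption): pool_drain()
 * is meant to be invoked periodically when memory is tight, e.g. from the
 * VM page daemon, as
 *
 *	pool_drain(NULL);
 *
 * Each call reclaims idle pages from at most one pool, and "drainpp"
 * cycles through the pool list across successive calls so all pools are
 * eventually visited.
 */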
                   1397:
                   1398: /*
                   1399:  * Diagnostic helpers.
                   1400:  */
                   1401: void
1.42      thorpej  1402: pool_print(struct pool *pp, const char *modif)
1.21      thorpej  1403: {
                   1404:        int s;
                   1405:
1.49      thorpej  1406:        s = splvm();
1.25      thorpej  1407:        if (simple_lock_try(&pp->pr_slock) == 0) {
                   1408:                printf("pool %s is locked; try again later\n",
                   1409:                    pp->pr_wchan);
                   1410:                splx(s);
                   1411:                return;
                   1412:        }
                   1413:        pool_print1(pp, modif, printf);
1.21      thorpej  1414:        simple_unlock(&pp->pr_slock);
                   1415:        splx(s);
                   1416: }
                   1417:
1.25      thorpej  1418: void
1.42      thorpej  1419: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25      thorpej  1420: {
                   1421:        int didlock = 0;
                   1422:
                   1423:        if (pp == NULL) {
                   1424:                (*pr)("Must specify a pool to print.\n");
                   1425:                return;
                   1426:        }
                   1427:
                   1428:        /*
                   1429:         * Called from DDB; interrupts should be blocked, and all
                   1430:         * other processors should be paused.  We can skip locking
                   1431:         * the pool in this case.
                   1432:         *
                   1433:         * We do a simple_lock_try() just to print the lock
                   1434:         * status, however.
                   1435:         */
                   1436:
                   1437:        if (simple_lock_try(&pp->pr_slock) == 0)
                   1438:                (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
                   1439:        else
                   1440:                didlock = 1;
                   1441:
                   1442:        pool_print1(pp, modif, pr);
                   1443:
                   1444:        if (didlock)
                   1445:                simple_unlock(&pp->pr_slock);
                   1446: }
                   1447:
1.21      thorpej  1448: static void
1.88      chs      1449: pool_print_pagelist(struct pool_pagelist *pl, void (*pr)(const char *, ...))
                   1450: {
                   1451:        struct pool_item_header *ph;
                   1452: #ifdef DIAGNOSTIC
                   1453:        struct pool_item *pi;
                   1454: #endif
                   1455:
                   1456:        LIST_FOREACH(ph, pl, ph_pagelist) {
                   1457:                (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
                   1458:                    ph->ph_page, ph->ph_nmissing,
                   1459:                    (u_long)ph->ph_time.tv_sec,
                   1460:                    (u_long)ph->ph_time.tv_usec);
                   1461: #ifdef DIAGNOSTIC
                   1462:                TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
                   1463:                        if (pi->pi_magic != PI_MAGIC) {
                   1464:                                (*pr)("\t\t\titem %p, magic 0x%x\n",
                   1465:                                    pi, pi->pi_magic);
                   1466:                        }
                   1467:                }
                   1468: #endif
                   1469:        }
                   1470: }
                   1471:
                   1472: static void
1.42      thorpej  1473: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3       pk       1474: {
1.25      thorpej  1475:        struct pool_item_header *ph;
1.44      thorpej  1476:        struct pool_cache *pc;
                   1477:        struct pool_cache_group *pcg;
                   1478:        int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25      thorpej  1479:        char c;
                   1480:
                   1481:        while ((c = *modif++) != '\0') {
                   1482:                if (c == 'l')
                   1483:                        print_log = 1;
                   1484:                if (c == 'p')
                   1485:                        print_pagelist = 1;
1.44      thorpej  1486:                if (c == 'c')
                   1487:                        print_cache = 1;
1.25      thorpej  1488:        }
                   1489:
                   1490:        (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
                   1491:            pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
                   1492:            pp->pr_roflags);
1.66      thorpej  1493:        (*pr)("\talloc %p\n", pp->pr_alloc);
1.25      thorpej  1494:        (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
                   1495:            pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
                   1496:        (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
                   1497:            pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
                   1498:
                   1499:        (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
                   1500:            pp->pr_nget, pp->pr_nfail, pp->pr_nput);
                   1501:        (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
                   1502:            pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
                   1503:
                   1504:        if (print_pagelist == 0)
                   1505:                goto skip_pagelist;
                   1506:
1.88      chs      1507:        if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
                   1508:                (*pr)("\n\tempty page list:\n");
                   1509:        pool_print_pagelist(&pp->pr_emptypages, pr);
                   1510:        if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
                   1511:                (*pr)("\n\tfull page list:\n");
                   1512:        pool_print_pagelist(&pp->pr_fullpages, pr);
                   1513:        if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
                   1514:                (*pr)("\n\tpartial-page list:\n");
                   1515:        pool_print_pagelist(&pp->pr_partpages, pr);
                   1516:
1.25      thorpej  1517:        if (pp->pr_curpage == NULL)
                   1518:                (*pr)("\tno current page\n");
                   1519:        else
                   1520:                (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
                   1521:
                   1522:  skip_pagelist:
                   1523:        if (print_log == 0)
                   1524:                goto skip_log;
                   1525:
                   1526:        (*pr)("\n");
                   1527:        if ((pp->pr_roflags & PR_LOGGING) == 0)
                   1528:                (*pr)("\tno log\n");
                   1529:        else
                   1530:                pr_printlog(pp, NULL, pr);
1.3       pk       1531:
1.25      thorpej  1532:  skip_log:
1.44      thorpej  1533:        if (print_cache == 0)
                   1534:                goto skip_cache;
                   1535:
1.61      chs      1536:        TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.44      thorpej  1537:                (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
                   1538:                    pc->pc_allocfrom, pc->pc_freeto);
1.48      thorpej  1539:                (*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
                   1540:                    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.61      chs      1541:                TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.44      thorpej  1542:                        (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1.87      thorpej  1543:                        for (i = 0; i < PCG_NOBJECTS; i++) {
                   1544:                                if (pcg->pcg_objects[i].pcgo_pa !=
                   1545:                                    POOL_PADDR_INVALID) {
                   1546:                                        (*pr)("\t\t\t%p, 0x%llx\n",
                   1547:                                            pcg->pcg_objects[i].pcgo_va,
                   1548:                                            (unsigned long long)
                   1549:                                            pcg->pcg_objects[i].pcgo_pa);
                   1550:                                } else {
                   1551:                                        (*pr)("\t\t\t%p\n",
                   1552:                                            pcg->pcg_objects[i].pcgo_va);
                   1553:                                }
                   1554:                        }
1.44      thorpej  1555:                }
                   1556:        }
                   1557:
                   1558:  skip_cache:
1.88      chs      1559:        pr_enter_check(pp, pr);
                   1560: }
                   1561:
                   1562: static int
                   1563: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
                   1564: {
                   1565:        struct pool_item *pi;
                   1566:        caddr_t page;
                   1567:        int n;
                   1568:
                   1569:        page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
                   1570:        if (page != ph->ph_page &&
                   1571:            (pp->pr_roflags & PR_PHINPAGE) != 0) {
                   1572:                if (label != NULL)
                   1573:                        printf("%s: ", label);
                   1574:                printf("pool(%p:%s): page inconsistency: page %p;"
                   1575:                       " at page head addr %p (p %p)\n", pp,
                   1576:                        pp->pr_wchan, ph->ph_page,
                   1577:                        ph, page);
                   1578:                return 1;
                   1579:        }
1.3       pk       1580:
1.88      chs      1581:        for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
                   1582:             pi != NULL;
                   1583:             pi = TAILQ_NEXT(pi,pi_list), n++) {
                   1584:
                   1585: #ifdef DIAGNOSTIC
                   1586:                if (pi->pi_magic != PI_MAGIC) {
                   1587:                        if (label != NULL)
                   1588:                                printf("%s: ", label);
                   1589:                        printf("pool(%s): free list modified: magic=%x;"
                   1590:                               " page %p; item ordinal %d;"
                   1591:                               " addr %p (p %p)\n",
                   1592:                                pp->pr_wchan, pi->pi_magic, ph->ph_page,
                   1593:                                n, pi, page);
                   1594:                        panic("pool");
                   1595:                }
                   1596: #endif
                   1597:                page =
                   1598:                    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
                   1599:                if (page == ph->ph_page)
                   1600:                        continue;
                   1601:
                   1602:                if (label != NULL)
                   1603:                        printf("%s: ", label);
                   1604:                printf("pool(%p:%s): page inconsistency: page %p;"
                   1605:                       " item ordinal %d; addr %p (p %p)\n", pp,
                   1606:                        pp->pr_wchan, ph->ph_page,
                   1607:                        n, pi, page);
                   1608:                return 1;
                   1609:        }
                   1610:        return 0;
1.3       pk       1611: }
                   1612:
1.88      chs      1613:
1.3       pk       1614: int
1.42      thorpej  1615: pool_chk(struct pool *pp, const char *label)
1.3       pk       1616: {
                   1617:        struct pool_item_header *ph;
                   1618:        int r = 0;
                   1619:
1.21      thorpej  1620:        simple_lock(&pp->pr_slock);
1.88      chs      1621:        LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
                   1622:                r = pool_chk_page(pp, label, ph);
                   1623:                if (r) {
                   1624:                        goto out;
                   1625:                }
                   1626:        }
                   1627:        LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
                   1628:                r = pool_chk_page(pp, label, ph);
                   1629:                if (r) {
1.3       pk       1630:                        goto out;
                   1631:                }
1.88      chs      1632:        }
                   1633:        LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
                   1634:                r = pool_chk_page(pp, label, ph);
                   1635:                if (r) {
1.3       pk       1636:                        goto out;
                   1637:                }
                   1638:        }
1.88      chs      1639:
1.3       pk       1640: out:
1.21      thorpej  1641:        simple_unlock(&pp->pr_slock);
1.3       pk       1642:        return (r);
1.43      thorpej  1643: }
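/*
 * Illustrative sketch (hypothetical foo_pool and label): pool_chk() is a
 * debugging aid; a DIAGNOSTIC-only code path could verify a pool with
 *
 *	if (pool_chk(&foo_pool, "foo_detach") != 0)
 *		panic("foo_pool inconsistent");
 */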
                   1644:
                   1645: /*
                   1646:  * pool_cache_init:
                   1647:  *
                   1648:  *     Initialize a pool cache.
                   1649:  *
                   1650:  *     NOTE: If the pool must be protected from interrupts, we expect
                   1651:  *     to be called at the appropriate interrupt priority level.
                   1652:  */
                   1653: void
                   1654: pool_cache_init(struct pool_cache *pc, struct pool *pp,
                   1655:     int (*ctor)(void *, void *, int),
                   1656:     void (*dtor)(void *, void *),
                   1657:     void *arg)
                   1658: {
                   1659:
                   1660:        TAILQ_INIT(&pc->pc_grouplist);
                   1661:        simple_lock_init(&pc->pc_slock);
                   1662:
                   1663:        pc->pc_allocfrom = NULL;
                   1664:        pc->pc_freeto = NULL;
                   1665:        pc->pc_pool = pp;
                   1666:
                   1667:        pc->pc_ctor = ctor;
                   1668:        pc->pc_dtor = dtor;
                   1669:        pc->pc_arg  = arg;
                   1670:
1.48      thorpej  1671:        pc->pc_hits   = 0;
                   1672:        pc->pc_misses = 0;
                   1673:
                   1674:        pc->pc_ngroups = 0;
                   1675:
                   1676:        pc->pc_nitems = 0;
                   1677:
1.43      thorpej  1678:        simple_lock(&pp->pr_slock);
                   1679:        TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
                   1680:        simple_unlock(&pp->pr_slock);
                   1681: }
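/*
 * Illustrative sketch (foo_cache, foo_ctor, foo_dtor and struct foo are
 * hypothetical): a pool cache layers constructed-object recycling on top
 * of an existing pool, so expensively-initialized objects can be reused
 * without re-running the constructor:
 *
 *	static struct pool_cache foo_cache;
 *
 *	static int
 *	foo_ctor(void *arg, void *obj, int flags)
 *	{
 *		struct foo *f = obj;
 *
 *		memset(f, 0, sizeof(*f));
 *		return (0);
 *	}
 *
 *	static void
 *	foo_dtor(void *arg, void *obj)
 *	{
 *	}
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 */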
                   1682:
                   1683: /*
                   1684:  * pool_cache_destroy:
                   1685:  *
                   1686:  *     Destroy a pool cache.
                   1687:  */
                   1688: void
                   1689: pool_cache_destroy(struct pool_cache *pc)
                   1690: {
                   1691:        struct pool *pp = pc->pc_pool;
                   1692:
                   1693:        /* First, invalidate the entire cache. */
                   1694:        pool_cache_invalidate(pc);
                   1695:
                   1696:        /* ...and remove it from the pool's cache list. */
                   1697:        simple_lock(&pp->pr_slock);
                   1698:        TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
                   1699:        simple_unlock(&pp->pr_slock);
                   1700: }
                   1701:
                   1702: static __inline void *
1.87      thorpej  1703: pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1.43      thorpej  1704: {
                   1705:        void *object;
                   1706:        u_int idx;
                   1707:
                   1708:        KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45      thorpej  1709:        KASSERT(pcg->pcg_avail != 0);
1.43      thorpej  1710:        idx = --pcg->pcg_avail;
                   1711:
1.87      thorpej  1712:        KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
                   1713:        object = pcg->pcg_objects[idx].pcgo_va;
                   1714:        if (pap != NULL)
                   1715:                *pap = pcg->pcg_objects[idx].pcgo_pa;
                   1716:        pcg->pcg_objects[idx].pcgo_va = NULL;
1.43      thorpej  1717:
                   1718:        return (object);
                   1719: }
                   1720:
                   1721: static __inline void
1.87      thorpej  1722: pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1.43      thorpej  1723: {
                   1724:        u_int idx;
                   1725:
                   1726:        KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
                   1727:        idx = pcg->pcg_avail++;
                   1728:
1.87      thorpej  1729:        KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
                   1730:        pcg->pcg_objects[idx].pcgo_va = object;
                   1731:        pcg->pcg_objects[idx].pcgo_pa = pa;
1.43      thorpej  1732: }
                   1733:
                   1734: /*
1.87      thorpej  1735:  * pool_cache_get{,_paddr}:
1.43      thorpej  1736:  *
1.87      thorpej  1737:  *     Get an object from a pool cache (optionally returning
                   1738:  *     the physical address of the object).
1.43      thorpej  1739:  */
                   1740: void *
1.87      thorpej  1741: pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1.43      thorpej  1742: {
                   1743:        struct pool_cache_group *pcg;
                   1744:        void *object;
1.58      thorpej  1745:
                   1746: #ifdef LOCKDEBUG
                   1747:        if (flags & PR_WAITOK)
                   1748:                simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
                   1749: #endif
1.43      thorpej  1750:
                   1751:        simple_lock(&pc->pc_slock);
                   1752:
                   1753:        if ((pcg = pc->pc_allocfrom) == NULL) {
1.61      chs      1754:                TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43      thorpej  1755:                        if (pcg->pcg_avail != 0) {
                   1756:                                pc->pc_allocfrom = pcg;
                   1757:                                goto have_group;
                   1758:                        }
                   1759:                }
                   1760:
                   1761:                /*
                   1762:                 * No groups with any available objects.  Allocate
                   1763:                 * a new object, construct it, and return it to
                   1764:                 * the caller.  We will allocate a group, if necessary,
                   1765:                 * when the object is freed back to the cache.
                   1766:                 */
1.48      thorpej  1767:                pc->pc_misses++;
1.43      thorpej  1768:                simple_unlock(&pc->pc_slock);
                   1769:                object = pool_get(pc->pc_pool, flags);
                   1770:                if (object != NULL && pc->pc_ctor != NULL) {
                   1771:                        if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
                   1772:                                pool_put(pc->pc_pool, object);
                   1773:                                return (NULL);
                   1774:                        }
                   1775:                }
1.87      thorpej  1776:                if (object != NULL && pap != NULL) {
                   1777: #ifdef POOL_VTOPHYS
                   1778:                        *pap = POOL_VTOPHYS(object);
                   1779: #else
                   1780:                        *pap = POOL_PADDR_INVALID;
                   1781: #endif
                   1782:                }
1.43      thorpej  1783:                return (object);
                   1784:        }
                   1785:
                   1786:  have_group:
1.48      thorpej  1787:        pc->pc_hits++;
                   1788:        pc->pc_nitems--;
1.87      thorpej  1789:        object = pcg_get(pcg, pap);
1.43      thorpej  1790:
                   1791:        if (pcg->pcg_avail == 0)
                   1792:                pc->pc_allocfrom = NULL;
1.45      thorpej  1793:
1.43      thorpej  1794:        simple_unlock(&pc->pc_slock);
                   1795:
                   1796:        return (object);
                   1797: }
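/*
 * Illustrative sketch (hypothetical foo_cache): most callers use the
 * plain pool_cache_get()/pool_cache_put() wrappers; the _paddr variants
 * are for callers that also need the object's physical address, e.g.
 * hardware descriptor pools:
 *
 *	paddr_t pa;
 *	void *desc;
 *
 *	desc = pool_cache_get_paddr(&foo_cache, PR_WAITOK, &pa);
 *	...
 *	pool_cache_put_paddr(&foo_cache, desc, pa);
 */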
                   1798:
                   1799: /*
1.87      thorpej  1800:  * pool_cache_put{,_paddr}:
1.43      thorpej  1801:  *
1.87      thorpej  1802:  *     Put an object back to the pool cache (optionally caching the
                   1803:  *     physical address of the object).
1.43      thorpej  1804:  */
                   1805: void
1.87      thorpej  1806: pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1.43      thorpej  1807: {
                   1808:        struct pool_cache_group *pcg;
1.60      thorpej  1809:        int s;
1.43      thorpej  1810:
                   1811:        simple_lock(&pc->pc_slock);
                   1812:
                   1813:        if ((pcg = pc->pc_freeto) == NULL) {
1.61      chs      1814:                TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43      thorpej  1815:                        if (pcg->pcg_avail != PCG_NOBJECTS) {
                   1816:                                pc->pc_freeto = pcg;
                   1817:                                goto have_group;
                   1818:                        }
                   1819:                }
                   1820:
                   1821:                /*
                   1822:                 * No empty groups to free the object to.  Attempt to
1.47      thorpej  1823:                 * allocate one.
1.43      thorpej  1824:                 */
1.47      thorpej  1825:                simple_unlock(&pc->pc_slock);
1.60      thorpej  1826:                s = splvm();
1.43      thorpej  1827:                pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60      thorpej  1828:                splx(s);
1.43      thorpej  1829:                if (pcg != NULL) {
                   1830:                        memset(pcg, 0, sizeof(*pcg));
1.47      thorpej  1831:                        simple_lock(&pc->pc_slock);
1.48      thorpej  1832:                        pc->pc_ngroups++;
1.43      thorpej  1833:                        TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1.47      thorpej  1834:                        if (pc->pc_freeto == NULL)
                   1835:                                pc->pc_freeto = pcg;
1.43      thorpej  1836:                        goto have_group;
                   1837:                }
                   1838:
                   1839:                /*
                   1840:                 * Unable to allocate a cache group; destruct the object
                   1841:                 * and free it back to the pool.
                   1842:                 */
1.51      thorpej  1843:                pool_cache_destruct_object(pc, object);
1.43      thorpej  1844:                return;
                   1845:        }
                   1846:
                   1847:  have_group:
1.48      thorpej  1848:        pc->pc_nitems++;
1.87      thorpej  1849:        pcg_put(pcg, object, pa);
1.43      thorpej  1850:
                   1851:        if (pcg->pcg_avail == PCG_NOBJECTS)
                   1852:                pc->pc_freeto = NULL;
                   1853:
                   1854:        simple_unlock(&pc->pc_slock);
1.51      thorpej  1855: }
                   1856:
                   1857: /*
                   1858:  * pool_cache_destruct_object:
                   1859:  *
                   1860:  *     Force destruction of an object and its release back into
                   1861:  *     the pool.
                   1862:  */
                   1863: void
                   1864: pool_cache_destruct_object(struct pool_cache *pc, void *object)
                   1865: {
                   1866:
                   1867:        if (pc->pc_dtor != NULL)
                   1868:                (*pc->pc_dtor)(pc->pc_arg, object);
                   1869:        pool_put(pc->pc_pool, object);
1.43      thorpej  1870: }
                   1871:
                   1872: /*
                   1873:  * pool_cache_do_invalidate:
                   1874:  *
                   1875:  *     This internal function implements pool_cache_invalidate() and
                   1876:  *     pool_cache_reclaim().
                   1877:  */
                   1878: static void
                   1879: pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1.56      sommerfe 1880:     void (*putit)(struct pool *, void *))
1.43      thorpej  1881: {
                   1882:        struct pool_cache_group *pcg, *npcg;
                   1883:        void *object;
1.60      thorpej  1884:        int s;
1.43      thorpej  1885:
                   1886:        for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
                   1887:             pcg = npcg) {
                   1888:                npcg = TAILQ_NEXT(pcg, pcg_list);
                   1889:                while (pcg->pcg_avail != 0) {
1.48      thorpej  1890:                        pc->pc_nitems--;
1.87      thorpej  1891:                        object = pcg_get(pcg, NULL);
1.45      thorpej  1892:                        if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
                   1893:                                pc->pc_allocfrom = NULL;
1.43      thorpej  1894:                        if (pc->pc_dtor != NULL)
                   1895:                                (*pc->pc_dtor)(pc->pc_arg, object);
1.56      sommerfe 1896:                        (*putit)(pc->pc_pool, object);
1.43      thorpej  1897:                }
                   1898:                if (free_groups) {
1.48      thorpej  1899:                        pc->pc_ngroups--;
1.43      thorpej  1900:                        TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1.46      thorpej  1901:                        if (pc->pc_freeto == pcg)
                   1902:                                pc->pc_freeto = NULL;
1.60      thorpej  1903:                        s = splvm();
1.43      thorpej  1904:                        pool_put(&pcgpool, pcg);
1.60      thorpej  1905:                        splx(s);
1.43      thorpej  1906:                }
                   1907:        }
                   1908: }
                   1909:
                   1910: /*
                   1911:  * pool_cache_invalidate:
                   1912:  *
                   1913:  *     Invalidate a pool cache (destruct and release all of the
                   1914:  *     cached objects).
                   1915:  */
                   1916: void
                   1917: pool_cache_invalidate(struct pool_cache *pc)
                   1918: {
                   1919:
                   1920:        simple_lock(&pc->pc_slock);
1.56      sommerfe 1921:        pool_cache_do_invalidate(pc, 0, pool_put);
1.43      thorpej  1922:        simple_unlock(&pc->pc_slock);
                   1923: }
                   1924:
                   1925: /*
                   1926:  * pool_cache_reclaim:
                   1927:  *
                   1928:  *     Reclaim a pool cache for pool_reclaim().
                   1929:  */
                   1930: static void
                   1931: pool_cache_reclaim(struct pool_cache *pc)
                   1932: {
                   1933:
1.47      thorpej  1934:        simple_lock(&pc->pc_slock);
1.43      thorpej  1935:        pool_cache_do_invalidate(pc, 1, pool_do_put);
                   1936:        simple_unlock(&pc->pc_slock);
1.3       pk       1937: }
1.66      thorpej  1938:
                   1939: /*
                   1940:  * Pool backend allocators.
                   1941:  *
                   1942:  * Each pool has a backend allocator that handles allocation, deallocation,
                   1943:  * and any additional draining that might be needed.
                   1944:  *
                   1945:  * We provide two standard allocators:
                   1946:  *
                   1947:  *     pool_allocator_kmem - the default when no allocator is specified
                   1948:  *
                   1949:  *     pool_allocator_nointr - used for pools that will not be accessed
                   1950:  *     in interrupt context.
                   1951:  */
                   1952: void   *pool_page_alloc(struct pool *, int);
                   1953: void   pool_page_free(struct pool *, void *);
                   1954:
                   1955: struct pool_allocator pool_allocator_kmem = {
                   1956:        pool_page_alloc, pool_page_free, 0,
                   1957: };
                   1958:
                   1959: void   *pool_page_alloc_nointr(struct pool *, int);
                   1960: void   pool_page_free_nointr(struct pool *, void *);
                   1961:
                   1962: struct pool_allocator pool_allocator_nointr = {
                   1963:        pool_page_alloc_nointr, pool_page_free_nointr, 0,
                   1964: };
                   1965:
                   1966: #ifdef POOL_SUBPAGE
                   1967: void   *pool_subpage_alloc(struct pool *, int);
                   1968: void   pool_subpage_free(struct pool *, void *);
                   1969:
                   1970: struct pool_allocator pool_allocator_kmem_subpage = {
                   1971:        pool_subpage_alloc, pool_subpage_free, 0,
                   1972: };
                   1973: #endif /* POOL_SUBPAGE */
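/*
 * Illustrative sketch (foo_page_alloc, foo_page_free and foo_allocator
 * are hypothetical): a subsystem can supply its own back-end by filling
 * in pa_alloc, pa_free and pa_pagesz (0 selects the default page size);
 * the remaining pool_allocator fields are expected to be set up when the
 * first pool using the allocator is initialized:
 *
 *	void *foo_page_alloc(struct pool *, int);
 *	void foo_page_free(struct pool *, void *);
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &foo_allocator);
 */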
                   1974:
                   1975: /*
                   1976:  * We have at least three different resources for the same allocation and
                   1977:  * each resource can be depleted.  First, we have the ready elements in the
                   1978:  * pool.  Then we have the resource (typically a vm_map) for this allocator.
                   1979:  * Finally, we have physical memory.  Waiting for any of these can be
                   1980:  * unnecessary when any other is freed, but the kernel doesn't support
                   1981:  * sleeping on multiple wait channels, so we have to employ another strategy.
                   1982:  *
                   1983:  * The caller sleeps on the pool (so that it can be awakened when an item
                   1984:  * is returned to the pool), but we set PA_WANT on the allocator.  When a
                   1985:  * page is returned to the allocator and PA_WANT is set, pool_allocator_free
                   1986:  * will wake up all sleeping pools belonging to this allocator.
                   1987:  *
                   1988:  * XXX Thundering herd.
                   1989:  */
                   1990: void *
                   1991: pool_allocator_alloc(struct pool *org, int flags)
                   1992: {
                   1993:        struct pool_allocator *pa = org->pr_alloc;
                   1994:        struct pool *pp, *start;
                   1995:        int s, freed;
                   1996:        void *res;
                   1997:
1.91      yamt     1998:        LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
                   1999:
1.66      thorpej  2000:        do {
                   2001:                if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
                   2002:                        return (res);
1.68      thorpej  2003:                if ((flags & PR_WAITOK) == 0) {
                   2004:                        /*
                    2005:         * We only run the drain hook here if PR_NOWAIT.
                   2006:                         * In other cases, the hook will be run in
                   2007:                         * pool_reclaim().
                   2008:                         */
                   2009:                        if (org->pr_drain_hook != NULL) {
                   2010:                                (*org->pr_drain_hook)(org->pr_drain_hook_arg,
                   2011:                                    flags);
                   2012:                                if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
                   2013:                                        return (res);
                   2014:                        }
1.66      thorpej  2015:                        break;
1.68      thorpej  2016:                }
1.66      thorpej  2017:
                   2018:                /*
                   2019:                 * Drain all pools, except "org", that use this
                   2020:                 * allocator.  We do this to reclaim VA space.
                   2021:                 * pa_alloc is responsible for waiting for
                   2022:                 * physical memory.
                   2023:                 *
                    2024:         * XXX We risk looping forever if someone calls
                    2025:         * pool_destroy on "start".  But there is no
                   2026:                 * other way to have potentially sleeping pool_reclaim,
                   2027:                 * non-sleeping locks on pool_allocator, and some
                   2028:                 * stirring of drained pools in the allocator.
1.68      thorpej  2029:                 *
                   2030:                 * XXX Maybe we should use pool_head_slock for locking
                   2031:                 * the allocators?
1.66      thorpej  2032:                 */
                   2033:                freed = 0;
                   2034:
                   2035:                s = splvm();
                   2036:                simple_lock(&pa->pa_slock);
                   2037:                pp = start = TAILQ_FIRST(&pa->pa_list);
                   2038:                do {
                   2039:                        TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
                   2040:                        TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
                   2041:                        if (pp == org)
                   2042:                                continue;
1.73      thorpej  2043:                        simple_unlock(&pa->pa_slock);
1.66      thorpej  2044:                        freed = pool_reclaim(pp);
1.73      thorpej  2045:                        simple_lock(&pa->pa_slock);
1.66      thorpej  2046:                } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
                   2047:                         freed == 0);
                   2048:
                   2049:                if (freed == 0) {
                   2050:                        /*
                    2051:                         * We set PA_WANT here; the caller will most likely
                   2052:                         * sleep waiting for pages (if not, this won't hurt
                   2053:                         * that much), and there is no way to set this in
                   2054:                         * the caller without violating locking order.
                   2055:                         */
                   2056:                        pa->pa_flags |= PA_WANT;
                   2057:                }
                   2058:                simple_unlock(&pa->pa_slock);
                   2059:                splx(s);
                   2060:        } while (freed);
                   2061:        return (NULL);
                   2062: }
                   2063:
                   2064: void
                   2065: pool_allocator_free(struct pool *pp, void *v)
                   2066: {
                   2067:        struct pool_allocator *pa = pp->pr_alloc;
                   2068:        int s;
                   2069:
1.91      yamt     2070:        LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
                   2071:
1.66      thorpej  2072:        (*pa->pa_free)(pp, v);
                   2073:
                   2074:        s = splvm();
                   2075:        simple_lock(&pa->pa_slock);
                   2076:        if ((pa->pa_flags & PA_WANT) == 0) {
                   2077:                simple_unlock(&pa->pa_slock);
                   2078:                splx(s);
                   2079:                return;
                   2080:        }
                   2081:
                   2082:        TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
                   2083:                simple_lock(&pp->pr_slock);
                   2084:                if ((pp->pr_flags & PR_WANTED) != 0) {
                   2085:                        pp->pr_flags &= ~PR_WANTED;
                   2086:                        wakeup(pp);
                   2087:                }
1.69      thorpej  2088:                simple_unlock(&pp->pr_slock);
1.66      thorpej  2089:        }
                   2090:        pa->pa_flags &= ~PA_WANT;
                   2091:        simple_unlock(&pa->pa_slock);
                   2092:        splx(s);
                   2093: }
                   2094:
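                          /*
                           * Back-end that hands out whole hardware pages through
                           * uvm_km's pool page interface.
                           */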
                   2095: void *
                   2096: pool_page_alloc(struct pool *pp, int flags)
                   2097: {
                   2098:        boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
                   2099:
                   2100:        return ((void *) uvm_km_alloc_poolpage(waitok));
                   2101: }
                   2102:
                   2103: void
                   2104: pool_page_free(struct pool *pp, void *v)
                   2105: {
                   2106:
                   2107:        uvm_km_free_poolpage((vaddr_t) v);
                   2108: }
                   2109:
                   2110: #ifdef POOL_SUBPAGE
                   2111: /* Sub-page allocator, for machines with large hardware pages. */
                   2112: void *
                   2113: pool_subpage_alloc(struct pool *pp, int flags)
                   2114: {
1.93      dbj      2115:        void *v;
                   2116:        int s;
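                                 /*
                                  * Access psppool at splvm: with POOL_SUBPAGE, interrupt-safe
                                  * pools also draw their sub-pages through this path, so
                                  * psppool must be protected from interrupt-level pool
                                  * activity.
                                  */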
                   2117:        s = splvm();
                   2118:        v = pool_get(&psppool, flags);
                   2119:        splx(s);
                   2120:        return v;
1.66      thorpej  2121: }
                   2122:
                   2123: void
                   2124: pool_subpage_free(struct pool *pp, void *v)
                   2125: {
1.93      dbj      2126:        int s;
                   2127:        s = splvm();
1.66      thorpej  2128:        pool_put(&psppool, v);
1.93      dbj      2129:        splx(s);
1.66      thorpej  2130: }
                   2131:
                   2132: /* We don't provide a real nointr allocator.  Maybe later. */
                   2133: void *
                   2134: pool_page_alloc_nointr(struct pool *pp, int flags)
                   2135: {
                   2136:
                   2137:        return (pool_subpage_alloc(pp, flags));
                   2138: }
                   2139:
                   2140: void
                   2141: pool_page_free_nointr(struct pool *pp, void *v)
                   2142: {
                   2143:
                   2144:        pool_subpage_free(pp, v);
                   2145: }
                   2146: #else
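                          /*
                           * Default nointr back-end when POOL_SUBPAGE is not configured:
                           * pages come straight from kernel_map.
                           */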
                   2147: void *
                   2148: pool_page_alloc_nointr(struct pool *pp, int flags)
                   2149: {
                   2150:        boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
                   2151:
                   2152:        return ((void *) uvm_km_alloc_poolpage1(kernel_map,
                   2153:            uvm.kernel_object, waitok));
                   2154: }
                   2155:
                   2156: void
                   2157: pool_page_free_nointr(struct pool *pp, void *v)
                   2158: {
                   2159:
                   2160:        uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
                   2161: }
                   2162: #endif /* POOL_SUBPAGE */
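
                          /*
                           * A minimal sketch of how a client hands its own allocator to
                           * pool_init().  The names frob_page_alloc, frob_page_free,
                           * frob_allocator, frob_pool and struct frob are hypothetical;
                           * a pa_pagesz of 0 selects the default page size.
                           *
                           *	struct pool_allocator frob_allocator = {
                           *		frob_page_alloc, frob_page_free, 0,
                           *	};
                           *	struct pool frob_pool;
                           *
                           *	pool_init(&frob_pool, sizeof(struct frob), 0, 0, 0,
                           *	    "frobpl", &frob_allocator);
                           */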
