Annotation of src/sys/kern/subr_pool.c, Revision 1.199
1.199 ! christos 1: /* $NetBSD: subr_pool.c,v 1.198 2012/08/28 15:52:19 christos Exp $ */
1.1 pk 2:
3: /*-
1.183 ad 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010
5: * The NetBSD Foundation, Inc.
1.1 pk 6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 9: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.134 ad 10: * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
1.1 pk 11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: *
21: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31: * POSSIBILITY OF SUCH DAMAGE.
32: */
1.64 lukem 33:
34: #include <sys/cdefs.h>
1.199 ! christos 35: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.198 2012/08/28 15:52:19 christos Exp $");
1.24 scottr 36:
1.141 yamt 37: #include "opt_ddb.h"
1.28 thorpej 38: #include "opt_lockdebug.h"
1.1 pk 39:
40: #include <sys/param.h>
41: #include <sys/systm.h>
1.135 yamt 42: #include <sys/bitops.h>
1.1 pk 43: #include <sys/proc.h>
44: #include <sys/errno.h>
45: #include <sys/kernel.h>
1.191 para 46: #include <sys/vmem.h>
1.1 pk 47: #include <sys/pool.h>
1.20 thorpej 48: #include <sys/syslog.h>
1.125 ad 49: #include <sys/debug.h>
1.134 ad 50: #include <sys/lockdebug.h>
51: #include <sys/xcall.h>
52: #include <sys/cpu.h>
1.145 ad 53: #include <sys/atomic.h>
1.3 pk 54:
1.187 uebayasi 55: #include <uvm/uvm_extern.h>
1.3 pk 56:
1.1 pk 57: /*
58: * Pool resource management utility.
1.3 pk 59: *
1.88 chs 60: * Memory is allocated in pages which are split into pieces according to
61: * the pool item size. Each page is kept on one of three lists in the
62: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
63: * for empty, full and partially-full pages respectively. The individual
64: * pool items are on a linked list headed by `ph_itemlist' in each page
65: * header. The memory for building the page list is either taken from
66: * the allocated pages themselves (for small pool items) or taken from
67: * an internal pool of page headers (`phpool').
1.1 pk 68: */
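/*
 * A minimal usage sketch (the `struct foo' type and the names below are
 * hypothetical; passing a NULL allocator selects pool_allocator_kmem):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	... use f ...
 *	pool_put(&foo_pool, f);
 *
 *	pool_destroy(&foo_pool);
 */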
69:
1.3 pk 70: /* List of all pools */
1.173 rmind 71: static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.134 ad 72:
1.3 pk 73: /* Private pool for page header structures */
1.97 yamt 74: #define PHPOOL_MAX 8
75: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 76: #define PHPOOL_FREELIST_NELEM(idx) \
77: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
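/*
 * For example, assuming a 32-bit pool_item_bitmap_t (so BITMAP_SIZE == 32):
 * index 0 carries no item bitmap at all, index 1 covers up to 64 items
 * per page, index 2 up to 128, and index 7 (PHPOOL_MAX - 1) up to 4096.
 */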
1.3 pk 78:
1.62 bjh21 79: #ifdef POOL_SUBPAGE
80: /* Pool of subpages for use by normal pools. */
81: static struct pool psppool;
82: #endif
83:
1.98 yamt 84: static void *pool_page_alloc_meta(struct pool *, int);
85: static void pool_page_free_meta(struct pool *, void *);
86:
87: /* allocator for pool metadata */
1.134 ad 88: struct pool_allocator pool_allocator_meta = {
1.191 para 89: .pa_alloc = pool_page_alloc_meta,
90: .pa_free = pool_page_free_meta,
91: .pa_pagesz = 0
1.98 yamt 92: };
93:
1.3 pk 94: /* # of seconds to retain page after last use */
95: int pool_inactive_time = 10;
96:
97: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 98: static struct pool *drainpp;
99:
1.134 ad 100: /* This lock protects both pool_head and drainpp. */
101: static kmutex_t pool_head_lock;
102: static kcondvar_t pool_busy;
1.3 pk 103:
1.178 elad 104: /* This lock protects initialization of a potentially shared pool allocator */
105: static kmutex_t pool_allocator_lock;
106:
1.135 yamt 107: typedef uint32_t pool_item_bitmap_t;
108: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
109: #define BITMAP_MASK (BITMAP_SIZE - 1)
1.99 yamt 110:
1.3 pk 111: struct pool_item_header {
112: /* Page headers */
1.88 chs 113: LIST_ENTRY(pool_item_header)
1.3 pk 114: ph_pagelist; /* pool page list */
1.88 chs 115: SPLAY_ENTRY(pool_item_header)
116: ph_node; /* Off-page page headers */
1.128 christos 117: void * ph_page; /* this page's address */
1.151 yamt 118: uint32_t ph_time; /* last referenced */
1.135 yamt 119: uint16_t ph_nmissing; /* # of chunks in use */
1.141 yamt 120: uint16_t ph_off; /* start offset in page */
1.97 yamt 121: union {
122: /* !PR_NOTOUCH */
123: struct {
1.102 chs 124: LIST_HEAD(, pool_item)
1.97 yamt 125: phu_itemlist; /* chunk list for this page */
126: } phu_normal;
127: /* PR_NOTOUCH */
128: struct {
1.141 yamt 129: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 130: } phu_notouch;
131: } ph_u;
1.3 pk 132: };
1.97 yamt 133: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.135 yamt 134: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 135:
1.1 pk 136: struct pool_item {
1.3 pk 137: #ifdef DIAGNOSTIC
1.82 thorpej 138: u_int pi_magic;
1.33 chs 139: #endif
1.134 ad 140: #define PI_MAGIC 0xdeaddeadU
1.3 pk 141: /* Other entries use only this list entry */
1.102 chs 142: LIST_ENTRY(pool_item) pi_list;
1.3 pk 143: };
144:
1.53 thorpej 145: #define POOL_NEEDS_CATCHUP(pp) \
146: ((pp)->pr_nitems < (pp)->pr_minitems)
147:
1.43 thorpej 148: /*
149: * Pool cache management.
150: *
151: * Pool caches provide a way for constructed objects to be cached by the
152: * pool subsystem. This can lead to performance improvements by avoiding
                     153:  * needless object construction/destruction; destruction is deferred until
154: * necessary.
155: *
1.134 ad 156: * Caches are grouped into cache groups. Each cache group references up
                     157:  * to pcg_size constructed objects.  When a cache allocates an
158: * object from the pool, it calls the object's constructor and places it
159: * into a cache group. When a cache group frees an object back to the
160: * pool, it first calls the object's destructor. This allows the object
161: * to persist in constructed form while freed to the cache.
162: *
163: * The pool references each cache, so that when a pool is drained by the
164: * pagedaemon, it can drain each individual cache as well. Each time a
165: * cache is drained, the most idle cache group is freed to the pool in
166: * its entirety.
1.43 thorpej 167: *
                     168:  * Pool caches are laid on top of pools.  By layering them, we can avoid
169: * the complexity of cache management for pools which would not benefit
170: * from it.
171: */
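/*
 * A minimal usage sketch (the `struct foo' type, foo_ctor and foo_dtor
 * below are hypothetical; either function pointer may be NULL, and a
 * NULL allocator with IPL_NONE selects pool_allocator_nointr):
 *
 *	pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
 *	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	... use f ...
 *	pool_cache_put(foo_cache, f);
 */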
172:
1.142 ad 173: static struct pool pcg_normal_pool;
174: static struct pool pcg_large_pool;
1.134 ad 175: static struct pool cache_pool;
176: static struct pool cache_cpu_pool;
1.3 pk 177:
1.189 pooka 178: pool_cache_t pnbuf_cache; /* pathname buffer cache */
179:
1.145 ad 180: /* List of all caches. */
181: TAILQ_HEAD(,pool_cache) pool_cache_head =
182: TAILQ_HEAD_INITIALIZER(pool_cache_head);
183:
1.162 ad 184: int pool_cache_disable; /* global disable for caching */
1.169 yamt 185: static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */
1.145 ad 186:
1.162 ad 187: static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
188: void *);
189: static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
190: void **, paddr_t *, int);
1.134 ad 191: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
192: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
1.175 jym 193: static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
1.196 jym 194: static void pool_cache_transfer(pool_cache_t);
1.3 pk 195:
1.42 thorpej 196: static int pool_catchup(struct pool *);
1.128 christos 197: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 198: struct pool_item_header *);
1.88 chs 199: static void pool_update_curpage(struct pool *);
1.66 thorpej 200:
1.113 yamt 201: static int pool_grow(struct pool *, int);
1.117 yamt 202: static void *pool_allocator_alloc(struct pool *, int);
203: static void pool_allocator_free(struct pool *, void *);
1.3 pk 204:
1.97 yamt 205: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.199 ! christos 206: void (*)(const char *, ...) __printflike(1, 2));
1.42 thorpej 207: static void pool_print1(struct pool *, const char *,
1.199 ! christos 208: void (*)(const char *, ...) __printflike(1, 2));
1.3 pk 209:
1.88 chs 210: static int pool_chk_page(struct pool *, const char *,
211: struct pool_item_header *);
212:
1.135 yamt 213: static inline unsigned int
1.97 yamt 214: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
215: const void *v)
216: {
217: const char *cp = v;
1.135 yamt 218: unsigned int idx;
1.97 yamt 219:
220: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 221: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 222: KASSERT(idx < pp->pr_itemsperpage);
223: return idx;
224: }
225:
1.110 perry 226: static inline void
1.97 yamt 227: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
228: void *obj)
229: {
1.135 yamt 230: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
231: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
232: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 233:
1.135 yamt 234: KASSERT((*bitmap & mask) == 0);
235: *bitmap |= mask;
1.97 yamt 236: }
237:
1.110 perry 238: static inline void *
1.97 yamt 239: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
240: {
1.135 yamt 241: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
242: unsigned int idx;
243: int i;
1.97 yamt 244:
1.135 yamt 245: for (i = 0; ; i++) {
246: int bit;
1.97 yamt 247:
1.135 yamt 248: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
249: bit = ffs32(bitmap[i]);
250: if (bit) {
251: pool_item_bitmap_t mask;
252:
253: bit--;
254: idx = (i * BITMAP_SIZE) + bit;
255: mask = 1 << bit;
256: KASSERT((bitmap[i] & mask) != 0);
257: bitmap[i] &= ~mask;
258: break;
259: }
260: }
261: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 262: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 263: }
264:
1.135 yamt 265: static inline void
1.141 yamt 266: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
1.135 yamt 267: {
268: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
269: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
270: int i;
271:
272: for (i = 0; i < n; i++) {
273: bitmap[i] = (pool_item_bitmap_t)-1;
274: }
275: }
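/*
 * A worked example of the bitmap indexing above, assuming a 32-bit
 * pool_item_bitmap_t: item index 37 maps to word 37 / BITMAP_SIZE == 1
 * and bit 37 & BITMAP_MASK == 5, so its free bit is bitmap[1] & (1 << 5).
 * pr_item_notouch_init() sets every bit (all items free),
 * pr_item_notouch_get() claims the first set bit found by ffs32() and
 * clears it, and pr_item_notouch_put() sets it again.
 */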
276:
1.110 perry 277: static inline int
1.88 chs 278: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
279: {
1.121 yamt 280:
281: /*
                     282: 	 * we consider a pool_item_header with a smaller ph_page to be bigger.
283: * (this unnatural ordering is for the benefit of pr_find_pagehead.)
284: */
285:
1.88 chs 286: if (a->ph_page < b->ph_page)
1.121 yamt 287: return (1);
288: else if (a->ph_page > b->ph_page)
1.88 chs 289: return (-1);
290: else
291: return (0);
292: }
293:
294: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
295: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
296:
1.141 yamt 297: static inline struct pool_item_header *
298: pr_find_pagehead_noalign(struct pool *pp, void *v)
299: {
300: struct pool_item_header *ph, tmp;
301:
302: tmp.ph_page = (void *)(uintptr_t)v;
303: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
304: if (ph == NULL) {
305: ph = SPLAY_ROOT(&pp->pr_phtree);
306: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
307: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
308: }
309: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
310: }
311:
312: return ph;
313: }
314:
1.3 pk 315: /*
1.121 yamt 316: * Return the pool page header based on item address.
1.3 pk 317: */
1.110 perry 318: static inline struct pool_item_header *
1.121 yamt 319: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 320: {
1.88 chs 321: struct pool_item_header *ph, tmp;
1.3 pk 322:
1.121 yamt 323: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.141 yamt 324: ph = pr_find_pagehead_noalign(pp, v);
1.121 yamt 325: } else {
1.128 christos 326: void *page =
327: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 328:
329: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 330: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 331: } else {
332: tmp.ph_page = page;
333: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
334: }
335: }
1.3 pk 336:
1.121 yamt 337: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 338: ((char *)ph->ph_page <= (char *)v &&
339: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 340: return ph;
1.3 pk 341: }
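/*
 * For example, assuming a 4096 byte pool page (pa_pagemask == ~0xfff):
 * an item at address 0xdead1230 in a PR_PHINPAGE pool maps to page
 * 0xdead1000, and its header sits at 0xdead1000 + pr_phoffset.  Pools
 * with off-page headers look the header up in the pr_phtree splay tree
 * keyed on ph_page instead.
 */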
342:
1.101 thorpej 343: static void
344: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
345: {
346: struct pool_item_header *ph;
347:
348: while ((ph = LIST_FIRST(pq)) != NULL) {
349: LIST_REMOVE(ph, ph_pagelist);
350: pool_allocator_free(pp, ph->ph_page);
1.134 ad 351: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 352: pool_put(pp->pr_phpool, ph);
353: }
354: }
355:
1.3 pk 356: /*
357: * Remove a page from the pool.
358: */
1.110 perry 359: static inline void
1.61 chs 360: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
361: struct pool_pagelist *pq)
1.3 pk 362: {
363:
1.134 ad 364: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 365:
1.3 pk 366: /*
1.7 thorpej 367: * If the page was idle, decrement the idle page count.
1.3 pk 368: */
1.6 thorpej 369: if (ph->ph_nmissing == 0) {
370: #ifdef DIAGNOSTIC
371: if (pp->pr_nidle == 0)
372: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 373: if (pp->pr_nitems < pp->pr_itemsperpage)
374: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 375: #endif
376: pp->pr_nidle--;
377: }
1.7 thorpej 378:
1.20 thorpej 379: pp->pr_nitems -= pp->pr_itemsperpage;
380:
1.7 thorpej 381: /*
1.101 thorpej 382: * Unlink the page from the pool and queue it for release.
1.7 thorpej 383: */
1.88 chs 384: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 385: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
386: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 387: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
388:
1.7 thorpej 389: pp->pr_npages--;
390: pp->pr_npagefree++;
1.6 thorpej 391:
1.88 chs 392: pool_update_curpage(pp);
1.3 pk 393: }
394:
395: /*
1.94 simonb 396: * Initialize all the pools listed in the "pools" link set.
397: */
398: void
1.117 yamt 399: pool_subsystem_init(void)
1.94 simonb 400: {
1.192 rmind 401: size_t size;
1.191 para 402: int idx;
1.94 simonb 403:
1.134 ad 404: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
1.179 mlelstv 405: mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
1.134 ad 406: cv_init(&pool_busy, "poolbusy");
407:
1.191 para 408: /*
409: * Initialize private page header pool and cache magazine pool if we
410: * haven't done so yet.
411: */
412: for (idx = 0; idx < PHPOOL_MAX; idx++) {
413: static char phpool_names[PHPOOL_MAX][6+1+6+1];
414: int nelem;
415: size_t sz;
416:
417: nelem = PHPOOL_FREELIST_NELEM(idx);
418: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
419: "phpool-%d", nelem);
420: sz = sizeof(struct pool_item_header);
421: if (nelem) {
422: sz = offsetof(struct pool_item_header,
423: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
424: }
425: pool_init(&phpool[idx], sz, 0, 0, 0,
426: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.117 yamt 427: }
1.191 para 428: #ifdef POOL_SUBPAGE
429: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
430: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
431: #endif
432:
433: size = sizeof(pcg_t) +
434: (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
435: pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
436: "pcgnormal", &pool_allocator_meta, IPL_VM);
437:
438: size = sizeof(pcg_t) +
439: (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
440: pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
441: "pcglarge", &pool_allocator_meta, IPL_VM);
1.134 ad 442:
1.156 ad 443: pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
1.191 para 444: 0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
1.134 ad 445:
1.156 ad 446: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
1.191 para 447: 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
1.94 simonb 448: }
449:
450: /*
1.3 pk 451: * Initialize the given pool resource structure.
452: *
453: * We export this routine to allow other kernel parts to declare
1.195 rmind 454: * static pools that must be initialized before kmem(9) is available.
1.3 pk 455: */
456: void
1.42 thorpej 457: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 458: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 459: {
1.116 simonb 460: struct pool *pp1;
1.92 enami 461: size_t trysize, phsize;
1.134 ad 462: int off, slack;
1.3 pk 463:
1.116 simonb 464: #ifdef DEBUG
1.198 christos 465: if (__predict_true(!cold))
466: mutex_enter(&pool_head_lock);
1.116 simonb 467: /*
468: * Check that the pool hasn't already been initialised and
469: * added to the list of all pools.
470: */
1.145 ad 471: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
1.116 simonb 472: if (pp == pp1)
473: panic("pool_init: pool %s already initialised",
474: wchan);
475: }
1.198 christos 476: if (__predict_true(!cold))
477: mutex_exit(&pool_head_lock);
1.116 simonb 478: #endif
479:
1.66 thorpej 480: if (palloc == NULL)
481: palloc = &pool_allocator_kmem;
1.112 bjh21 482: #ifdef POOL_SUBPAGE
483: if (size > palloc->pa_pagesz) {
484: if (palloc == &pool_allocator_kmem)
485: palloc = &pool_allocator_kmem_fullpage;
486: else if (palloc == &pool_allocator_nointr)
487: palloc = &pool_allocator_nointr_fullpage;
488: }
1.66 thorpej 489: #endif /* POOL_SUBPAGE */
1.180 mlelstv 490: if (!cold)
491: mutex_enter(&pool_allocator_lock);
1.178 elad 492: if (palloc->pa_refcnt++ == 0) {
1.112 bjh21 493: if (palloc->pa_pagesz == 0)
1.66 thorpej 494: palloc->pa_pagesz = PAGE_SIZE;
495:
496: TAILQ_INIT(&palloc->pa_list);
497:
1.134 ad 498: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 499: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
500: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.4 thorpej 501: }
1.180 mlelstv 502: if (!cold)
503: mutex_exit(&pool_allocator_lock);
1.3 pk 504:
505: if (align == 0)
506: align = ALIGN(1);
1.14 thorpej 507:
1.120 yamt 508: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 509: size = sizeof(struct pool_item);
1.3 pk 510:
1.78 thorpej 511: size = roundup(size, align);
1.66 thorpej 512: #ifdef DIAGNOSTIC
513: if (size > palloc->pa_pagesz)
1.121 yamt 514: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 515: #endif
1.35 pk 516:
1.3 pk 517: /*
518: * Initialize the pool structure.
519: */
1.88 chs 520: LIST_INIT(&pp->pr_emptypages);
521: LIST_INIT(&pp->pr_fullpages);
522: LIST_INIT(&pp->pr_partpages);
1.134 ad 523: pp->pr_cache = NULL;
1.3 pk 524: pp->pr_curpage = NULL;
525: pp->pr_npages = 0;
526: pp->pr_minitems = 0;
527: pp->pr_minpages = 0;
528: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 529: pp->pr_roflags = flags;
530: pp->pr_flags = 0;
1.35 pk 531: pp->pr_size = size;
1.3 pk 532: pp->pr_align = align;
533: pp->pr_wchan = wchan;
1.66 thorpej 534: pp->pr_alloc = palloc;
1.20 thorpej 535: pp->pr_nitems = 0;
536: pp->pr_nout = 0;
537: pp->pr_hardlimit = UINT_MAX;
538: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 539: pp->pr_hardlimit_ratecap.tv_sec = 0;
540: pp->pr_hardlimit_ratecap.tv_usec = 0;
541: pp->pr_hardlimit_warning_last.tv_sec = 0;
542: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 543: pp->pr_drain_hook = NULL;
544: pp->pr_drain_hook_arg = NULL;
1.125 ad 545: pp->pr_freecheck = NULL;
1.3 pk 546:
547: /*
548: * Decide whether to put the page header off page to avoid
1.92      enami     549: 	 * wasting too large a part of the page or too big an item.
550: * Off-page page headers go on a hash table, so we can match
551: * a returned item with its header based on the page address.
                     552: 	 * We use 1/16 of the page size and about 8 times the item
553: * size as the threshold (XXX: tune)
554: *
555: * However, we'll put the header into the page if we can put
556: * it without wasting any items.
557: *
558: * Silently enforce `0 <= ioff < align'.
1.3 pk 559: */
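	/*
	 * A worked example of the heuristic above, assuming a 4096 byte
	 * page and a 64 byte (aligned) page header: items smaller than
	 * MIN(4096 / 16, 64 << 3) == 256 bytes keep their header in the
	 * page; larger items get an off-page header from phpool, unless
	 * the in-page header would not cost a whole item anyway.
	 */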
1.92 enami 560: pp->pr_itemoffset = ioff %= align;
561: /* See the comment below about reserved bytes. */
562: trysize = palloc->pa_pagesz - ((align - ioff) % align);
563: phsize = ALIGN(sizeof(struct pool_item_header));
1.121 yamt 564: if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 565: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
566: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 567: /* Use the end of the page for the page header */
1.20 thorpej 568: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 569: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 570: } else {
1.3 pk 571: /* The page header will be taken from our page header pool */
572: pp->pr_phoffset = 0;
1.66 thorpej 573: off = palloc->pa_pagesz;
1.88 chs 574: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 575: }
1.1 pk 576:
1.3 pk 577: /*
578: * Alignment is to take place at `ioff' within the item. This means
579: * we must reserve up to `align - 1' bytes on the page to allow
580: * appropriate positioning of each item.
581: */
582: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 583: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 584: if ((pp->pr_roflags & PR_NOTOUCH)) {
585: int idx;
586:
587: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
588: idx++) {
589: /* nothing */
590: }
591: if (idx >= PHPOOL_MAX) {
592: /*
                     593: 			 * if you see this panic, consider tweaking
594: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
595: */
596: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
597: pp->pr_wchan, pp->pr_itemsperpage);
598: }
599: pp->pr_phpool = &phpool[idx];
600: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
601: pp->pr_phpool = &phpool[0];
602: }
603: #if defined(DIAGNOSTIC)
604: else {
605: pp->pr_phpool = NULL;
606: }
607: #endif
1.3 pk 608:
609: /*
610: * Use the slack between the chunks and the page header
611: * for "cache coloring".
612: */
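	/*
	 * For example, assuming 100 bytes of slack and an alignment of 8:
	 * pr_maxcolor becomes 96, so successive pages start their items at
	 * offsets 0, 8, 16, ... 96 before wrapping back to 0 (see
	 * pool_prime_page()), spreading item addresses across cache lines.
	 */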
613: slack = off - pp->pr_itemsperpage * pp->pr_size;
614: pp->pr_maxcolor = (slack / align) * align;
615: pp->pr_curcolor = 0;
616:
617: pp->pr_nget = 0;
618: pp->pr_nfail = 0;
619: pp->pr_nput = 0;
620: pp->pr_npagealloc = 0;
621: pp->pr_npagefree = 0;
1.1 pk 622: pp->pr_hiwat = 0;
1.8 thorpej 623: pp->pr_nidle = 0;
1.134 ad 624: pp->pr_refcnt = 0;
1.3 pk 625:
1.157 ad 626: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
1.134 ad 627: cv_init(&pp->pr_cv, wchan);
628: pp->pr_ipl = ipl;
1.1 pk 629:
1.145 ad 630: /* Insert into the list of all pools. */
1.181 mlelstv 631: if (!cold)
1.134 ad 632: mutex_enter(&pool_head_lock);
1.145 ad 633: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
634: if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
635: break;
636: }
637: if (pp1 == NULL)
638: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
639: else
640: TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
1.181 mlelstv 641: if (!cold)
1.134 ad 642: mutex_exit(&pool_head_lock);
643:
1.167 skrll 644: /* Insert this into the list of pools using this allocator. */
1.181 mlelstv 645: if (!cold)
1.134 ad 646: mutex_enter(&palloc->pa_lock);
1.145 ad 647: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
1.181 mlelstv 648: if (!cold)
1.134 ad 649: mutex_exit(&palloc->pa_lock);
1.1 pk 650: }
651:
652: /*
                     653:  * De-commission a pool resource.
654: */
655: void
1.42 thorpej 656: pool_destroy(struct pool *pp)
1.1 pk 657: {
1.101 thorpej 658: struct pool_pagelist pq;
1.3 pk 659: struct pool_item_header *ph;
1.43 thorpej 660:
1.101 thorpej 661: /* Remove from global pool list */
1.134 ad 662: mutex_enter(&pool_head_lock);
663: while (pp->pr_refcnt != 0)
664: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 665: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.101 thorpej 666: if (drainpp == pp)
667: drainpp = NULL;
1.134 ad 668: mutex_exit(&pool_head_lock);
1.101 thorpej 669:
670: /* Remove this pool from its allocator's list of pools. */
1.134 ad 671: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 672: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 673: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 674:
1.178 elad 675: mutex_enter(&pool_allocator_lock);
676: if (--pp->pr_alloc->pa_refcnt == 0)
677: mutex_destroy(&pp->pr_alloc->pa_lock);
678: mutex_exit(&pool_allocator_lock);
679:
1.134 ad 680: mutex_enter(&pp->pr_lock);
1.101 thorpej 681:
1.134 ad 682: KASSERT(pp->pr_cache == NULL);
1.3 pk 683:
684: #ifdef DIAGNOSTIC
1.20 thorpej 685: if (pp->pr_nout != 0) {
1.80 provos 686: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 687: pp->pr_nout);
1.3 pk 688: }
689: #endif
1.1 pk 690:
1.101 thorpej 691: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
692: KASSERT(LIST_EMPTY(&pp->pr_partpages));
693:
1.3 pk 694: /* Remove all pages */
1.101 thorpej 695: LIST_INIT(&pq);
1.88 chs 696: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 697: pr_rmpage(pp, ph, &pq);
698:
1.134 ad 699: mutex_exit(&pp->pr_lock);
1.3 pk 700:
1.101 thorpej 701: pr_pagelist_free(pp, &pq);
1.134 ad 702: cv_destroy(&pp->pr_cv);
703: mutex_destroy(&pp->pr_lock);
1.1 pk 704: }
705:
1.68 thorpej 706: void
707: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
708: {
709:
710: /* XXX no locking -- must be used just after pool_init() */
711: #ifdef DIAGNOSTIC
712: if (pp->pr_drain_hook != NULL)
713: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
714: #endif
715: pp->pr_drain_hook = fn;
716: pp->pr_drain_hook_arg = arg;
717: }
718:
1.88 chs 719: static struct pool_item_header *
1.128 christos 720: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 721: {
722: struct pool_item_header *ph;
723:
724: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 725: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.134 ad 726: else
1.97 yamt 727: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 728:
729: return (ph);
730: }
1.1 pk 731:
732: /*
1.134 ad 733: * Grab an item from the pool.
1.1 pk 734: */
1.3 pk 735: void *
1.56 sommerfe 736: pool_get(struct pool *pp, int flags)
1.1 pk 737: {
738: struct pool_item *pi;
1.3 pk 739: struct pool_item_header *ph;
1.55 thorpej 740: void *v;
1.1 pk 741:
1.2 pk 742: #ifdef DIAGNOSTIC
1.184 rmind 743: if (pp->pr_itemsperpage == 0)
744: panic("pool_get: pool '%s': pr_itemsperpage is zero, "
745: "pool not initialized?", pp->pr_wchan);
1.185 rmind 746: if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
747: !cold && panicstr == NULL)
1.184 rmind 748: panic("pool '%s' is IPL_NONE, but called from "
749: "interrupt context\n", pp->pr_wchan);
750: #endif
1.155 ad 751: if (flags & PR_WAITOK) {
1.154 yamt 752: ASSERT_SLEEPABLE();
1.155 ad 753: }
1.1 pk 754:
1.134 ad 755: mutex_enter(&pp->pr_lock);
1.20 thorpej 756: startover:
757: /*
758: * Check to see if we've reached the hard limit. If we have,
759: * and we can wait, then wait until an item has been returned to
760: * the pool.
761: */
762: #ifdef DIAGNOSTIC
1.34 thorpej 763: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.134 ad 764: mutex_exit(&pp->pr_lock);
1.20 thorpej 765: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
766: }
767: #endif
1.34 thorpej 768: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 769: if (pp->pr_drain_hook != NULL) {
770: /*
771: * Since the drain hook is going to free things
772: * back to the pool, unlock, call the hook, re-lock,
773: * and check the hardlimit condition again.
774: */
1.134 ad 775: mutex_exit(&pp->pr_lock);
1.68 thorpej 776: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 777: mutex_enter(&pp->pr_lock);
1.68 thorpej 778: if (pp->pr_nout < pp->pr_hardlimit)
779: goto startover;
780: }
781:
1.29 sommerfe 782: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 783: /*
784: * XXX: A warning isn't logged in this case. Should
785: * it be?
786: */
787: pp->pr_flags |= PR_WANTED;
1.134 ad 788: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.20 thorpej 789: goto startover;
790: }
1.31 thorpej 791:
792: /*
793: * Log a message that the hard limit has been hit.
794: */
795: if (pp->pr_hardlimit_warning != NULL &&
796: ratecheck(&pp->pr_hardlimit_warning_last,
797: &pp->pr_hardlimit_ratecap))
798: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 799:
800: pp->pr_nfail++;
801:
1.134 ad 802: mutex_exit(&pp->pr_lock);
1.20 thorpej 803: return (NULL);
804: }
805:
1.3 pk 806: /*
807: * The convention we use is that if `curpage' is not NULL, then
808: * it points at a non-empty bucket. In particular, `curpage'
809: * never points at a page header which has PR_PHINPAGE set and
810: * has no items in its bucket.
811: */
1.20 thorpej 812: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 813: int error;
814:
1.20 thorpej 815: #ifdef DIAGNOSTIC
816: if (pp->pr_nitems != 0) {
1.134 ad 817: mutex_exit(&pp->pr_lock);
1.20 thorpej 818: printf("pool_get: %s: curpage NULL, nitems %u\n",
819: pp->pr_wchan, pp->pr_nitems);
1.80 provos 820: panic("pool_get: nitems inconsistent");
1.20 thorpej 821: }
822: #endif
823:
1.21 thorpej 824: /*
825: * Call the back-end page allocator for more memory.
826: * Release the pool lock, as the back-end page allocator
827: * may block.
828: */
1.113 yamt 829: error = pool_grow(pp, flags);
830: if (error != 0) {
1.21 thorpej 831: /*
1.55 thorpej 832: * We were unable to allocate a page or item
833: * header, but we released the lock during
834: * allocation, so perhaps items were freed
835: * back to the pool. Check for this case.
1.21 thorpej 836: */
837: if (pp->pr_curpage != NULL)
838: goto startover;
1.15 pk 839:
1.117 yamt 840: pp->pr_nfail++;
1.134 ad 841: mutex_exit(&pp->pr_lock);
1.117 yamt 842: return (NULL);
1.1 pk 843: }
1.3 pk 844:
1.20 thorpej 845: /* Start the allocation process over. */
846: goto startover;
1.3 pk 847: }
1.97 yamt 848: if (pp->pr_roflags & PR_NOTOUCH) {
849: #ifdef DIAGNOSTIC
850: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1.134 ad 851: mutex_exit(&pp->pr_lock);
1.97 yamt 852: panic("pool_get: %s: page empty", pp->pr_wchan);
853: }
854: #endif
855: v = pr_item_notouch_get(pp, ph);
856: } else {
1.102 chs 857: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 858: if (__predict_false(v == NULL)) {
1.134 ad 859: mutex_exit(&pp->pr_lock);
1.97 yamt 860: panic("pool_get: %s: page empty", pp->pr_wchan);
861: }
1.20 thorpej 862: #ifdef DIAGNOSTIC
1.97 yamt 863: if (__predict_false(pp->pr_nitems == 0)) {
1.134 ad 864: mutex_exit(&pp->pr_lock);
1.97 yamt 865: printf("pool_get: %s: items on itemlist, nitems %u\n",
866: pp->pr_wchan, pp->pr_nitems);
867: panic("pool_get: nitems inconsistent");
868: }
1.65 enami 869: #endif
1.56 sommerfe 870:
1.65 enami 871: #ifdef DIAGNOSTIC
1.97 yamt 872: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
873: panic("pool_get(%s): free list modified: "
874: "magic=%x; page %p; item addr %p\n",
875: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
876: }
1.3 pk 877: #endif
878:
1.97 yamt 879: /*
880: * Remove from item list.
881: */
1.102 chs 882: LIST_REMOVE(pi, pi_list);
1.97 yamt 883: }
1.20 thorpej 884: pp->pr_nitems--;
885: pp->pr_nout++;
1.6 thorpej 886: if (ph->ph_nmissing == 0) {
887: #ifdef DIAGNOSTIC
1.34 thorpej 888: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 889: panic("pool_get: nidle inconsistent");
890: #endif
891: pp->pr_nidle--;
1.88 chs 892:
893: /*
894: * This page was previously empty. Move it to the list of
895: * partially-full pages. This page is already curpage.
896: */
897: LIST_REMOVE(ph, ph_pagelist);
898: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 899: }
1.3 pk 900: ph->ph_nmissing++;
1.97 yamt 901: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 902: #ifdef DIAGNOSTIC
1.97 yamt 903: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 904: !LIST_EMPTY(&ph->ph_itemlist))) {
1.134 ad 905: mutex_exit(&pp->pr_lock);
1.21 thorpej 906: panic("pool_get: %s: nmissing inconsistent",
907: pp->pr_wchan);
908: }
909: #endif
1.3 pk 910: /*
1.88 chs 911: * This page is now full. Move it to the full list
912: * and select a new current page.
1.3 pk 913: */
1.88 chs 914: LIST_REMOVE(ph, ph_pagelist);
915: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
916: pool_update_curpage(pp);
1.1 pk 917: }
1.3 pk 918:
919: pp->pr_nget++;
1.20 thorpej 920:
921: /*
922: * If we have a low water mark and we are now below that low
923: * water mark, add more items to the pool.
924: */
1.53 thorpej 925: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 926: /*
927: * XXX: Should we log a warning? Should we set up a timeout
928: * to try again in a second or so? The latter could break
929: * a caller's assumptions about interrupt protection, etc.
930: */
931: }
932:
1.134 ad 933: mutex_exit(&pp->pr_lock);
1.125 ad 934: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
935: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 936: return (v);
937: }
938:
939: /*
1.43 thorpej 940: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 941: */
1.43 thorpej 942: static void
1.101 thorpej 943: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 944: {
945: struct pool_item *pi = v;
1.3 pk 946: struct pool_item_header *ph;
947:
1.134 ad 948: KASSERT(mutex_owned(&pp->pr_lock));
1.125 ad 949: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 950: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 951:
1.30 thorpej 952: #ifdef DIAGNOSTIC
1.34 thorpej 953: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 954: printf("pool %s: putting with none out\n",
955: pp->pr_wchan);
956: panic("pool_put");
957: }
958: #endif
1.3 pk 959:
1.121 yamt 960: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.3 pk 961: panic("pool_put: %s: page header missing", pp->pr_wchan);
962: }
1.28 thorpej 963:
1.3 pk 964: /*
965: * Return to item list.
966: */
1.97 yamt 967: if (pp->pr_roflags & PR_NOTOUCH) {
968: pr_item_notouch_put(pp, ph, v);
969: } else {
1.2 pk 970: #ifdef DIAGNOSTIC
1.97 yamt 971: pi->pi_magic = PI_MAGIC;
1.3 pk 972: #endif
1.32 chs 973: #ifdef DEBUG
1.97 yamt 974: {
975: int i, *ip = v;
1.32 chs 976:
1.97 yamt 977: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
978: *ip++ = PI_MAGIC;
979: }
1.32 chs 980: }
981: #endif
982:
1.102 chs 983: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 984: }
1.79 thorpej 985: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 986: ph->ph_nmissing--;
987: pp->pr_nput++;
1.20 thorpej 988: pp->pr_nitems++;
989: pp->pr_nout--;
1.3 pk 990:
991: /* Cancel "pool empty" condition if it exists */
992: if (pp->pr_curpage == NULL)
993: pp->pr_curpage = ph;
994:
995: if (pp->pr_flags & PR_WANTED) {
996: pp->pr_flags &= ~PR_WANTED;
1.134 ad 997: cv_broadcast(&pp->pr_cv);
1.3 pk 998: }
999:
1000: /*
1.88 chs 1001: * If this page is now empty, do one of two things:
1.21 thorpej 1002: *
1.88 chs 1003: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1004: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1005: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1006: * CLAIM.
1.21 thorpej 1007: *
1.88 chs 1008: * (2) Otherwise, move the page to the empty page list.
1009: *
1010: * Either way, select a new current page (so we use a partially-full
1011: * page if one is available).
1.3 pk 1012: */
1013: if (ph->ph_nmissing == 0) {
1.6 thorpej 1014: pp->pr_nidle++;
1.90 thorpej 1015: if (pp->pr_npages > pp->pr_minpages &&
1.152 yamt 1016: pp->pr_npages > pp->pr_maxpages) {
1.101 thorpej 1017: pr_rmpage(pp, ph, pq);
1.3 pk 1018: } else {
1.88 chs 1019: LIST_REMOVE(ph, ph_pagelist);
1020: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1021:
1.21 thorpej 1022: /*
1023: * Update the timestamp on the page. A page must
1024: * be idle for some period of time before it can
1025: * be reclaimed by the pagedaemon. This minimizes
1026: * ping-pong'ing for memory.
1.151 yamt 1027: *
1028: * note for 64-bit time_t: truncating to 32-bit is not
1029: * a problem for our usage.
1.21 thorpej 1030: */
1.151 yamt 1031: ph->ph_time = time_uptime;
1.1 pk 1032: }
1.88 chs 1033: pool_update_curpage(pp);
1.1 pk 1034: }
1.88 chs 1035:
1.21 thorpej 1036: /*
1.88 chs 1037: * If the page was previously completely full, move it to the
1038: * partially-full list and make it the current page. The next
1039: * allocation will get the item from this page, instead of
1040: * further fragmenting the pool.
1.21 thorpej 1041: */
1042: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1043: LIST_REMOVE(ph, ph_pagelist);
1044: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1045: pp->pr_curpage = ph;
1046: }
1.43 thorpej 1047: }
1048:
1.56 sommerfe 1049: void
1050: pool_put(struct pool *pp, void *v)
1051: {
1.101 thorpej 1052: struct pool_pagelist pq;
1053:
1054: LIST_INIT(&pq);
1.56 sommerfe 1055:
1.134 ad 1056: mutex_enter(&pp->pr_lock);
1.101 thorpej 1057: pool_do_put(pp, v, &pq);
1.134 ad 1058: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1059:
1.102 chs 1060: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1061: }
1.57 sommerfe 1062:
1.74 thorpej 1063: /*
1.113 yamt 1064: * pool_grow: grow a pool by a page.
1065: *
1066: * => called with pool locked.
1067: * => unlock and relock the pool.
1068: * => return with pool locked.
1069: */
1070:
1071: static int
1072: pool_grow(struct pool *pp, int flags)
1073: {
1074: struct pool_item_header *ph = NULL;
1075: char *cp;
1076:
1.134 ad 1077: mutex_exit(&pp->pr_lock);
1.113 yamt 1078: cp = pool_allocator_alloc(pp, flags);
1079: if (__predict_true(cp != NULL)) {
1080: ph = pool_alloc_item_header(pp, cp, flags);
1081: }
1082: if (__predict_false(cp == NULL || ph == NULL)) {
1083: if (cp != NULL) {
1084: pool_allocator_free(pp, cp);
1085: }
1.134 ad 1086: mutex_enter(&pp->pr_lock);
1.113 yamt 1087: return ENOMEM;
1088: }
1089:
1.134 ad 1090: mutex_enter(&pp->pr_lock);
1.113 yamt 1091: pool_prime_page(pp, cp, ph);
1092: pp->pr_npagealloc++;
1093: return 0;
1094: }
1095:
1096: /*
1.74 thorpej 1097: * Add N items to the pool.
1098: */
1099: int
1100: pool_prime(struct pool *pp, int n)
1101: {
1.75 simonb 1102: int newpages;
1.113 yamt 1103: int error = 0;
1.74 thorpej 1104:
1.134 ad 1105: mutex_enter(&pp->pr_lock);
1.74 thorpej 1106:
1107: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1108:
1109: while (newpages-- > 0) {
1.113 yamt 1110: error = pool_grow(pp, PR_NOWAIT);
1111: if (error) {
1.74 thorpej 1112: break;
1113: }
1114: pp->pr_minpages++;
1115: }
1116:
1117: if (pp->pr_minpages >= pp->pr_maxpages)
1118: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1119:
1.134 ad 1120: mutex_exit(&pp->pr_lock);
1.113 yamt 1121: return error;
1.74 thorpej 1122: }
1.55 thorpej 1123:
1124: /*
1.3 pk 1125: * Add a page worth of items to the pool.
1.21 thorpej 1126: *
1127: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1128: */
1.55 thorpej 1129: static void
1.128 christos 1130: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1131: {
1132: struct pool_item *pi;
1.128 christos 1133: void *cp = storage;
1.125 ad 1134: const unsigned int align = pp->pr_align;
1135: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1136: int n;
1.36 pk 1137:
1.134 ad 1138: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1139:
1.66 thorpej 1140: #ifdef DIAGNOSTIC
1.121 yamt 1141: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1.150 skrll 1142: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1143: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1144: #endif
1.3 pk 1145:
1146: /*
1147: * Insert page header.
1148: */
1.88 chs 1149: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1150: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1151: ph->ph_page = storage;
1152: ph->ph_nmissing = 0;
1.151 yamt 1153: ph->ph_time = time_uptime;
1.88 chs 1154: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1155: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1156:
1.6 thorpej 1157: pp->pr_nidle++;
1158:
1.3 pk 1159: /*
1160: * Color this page.
1161: */
1.141 yamt 1162: ph->ph_off = pp->pr_curcolor;
1163: cp = (char *)cp + ph->ph_off;
1.3 pk 1164: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1165: pp->pr_curcolor = 0;
1166:
1167: /*
                    1168: 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1169: */
1170: if (ioff != 0)
1.128 christos 1171: cp = (char *)cp + align - ioff;
1.3 pk 1172:
1.125 ad 1173: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1174:
1.3 pk 1175: /*
1176: * Insert remaining chunks on the bucket list.
1177: */
1178: n = pp->pr_itemsperpage;
1.20 thorpej 1179: pp->pr_nitems += n;
1.3 pk 1180:
1.97 yamt 1181: if (pp->pr_roflags & PR_NOTOUCH) {
1.141 yamt 1182: pr_item_notouch_init(pp, ph);
1.97 yamt 1183: } else {
1184: while (n--) {
1185: pi = (struct pool_item *)cp;
1.78 thorpej 1186:
1.97 yamt 1187: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1188:
1.97 yamt 1189: /* Insert on page list */
1.102 chs 1190: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1191: #ifdef DIAGNOSTIC
1.97 yamt 1192: pi->pi_magic = PI_MAGIC;
1.3 pk 1193: #endif
1.128 christos 1194: cp = (char *)cp + pp->pr_size;
1.125 ad 1195:
1196: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1197: }
1.3 pk 1198: }
1199:
1200: /*
1201: * If the pool was depleted, point at the new page.
1202: */
1203: if (pp->pr_curpage == NULL)
1204: pp->pr_curpage = ph;
1205:
1206: if (++pp->pr_npages > pp->pr_hiwat)
1207: pp->pr_hiwat = pp->pr_npages;
1208: }
1209:
1.20 thorpej 1210: /*
1.52 thorpej 1211: * Used by pool_get() when nitems drops below the low water mark. This
1.88 chs 1212: * is used to catch up pr_nitems with the low water mark.
1.20 thorpej 1213: *
1.21 thorpej 1214: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1215: *
1.73 thorpej 1216: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1217: * with it locked.
1218: */
1219: static int
1.42 thorpej 1220: pool_catchup(struct pool *pp)
1.20 thorpej 1221: {
1222: int error = 0;
1223:
1.54 thorpej 1224: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1225: error = pool_grow(pp, PR_NOWAIT);
1226: if (error) {
1.20 thorpej 1227: break;
1228: }
1229: }
1.113 yamt 1230: return error;
1.20 thorpej 1231: }
1232:
1.88 chs 1233: static void
1234: pool_update_curpage(struct pool *pp)
1235: {
1236:
1237: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1238: if (pp->pr_curpage == NULL) {
1239: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1240: }
1.168 yamt 1241: KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1242: (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1.88 chs 1243: }
1244:
1.3 pk 1245: void
1.42 thorpej 1246: pool_setlowat(struct pool *pp, int n)
1.3 pk 1247: {
1.15 pk 1248:
1.134 ad 1249: mutex_enter(&pp->pr_lock);
1.21 thorpej 1250:
1.3 pk 1251: pp->pr_minitems = n;
1.15 pk 1252: pp->pr_minpages = (n == 0)
1253: ? 0
1.18 thorpej 1254: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1255:
1256: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1257: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1258: /*
1259: * XXX: Should we log a warning? Should we set up a timeout
1260: * to try again in a second or so? The latter could break
1261: * a caller's assumptions about interrupt protection, etc.
1262: */
1263: }
1.21 thorpej 1264:
1.134 ad 1265: mutex_exit(&pp->pr_lock);
1.3 pk 1266: }
1267:
1268: void
1.42 thorpej 1269: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1270: {
1.15 pk 1271:
1.134 ad 1272: mutex_enter(&pp->pr_lock);
1.21 thorpej 1273:
1.15 pk 1274: pp->pr_maxpages = (n == 0)
1275: ? 0
1.18 thorpej 1276: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1277:
1.134 ad 1278: mutex_exit(&pp->pr_lock);
1.3 pk 1279: }
1280:
1.20 thorpej 1281: void
1.42 thorpej 1282: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1283: {
1284:
1.134 ad 1285: mutex_enter(&pp->pr_lock);
1.20 thorpej 1286:
1287: pp->pr_hardlimit = n;
1288: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1289: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1290: pp->pr_hardlimit_warning_last.tv_sec = 0;
1291: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1292:
1293: /*
1.21 thorpej 1294: * In-line version of pool_sethiwat(), because we don't want to
1295: * release the lock.
1.20 thorpej 1296: */
1297: pp->pr_maxpages = (n == 0)
1298: ? 0
1299: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1300:
1.134 ad 1301: mutex_exit(&pp->pr_lock);
1.20 thorpej 1302: }
1.3 pk 1303:
1304: /*
1305: * Release all complete pages that have not been used recently.
1.184 rmind 1306: *
1.197 jym 1307: * Must not be called from interrupt context.
1.3 pk 1308: */
1.66 thorpej 1309: int
1.56 sommerfe 1310: pool_reclaim(struct pool *pp)
1.3 pk 1311: {
1312: struct pool_item_header *ph, *phnext;
1.61 chs 1313: struct pool_pagelist pq;
1.151 yamt 1314: uint32_t curtime;
1.134 ad 1315: bool klock;
1316: int rv;
1.3 pk 1317:
1.197 jym 1318: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.184 rmind 1319:
1.68 thorpej 1320: if (pp->pr_drain_hook != NULL) {
1321: /*
1322: * The drain hook must be called with the pool unlocked.
1323: */
1324: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1325: }
1326:
1.134 ad 1327: /*
1.157 ad 1328: * XXXSMP Because we do not want to cause non-MPSAFE code
1329: * to block.
1.134 ad 1330: */
1331: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1332: pp->pr_ipl == IPL_SOFTSERIAL) {
1333: KERNEL_LOCK(1, NULL);
1334: klock = true;
1335: } else
1336: klock = false;
1337:
1338: /* Reclaim items from the pool's cache (if any). */
1339: if (pp->pr_cache != NULL)
1340: pool_cache_invalidate(pp->pr_cache);
1341:
1342: if (mutex_tryenter(&pp->pr_lock) == 0) {
1343: if (klock) {
1344: KERNEL_UNLOCK_ONE(NULL);
1345: }
1.66 thorpej 1346: return (0);
1.134 ad 1347: }
1.68 thorpej 1348:
1.88 chs 1349: LIST_INIT(&pq);
1.43 thorpej 1350:
1.151 yamt 1351: curtime = time_uptime;
1.21 thorpej 1352:
1.88 chs 1353: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1354: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1355:
1356: /* Check our minimum page claim */
1357: if (pp->pr_npages <= pp->pr_minpages)
1358: break;
1359:
1.88 chs 1360: KASSERT(ph->ph_nmissing == 0);
1.191 para 1361: if (curtime - ph->ph_time < pool_inactive_time)
1.88 chs 1362: continue;
1.21 thorpej 1363:
1.88 chs 1364: /*
1365: * If freeing this page would put us below
1366: * the low water mark, stop now.
1367: */
1368: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1369: pp->pr_minitems)
1370: break;
1.21 thorpej 1371:
1.88 chs 1372: pr_rmpage(pp, ph, &pq);
1.3 pk 1373: }
1374:
1.134 ad 1375: mutex_exit(&pp->pr_lock);
1376:
1377: if (LIST_EMPTY(&pq))
1378: rv = 0;
1379: else {
1380: pr_pagelist_free(pp, &pq);
1381: rv = 1;
1382: }
1383:
1384: if (klock) {
1385: KERNEL_UNLOCK_ONE(NULL);
1386: }
1.66 thorpej 1387:
1.134 ad 1388: return (rv);
1.3 pk 1389: }
1390:
1391: /*
1.197 jym 1392: * Drain pools, one at a time. The drained pool is returned within ppp.
1.131 ad 1393: *
1.134 ad 1394: * Note, must never be called from interrupt context.
1.3 pk 1395: */
1.197 jym 1396: bool
1397: pool_drain(struct pool **ppp)
1.3 pk 1398: {
1.197 jym 1399: bool reclaimed;
1.3 pk 1400: struct pool *pp;
1.134 ad 1401:
1.145 ad 1402: KASSERT(!TAILQ_EMPTY(&pool_head));
1.3 pk 1403:
1.61 chs 1404: pp = NULL;
1.134 ad 1405:
1406: /* Find next pool to drain, and add a reference. */
1407: mutex_enter(&pool_head_lock);
1408: do {
1409: if (drainpp == NULL) {
1.145 ad 1410: drainpp = TAILQ_FIRST(&pool_head);
1.134 ad 1411: }
1412: if (drainpp != NULL) {
1413: pp = drainpp;
1.145 ad 1414: drainpp = TAILQ_NEXT(pp, pr_poollist);
1.134 ad 1415: }
1416: /*
1417: * Skip completely idle pools. We depend on at least
1418: * one pool in the system being active.
1419: */
1420: } while (pp == NULL || pp->pr_npages == 0);
1421: pp->pr_refcnt++;
1422: mutex_exit(&pool_head_lock);
1423:
                    1424: 	/* Drain the cache (if any) and the pool. */
1.186 pooka 1425: reclaimed = pool_reclaim(pp);
1.134 ad 1426:
1427: /* Finally, unlock the pool. */
1428: mutex_enter(&pool_head_lock);
1429: pp->pr_refcnt--;
1430: cv_broadcast(&pool_busy);
1431: mutex_exit(&pool_head_lock);
1.186 pooka 1432:
1.197 jym 1433: if (ppp != NULL)
1434: *ppp = pp;
1435:
1.186 pooka 1436: return reclaimed;
1.3 pk 1437: }
1438:
1439: /*
1440: * Diagnostic helpers.
1441: */
1.21 thorpej 1442:
1.25 thorpej 1443: void
1.108 yamt 1444: pool_printall(const char *modif, void (*pr)(const char *, ...))
1445: {
1446: struct pool *pp;
1447:
1.145 ad 1448: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.108 yamt 1449: pool_printit(pp, modif, pr);
1450: }
1451: }
1452:
1453: void
1.42 thorpej 1454: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1455: {
1456:
1457: if (pp == NULL) {
1458: (*pr)("Must specify a pool to print.\n");
1459: return;
1460: }
1461:
1462: pool_print1(pp, modif, pr);
1463: }
1464:
1.21 thorpej 1465: static void
1.124 yamt 1466: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1467: void (*pr)(const char *, ...))
1.88 chs 1468: {
1469: struct pool_item_header *ph;
1470: #ifdef DIAGNOSTIC
1471: struct pool_item *pi;
1472: #endif
1473:
1474: LIST_FOREACH(ph, pl, ph_pagelist) {
1.151 yamt 1475: (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1476: ph->ph_page, ph->ph_nmissing, ph->ph_time);
1.88 chs 1477: #ifdef DIAGNOSTIC
1.97 yamt 1478: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1479: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1480: if (pi->pi_magic != PI_MAGIC) {
1481: (*pr)("\t\t\titem %p, magic 0x%x\n",
1482: pi, pi->pi_magic);
1483: }
1.88 chs 1484: }
1485: }
1486: #endif
1487: }
1488: }
1489:
1490: static void
1.42 thorpej 1491: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1492: {
1.25 thorpej 1493: struct pool_item_header *ph;
1.134 ad 1494: pool_cache_t pc;
1495: pcg_t *pcg;
1496: pool_cache_cpu_t *cc;
1497: uint64_t cpuhit, cpumiss;
1.44 thorpej 1498: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1499: char c;
1500:
1501: while ((c = *modif++) != '\0') {
1502: if (c == 'l')
1503: print_log = 1;
1504: if (c == 'p')
1505: print_pagelist = 1;
1.44 thorpej 1506: if (c == 'c')
1507: print_cache = 1;
1.25 thorpej 1508: }
1509:
1.134 ad 1510: if ((pc = pp->pr_cache) != NULL) {
1511: (*pr)("POOL CACHE");
1512: } else {
1513: (*pr)("POOL");
1514: }
1515:
1516: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1517: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1518: pp->pr_roflags);
1.66 thorpej 1519: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1520: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1521: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1522: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1523: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1524:
1.134 ad 1525: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1526: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1527: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1528: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1529:
1530: if (print_pagelist == 0)
1531: goto skip_pagelist;
1532:
1.88 chs 1533: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1534: (*pr)("\n\tempty page list:\n");
1.97 yamt 1535: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1536: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1537: (*pr)("\n\tfull page list:\n");
1.97 yamt 1538: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1539: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1540: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1541: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1542:
1.25 thorpej 1543: if (pp->pr_curpage == NULL)
1544: (*pr)("\tno current page\n");
1545: else
1546: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1547:
1548: skip_pagelist:
1549: if (print_log == 0)
1550: goto skip_log;
1551:
1552: (*pr)("\n");
1.3 pk 1553:
1.25 thorpej 1554: skip_log:
1.44 thorpej 1555:
1.102 chs 1556: #define PR_GROUPLIST(pcg) \
1557: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1.142 ad 1558: for (i = 0; i < pcg->pcg_size; i++) { \
1.102 chs 1559: if (pcg->pcg_objects[i].pcgo_pa != \
1560: POOL_PADDR_INVALID) { \
1561: (*pr)("\t\t\t%p, 0x%llx\n", \
1562: pcg->pcg_objects[i].pcgo_va, \
1563: (unsigned long long) \
1564: pcg->pcg_objects[i].pcgo_pa); \
1565: } else { \
1566: (*pr)("\t\t\t%p\n", \
1567: pcg->pcg_objects[i].pcgo_va); \
1568: } \
1569: }
1570:
1.134 ad 1571: if (pc != NULL) {
1572: cpuhit = 0;
1573: cpumiss = 0;
1.183 ad 1574: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.134 ad 1575: if ((cc = pc->pc_cpus[i]) == NULL)
1576: continue;
1577: cpuhit += cc->cc_hits;
1578: cpumiss += cc->cc_misses;
1579: }
1580: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1581: (*pr)("\tcache layer hits %llu misses %llu\n",
1582: pc->pc_hits, pc->pc_misses);
1583: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1584: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1585: pc->pc_contended);
1586: (*pr)("\tcache layer empty groups %u full groups %u\n",
1587: pc->pc_nempty, pc->pc_nfull);
1588: if (print_cache) {
1589: (*pr)("\tfull cache groups:\n");
1590: for (pcg = pc->pc_fullgroups; pcg != NULL;
1591: pcg = pcg->pcg_next) {
1592: PR_GROUPLIST(pcg);
1593: }
1594: (*pr)("\tempty cache groups:\n");
1595: for (pcg = pc->pc_emptygroups; pcg != NULL;
1596: pcg = pcg->pcg_next) {
1597: PR_GROUPLIST(pcg);
1598: }
1.103 chs 1599: }
1.44 thorpej 1600: }
1.102 chs 1601: #undef PR_GROUPLIST
1.88 chs 1602: }
1603:
1604: static int
1605: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1606: {
1607: struct pool_item *pi;
1.128 christos 1608: void *page;
1.88 chs 1609: int n;
1610:
1.121 yamt 1611: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1612: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1613: if (page != ph->ph_page &&
1614: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1615: if (label != NULL)
1616: printf("%s: ", label);
1617: printf("pool(%p:%s): page inconsistency: page %p;"
1618: " at page head addr %p (p %p)\n", pp,
1619: pp->pr_wchan, ph->ph_page,
1620: ph, page);
1621: return 1;
1622: }
1.88 chs 1623: }
1.3 pk 1624:
1.97 yamt 1625: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1626: return 0;
1627:
1.102 chs 1628: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1629: pi != NULL;
1.102 chs 1630: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1631:
1632: #ifdef DIAGNOSTIC
1633: if (pi->pi_magic != PI_MAGIC) {
1634: if (label != NULL)
1635: printf("%s: ", label);
1636: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1637: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1638: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1639: n, pi);
1.88 chs 1640: panic("pool");
1641: }
1642: #endif
1.121 yamt 1643: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1644: continue;
1645: }
1.128 christos 1646: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1647: if (page == ph->ph_page)
1648: continue;
1649:
1650: if (label != NULL)
1651: printf("%s: ", label);
1652: printf("pool(%p:%s): page inconsistency: page %p;"
1653: " item ordinal %d; addr %p (p %p)\n", pp,
1654: pp->pr_wchan, ph->ph_page,
1655: n, pi, page);
1656: return 1;
1657: }
1658: return 0;
1.3 pk 1659: }
1660:
1.88 chs 1661:
1.3 pk 1662: int
1.42 thorpej 1663: pool_chk(struct pool *pp, const char *label)
1.3 pk 1664: {
1665: struct pool_item_header *ph;
1666: int r = 0;
1667:
1.134 ad 1668: mutex_enter(&pp->pr_lock);
1.88 chs 1669: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1670: r = pool_chk_page(pp, label, ph);
1671: if (r) {
1672: goto out;
1673: }
1674: }
1675: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1676: r = pool_chk_page(pp, label, ph);
1677: if (r) {
1.3 pk 1678: goto out;
1679: }
1.88 chs 1680: }
1681: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1682: r = pool_chk_page(pp, label, ph);
1683: if (r) {
1.3 pk 1684: goto out;
1685: }
1686: }
1.88 chs 1687:
1.3 pk 1688: out:
1.134 ad 1689: mutex_exit(&pp->pr_lock);
1.3 pk 1690: return (r);
1.43 thorpej 1691: }
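
/*
 * Illustrative sketch, not part of the original file: how debug code might
 * drive the consistency check above.  The pool pointer and the panic policy
 * are hypothetical; pool_chk() returns non-zero on the first inconsistency.
 */
static void
my_pool_selfcheck(struct pool *pp)
{

	if (pool_chk(pp, __func__) != 0)
		panic("%s: pool '%s' is inconsistent", __func__, pp->pr_wchan);
}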
1692:
1693: /*
1694: * pool_cache_init:
1695: *
1696: * Initialize a pool cache.
1.134 ad 1697: */
1698: pool_cache_t
1699: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1700: const char *wchan, struct pool_allocator *palloc, int ipl,
1701: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1702: {
1703: pool_cache_t pc;
1704:
1705: pc = pool_get(&cache_pool, PR_WAITOK);
1706: if (pc == NULL)
1707: return NULL;
1708:
1709: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1710: palloc, ipl, ctor, dtor, arg);
1711:
1712: return pc;
1713: }
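
/*
 * Usage sketch, not part of the original file: creating a cache for a
 * hypothetical "struct foo".  A NULL palloc together with IPL_NONE selects
 * pool_allocator_nointr (see pool_cache_bootstrap() below); NULL ctor/dtor
 * mean the objects need no construction or destruction.
 */
struct foo {
	int	f_state;		/* hypothetical payload */
};

static pool_cache_t foo_cache;		/* hypothetical cache handle */

static void
foo_cache_attach(void)
{

	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
	    "foopl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(foo_cache != NULL);
}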
1714:
1715: /*
1716: * pool_cache_bootstrap:
1.43 thorpej 1717: *
1.134 ad 1718: * Kernel-private version of pool_cache_init(). The caller
1719: * provides initial storage.
1.43 thorpej 1720: */
1721: void
1.134 ad 1722: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1723: u_int align_offset, u_int flags, const char *wchan,
1724: struct pool_allocator *palloc, int ipl,
1725: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 1726: void *arg)
1727: {
1.134 ad 1728: CPU_INFO_ITERATOR cii;
1.145 ad 1729: pool_cache_t pc1;
1.134 ad 1730: struct cpu_info *ci;
1731: struct pool *pp;
1732:
1733: pp = &pc->pc_pool;
1734: if (palloc == NULL && ipl == IPL_NONE)
1735: palloc = &pool_allocator_nointr;
1736: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.157 ad 1737: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1.43 thorpej 1738:
1.134 ad 1739: if (ctor == NULL) {
1740: ctor = (int (*)(void *, void *, int))nullop;
1741: }
1742: if (dtor == NULL) {
1743: dtor = (void (*)(void *, void *))nullop;
1744: }
1.43 thorpej 1745:
1.134 ad 1746: pc->pc_emptygroups = NULL;
1747: pc->pc_fullgroups = NULL;
1748: pc->pc_partgroups = NULL;
1.43 thorpej 1749: pc->pc_ctor = ctor;
1750: pc->pc_dtor = dtor;
1751: pc->pc_arg = arg;
1.134 ad 1752: pc->pc_hits = 0;
1.48 thorpej 1753: pc->pc_misses = 0;
1.134 ad 1754: pc->pc_nempty = 0;
1755: pc->pc_npart = 0;
1756: pc->pc_nfull = 0;
1757: pc->pc_contended = 0;
1758: pc->pc_refcnt = 0;
1.136 yamt 1759: pc->pc_freecheck = NULL;
1.134 ad 1760:
1.142 ad 1761: if ((flags & PR_LARGECACHE) != 0) {
1762: pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1.163 ad 1763: pc->pc_pcgpool = &pcg_large_pool;
1.142 ad 1764: } else {
1765: pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1.163 ad 1766: pc->pc_pcgpool = &pcg_normal_pool;
1.142 ad 1767: }
1768:
1.134 ad 1769: /* Allocate per-CPU caches. */
1770: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1771: pc->pc_ncpu = 0;
1.139 ad 1772: if (ncpu < 2) {
1.137 ad 1773: /* XXX For sparc: boot CPU is not attached yet. */
1774: pool_cache_cpu_init1(curcpu(), pc);
1775: } else {
1776: for (CPU_INFO_FOREACH(cii, ci)) {
1777: pool_cache_cpu_init1(ci, pc);
1778: }
1.134 ad 1779: }
1.145 ad 1780:
1781: 	/* Add to the list of all pool caches. */
1782: if (__predict_true(!cold))
1.134 ad 1783: mutex_enter(&pool_head_lock);
1.145 ad 1784: TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1785: if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1786: break;
1787: }
1788: if (pc1 == NULL)
1789: TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1790: else
1791: TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1792: if (__predict_true(!cold))
1.134 ad 1793: mutex_exit(&pool_head_lock);
1.145 ad 1794:
1795: membar_sync();
1796: pp->pr_cache = pc;
1.43 thorpej 1797: }
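
/*
 * Bootstrap sketch, not part of the original file: when pool_get() cannot
 * yet be used to allocate the pool_cache structure itself, the caller
 * provides static storage.  "struct bar" and early_bar_cache are
 * hypothetical.
 */
struct bar {
	int	b_dummy;		/* hypothetical payload */
};

static struct pool_cache early_bar_cache;

static void
bar_cache_bootstrap(void)
{

	pool_cache_bootstrap(&early_bar_cache, sizeof(struct bar), 0, 0, 0,
	    "barpl", NULL, IPL_NONE, NULL, NULL, NULL);
}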
1798:
1799: /*
1800: * pool_cache_destroy:
1801: *
1802: * Destroy a pool cache.
1803: */
1804: void
1.134 ad 1805: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 1806: {
1.191 para 1807:
1808: pool_cache_bootstrap_destroy(pc);
1809: pool_put(&cache_pool, pc);
1810: }
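
/*
 * Teardown sketch, not part of the original file: releasing the hypothetical
 * foo_cache from the pool_cache_init() sketch above.  pool_cache_destroy()
 * invalidates all cached objects itself, so no separate
 * pool_cache_invalidate() call is needed first.
 */
static void
foo_cache_detach(void)
{

	pool_cache_destroy(foo_cache);
	foo_cache = NULL;
}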
1811:
1812: /*
1813: * pool_cache_bootstrap_destroy:
1814: *
1815: *	Destroy a pool cache initialized with pool_cache_bootstrap().
1816: */
1817: void
1818: pool_cache_bootstrap_destroy(pool_cache_t pc)
1819: {
1.134 ad 1820: struct pool *pp = &pc->pc_pool;
1.175 jym 1821: u_int i;
1.134 ad 1822:
1823: /* Remove it from the global list. */
1824: mutex_enter(&pool_head_lock);
1825: while (pc->pc_refcnt != 0)
1826: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 1827: TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1.134 ad 1828: mutex_exit(&pool_head_lock);
1.43 thorpej 1829:
1830: /* First, invalidate the entire cache. */
1831: pool_cache_invalidate(pc);
1832:
1.134 ad 1833: /* Disassociate it from the pool. */
1834: mutex_enter(&pp->pr_lock);
1835: pp->pr_cache = NULL;
1836: mutex_exit(&pp->pr_lock);
1837:
1838: /* Destroy per-CPU data */
1.183 ad 1839: for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1.175 jym 1840: pool_cache_invalidate_cpu(pc, i);
1.134 ad 1841:
1842: /* Finally, destroy it. */
1843: mutex_destroy(&pc->pc_lock);
1844: pool_destroy(pp);
1845: }
1846:
1847: /*
1848: * pool_cache_cpu_init1:
1849: *
1850: * Called for each pool_cache whenever a new CPU is attached.
1851: */
1852: static void
1853: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1854: {
1855: pool_cache_cpu_t *cc;
1.137 ad 1856: int index;
1.134 ad 1857:
1.137 ad 1858: index = ci->ci_index;
1859:
1.183 ad 1860: KASSERT(index < __arraycount(pc->pc_cpus));
1.134 ad 1861:
1.137 ad 1862: if ((cc = pc->pc_cpus[index]) != NULL) {
1863: KASSERT(cc->cc_cpuindex == index);
1.134 ad 1864: return;
1865: }
1866:
1867: /*
1868: * The first CPU is 'free'. This needs to be the case for
1869: * bootstrap - we may not be able to allocate yet.
1870: */
1871: if (pc->pc_ncpu == 0) {
1872: cc = &pc->pc_cpu0;
1873: pc->pc_ncpu = 1;
1874: } else {
1875: mutex_enter(&pc->pc_lock);
1876: pc->pc_ncpu++;
1877: mutex_exit(&pc->pc_lock);
1878: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1879: }
1880:
1881: cc->cc_ipl = pc->pc_pool.pr_ipl;
1882: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1883: cc->cc_cache = pc;
1.137 ad 1884: cc->cc_cpuindex = index;
1.134 ad 1885: cc->cc_hits = 0;
1886: cc->cc_misses = 0;
1.169 yamt 1887: cc->cc_current = __UNCONST(&pcg_dummy);
1888: cc->cc_previous = __UNCONST(&pcg_dummy);
1.134 ad 1889:
1.137 ad 1890: pc->pc_cpus[index] = cc;
1.43 thorpej 1891: }
1892:
1.134 ad 1893: /*
1894: * pool_cache_cpu_init:
1895: *
1896: * Called whenever a new CPU is attached.
1897: */
1898: void
1899: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 1900: {
1.134 ad 1901: pool_cache_t pc;
1902:
1903: mutex_enter(&pool_head_lock);
1.145 ad 1904: TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1.134 ad 1905: pc->pc_refcnt++;
1906: mutex_exit(&pool_head_lock);
1.43 thorpej 1907:
1.134 ad 1908: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 1909:
1.134 ad 1910: mutex_enter(&pool_head_lock);
1911: pc->pc_refcnt--;
1912: cv_broadcast(&pool_busy);
1913: }
1914: mutex_exit(&pool_head_lock);
1.43 thorpej 1915: }
1916:
1.134 ad 1917: /*
1918: * pool_cache_reclaim:
1919: *
1920: * Reclaim memory from a pool cache.
1921: */
1922: bool
1923: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 1924: {
1925:
1.134 ad 1926: return pool_reclaim(&pc->pc_pool);
1927: }
1.43 thorpej 1928:
1.136 yamt 1929: static void
1930: pool_cache_destruct_object1(pool_cache_t pc, void *object)
1931: {
1932:
1933: (*pc->pc_dtor)(pc->pc_arg, object);
1934: pool_put(&pc->pc_pool, object);
1935: }
1936:
1.134 ad 1937: /*
1938: * pool_cache_destruct_object:
1939: *
1940: * Force destruction of an object and its release back into
1941: * the pool.
1942: */
1943: void
1944: pool_cache_destruct_object(pool_cache_t pc, void *object)
1945: {
1946:
1.136 yamt 1947: FREECHECK_IN(&pc->pc_freecheck, object);
1948:
1949: pool_cache_destruct_object1(pc, object);
1.43 thorpej 1950: }
1951:
1.134 ad 1952: /*
1953: * pool_cache_invalidate_groups:
1954: *
1955: * Invalidate a chain of groups and destruct all objects.
1956: */
1.102 chs 1957: static void
1.134 ad 1958: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 1959: {
1.134 ad 1960: void *object;
1961: pcg_t *next;
1962: int i;
1963:
1964: for (; pcg != NULL; pcg = next) {
1965: next = pcg->pcg_next;
1966:
1967: for (i = 0; i < pcg->pcg_avail; i++) {
1968: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 1969: pool_cache_destruct_object1(pc, object);
1.134 ad 1970: }
1.102 chs 1971:
1.142 ad 1972: if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
1973: pool_put(&pcg_large_pool, pcg);
1974: } else {
1975: KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
1976: pool_put(&pcg_normal_pool, pcg);
1977: }
1.102 chs 1978: }
1979: }
1980:
1.43 thorpej 1981: /*
1.134 ad 1982: * pool_cache_invalidate:
1.43 thorpej 1983: *
1.134 ad 1984: * Invalidate a pool cache (destruct and release all of the
1985: * cached objects). Does not reclaim objects from the pool.
1.176 thorpej 1986: *
1987: * Note: For pool caches that provide constructed objects, there
1988: * is an assumption that another level of synchronization is occurring
1989: * between the input to the constructor and the cache invalidation.
1.196 jym 1990: *
1991: * Invalidation is a costly process and should not be called from
1992: * interrupt context.
1.43 thorpej 1993: */
1.134 ad 1994: void
1995: pool_cache_invalidate(pool_cache_t pc)
1996: {
1.196 jym 1997: uint64_t where;
1.134 ad 1998: pcg_t *full, *empty, *part;
1.196 jym 1999:
2000: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.176 thorpej 2001:
1.177 jym 2002: if (ncpu < 2 || !mp_online) {
1.176 thorpej 2003: /*
2004: * We might be called early enough in the boot process
2005: * for the CPU data structures to not be fully initialized.
1.196 jym 2006: * In this case, transfer the content of the local CPU's
2007: 		 * cache back into the global cache, as only this CPU is currently
2008: * running.
1.176 thorpej 2009: */
1.196 jym 2010: pool_cache_transfer(pc);
1.176 thorpej 2011: } else {
2012: /*
1.196 jym 2013: * Signal all CPUs that they must transfer their local
2014: 		 * cache back to the global cache, then wait for the xcall to
2015: * complete.
1.176 thorpej 2016: */
1.196 jym 2017: where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
2018: pc, NULL);
1.176 thorpej 2019: xc_wait(where);
2020: }
1.196 jym 2021:
2022: /* Empty pool caches, then invalidate objects */
1.134 ad 2023: mutex_enter(&pc->pc_lock);
2024: full = pc->pc_fullgroups;
2025: empty = pc->pc_emptygroups;
2026: part = pc->pc_partgroups;
2027: pc->pc_fullgroups = NULL;
2028: pc->pc_emptygroups = NULL;
2029: pc->pc_partgroups = NULL;
2030: pc->pc_nfull = 0;
2031: pc->pc_nempty = 0;
2032: pc->pc_npart = 0;
2033: mutex_exit(&pc->pc_lock);
2034:
2035: pool_cache_invalidate_groups(pc, full);
2036: pool_cache_invalidate_groups(pc, empty);
2037: pool_cache_invalidate_groups(pc, part);
2038: }
2039:
1.175 jym 2040: /*
2041: * pool_cache_invalidate_cpu:
2042: *
2043: *	Invalidate all CPU-bound cached objects in the pool cache, the CPU
2044: *	being identified by its associated index.
2045: *	It is the caller's responsibility to ensure that no operation is
2046: *	taking place on this pool cache while doing this invalidation.
2047: *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
2048: *	pool-cached objects from a CPU different from the one currently running
2049: *	may result in undefined behaviour.
2050: */
2051: static void
2052: pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2053: {
2054: pool_cache_cpu_t *cc;
2055: pcg_t *pcg;
2056:
2057: if ((cc = pc->pc_cpus[index]) == NULL)
2058: return;
2059:
2060: if ((pcg = cc->cc_current) != &pcg_dummy) {
2061: pcg->pcg_next = NULL;
2062: pool_cache_invalidate_groups(pc, pcg);
2063: }
2064: if ((pcg = cc->cc_previous) != &pcg_dummy) {
2065: pcg->pcg_next = NULL;
2066: pool_cache_invalidate_groups(pc, pcg);
2067: }
2068: if (cc != &pc->pc_cpu0)
2069: pool_put(&cache_cpu_pool, cc);
2070:
2071: }
2072:
1.134 ad 2073: void
2074: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2075: {
2076:
2077: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2078: }
2079:
2080: void
2081: pool_cache_setlowat(pool_cache_t pc, int n)
2082: {
2083:
2084: pool_setlowat(&pc->pc_pool, n);
2085: }
2086:
2087: void
2088: pool_cache_sethiwat(pool_cache_t pc, int n)
2089: {
2090:
2091: pool_sethiwat(&pc->pc_pool, n);
2092: }
2093:
2094: void
2095: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2096: {
2097:
2098: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2099: }
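
/*
 * Tuning sketch, not part of the original file: the wrappers above are
 * normally called once, right after pool_cache_init().  The thresholds and
 * the warning message are hypothetical.
 */
static void
foo_cache_tune(pool_cache_t pc)
{

	pool_cache_setlowat(pc, 16);	/* try to keep >= 16 idle objects */
	pool_cache_sethiwat(pc, 256);	/* release idle pages beyond ~256 */
	pool_cache_sethardlimit(pc, 1024,
	    "WARNING: foo cache hard limit reached", 60);
}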
2100:
1.162 ad 2101: static bool __noinline
2102: pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
1.134 ad 2103: paddr_t *pap, int flags)
1.43 thorpej 2104: {
1.134 ad 2105: pcg_t *pcg, *cur;
2106: uint64_t ncsw;
2107: pool_cache_t pc;
1.43 thorpej 2108: void *object;
1.58 thorpej 2109:
1.168 yamt 2110: KASSERT(cc->cc_current->pcg_avail == 0);
2111: KASSERT(cc->cc_previous->pcg_avail == 0);
2112:
1.134 ad 2113: pc = cc->cc_cache;
2114: cc->cc_misses++;
1.43 thorpej 2115:
1.134 ad 2116: /*
2117: * Nothing was available locally. Try and grab a group
2118: * from the cache.
2119: */
1.162 ad 2120: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2121: ncsw = curlwp->l_ncsw;
2122: mutex_enter(&pc->pc_lock);
2123: pc->pc_contended++;
1.43 thorpej 2124:
1.134 ad 2125: /*
2126: * If we context switched while locking, then
2127: * our view of the per-CPU data is invalid:
2128: * retry.
2129: */
2130: if (curlwp->l_ncsw != ncsw) {
2131: mutex_exit(&pc->pc_lock);
1.162 ad 2132: return true;
1.43 thorpej 2133: }
1.102 chs 2134: }
1.43 thorpej 2135:
1.162 ad 2136: if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
1.43 thorpej 2137: /*
1.134 ad 2138: * If there's a full group, release our empty
2139: * group back to the cache. Install the full
2140: * group as cc_current and return.
1.43 thorpej 2141: */
1.162 ad 2142: if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
1.134 ad 2143: KASSERT(cur->pcg_avail == 0);
2144: cur->pcg_next = pc->pc_emptygroups;
2145: pc->pc_emptygroups = cur;
2146: pc->pc_nempty++;
1.87 thorpej 2147: }
1.142 ad 2148: KASSERT(pcg->pcg_avail == pcg->pcg_size);
1.134 ad 2149: cc->cc_current = pcg;
2150: pc->pc_fullgroups = pcg->pcg_next;
2151: pc->pc_hits++;
2152: pc->pc_nfull--;
2153: mutex_exit(&pc->pc_lock);
1.162 ad 2154: return true;
1.134 ad 2155: }
2156:
2157: /*
2158: * Nothing available locally or in cache. Take the slow
2159: * path: fetch a new object from the pool and construct
2160: * it.
2161: */
2162: pc->pc_misses++;
2163: mutex_exit(&pc->pc_lock);
1.162 ad 2164: splx(s);
1.134 ad 2165:
2166: object = pool_get(&pc->pc_pool, flags);
2167: *objectp = object;
1.162 ad 2168: if (__predict_false(object == NULL))
2169: return false;
1.125 ad 2170:
1.162 ad 2171: if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
1.134 ad 2172: pool_put(&pc->pc_pool, object);
2173: *objectp = NULL;
1.162 ad 2174: return false;
1.43 thorpej 2175: }
2176:
1.134 ad 2177: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2178: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2179:
1.134 ad 2180: if (pap != NULL) {
2181: #ifdef POOL_VTOPHYS
2182: *pap = POOL_VTOPHYS(object);
2183: #else
2184: *pap = POOL_PADDR_INVALID;
2185: #endif
1.102 chs 2186: }
1.43 thorpej 2187:
1.125 ad 2188: FREECHECK_OUT(&pc->pc_freecheck, object);
1.162 ad 2189: return false;
1.43 thorpej 2190: }
2191:
2192: /*
1.134 ad 2193: * pool_cache_get{,_paddr}:
1.43 thorpej 2194: *
1.134 ad 2195: * Get an object from a pool cache (optionally returning
2196: * the physical address of the object).
1.43 thorpej 2197: */
1.134 ad 2198: void *
2199: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2200: {
1.134 ad 2201: pool_cache_cpu_t *cc;
2202: pcg_t *pcg;
2203: void *object;
1.60 thorpej 2204: int s;
1.43 thorpej 2205:
1.184 rmind 2206: KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
1.185 rmind 2207: (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
1.190 jym 2208: "pool '%s' is IPL_NONE, but called from interrupt context\n",
2209: pc->pc_pool.pr_wchan);
1.184 rmind 2210:
1.155 ad 2211: if (flags & PR_WAITOK) {
1.154 yamt 2212: ASSERT_SLEEPABLE();
1.155 ad 2213: }
1.125 ad 2214:
1.162 ad 2215: /* Lock out interrupts and disable preemption. */
2216: s = splvm();
1.165 yamt 2217: while (/* CONSTCOND */ true) {
1.134 ad 2218: /* Try and allocate an object from the current group. */
1.162 ad 2219: cc = pc->pc_cpus[curcpu()->ci_index];
2220: KASSERT(cc->cc_cache == pc);
1.134 ad 2221: pcg = cc->cc_current;
1.162 ad 2222: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2223: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
1.162 ad 2224: if (__predict_false(pap != NULL))
1.134 ad 2225: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
1.148 yamt 2226: #if defined(DIAGNOSTIC)
1.134 ad 2227: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
1.163 ad 2228: KASSERT(pcg->pcg_avail < pcg->pcg_size);
1.134 ad 2229: KASSERT(object != NULL);
1.163 ad 2230: #endif
1.134 ad 2231: cc->cc_hits++;
1.162 ad 2232: splx(s);
1.134 ad 2233: FREECHECK_OUT(&pc->pc_freecheck, object);
2234: return object;
1.43 thorpej 2235: }
2236:
2237: /*
1.134 ad 2238: * That failed. If the previous group isn't empty, swap
2239: * it with the current group and allocate from there.
1.43 thorpej 2240: */
1.134 ad 2241: pcg = cc->cc_previous;
1.162 ad 2242: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2243: cc->cc_previous = cc->cc_current;
2244: cc->cc_current = pcg;
2245: continue;
1.43 thorpej 2246: }
2247:
1.134 ad 2248: /*
2249: * Can't allocate from either group: try the slow path.
2250: * If get_slow() allocated an object for us, or if
1.162 ad 2251: * no more objects are available, it will return false.
1.134 ad 2252: * Otherwise, we need to retry.
2253: */
1.165 yamt 2254: if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2255: break;
2256: }
1.43 thorpej 2257:
1.134 ad 2258: return object;
1.51 thorpej 2259: }
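
/*
 * Allocation sketch, not part of the original file.  Assumption: the
 * pool_cache_get() convenience wrapper from <sys/pool.h>, which calls
 * pool_cache_get_paddr() with a NULL paddr_t pointer.
 */
static void *
foo_alloc(pool_cache_t pc)
{

	/* May sleep; pass PR_NOWAIT from contexts that must not block. */
	return pool_cache_get(pc, PR_WAITOK);
}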
2260:
1.162 ad 2261: static bool __noinline
2262: pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
1.51 thorpej 2263: {
1.163 ad 2264: pcg_t *pcg, *cur;
1.134 ad 2265: uint64_t ncsw;
2266: pool_cache_t pc;
1.51 thorpej 2267:
1.168 yamt 2268: KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2269: KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2270:
1.134 ad 2271: pc = cc->cc_cache;
1.171 ad 2272: pcg = NULL;
1.134 ad 2273: cc->cc_misses++;
1.43 thorpej 2274:
1.171 ad 2275: /*
2276: * If there are no empty groups in the cache then allocate one
2277: * while still unlocked.
2278: */
2279: if (__predict_false(pc->pc_emptygroups == NULL)) {
2280: if (__predict_true(!pool_cache_disable)) {
2281: pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2282: }
2283: if (__predict_true(pcg != NULL)) {
2284: pcg->pcg_avail = 0;
2285: pcg->pcg_size = pc->pc_pcgsize;
2286: }
2287: }
2288:
1.162 ad 2289: /* Lock the cache. */
2290: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.164 ad 2291: ncsw = curlwp->l_ncsw;
1.134 ad 2292: mutex_enter(&pc->pc_lock);
2293: pc->pc_contended++;
1.162 ad 2294:
1.163 ad 2295: /*
2296: * If we context switched while locking, then our view of
2297: * the per-CPU data is invalid: retry.
2298: */
2299: if (__predict_false(curlwp->l_ncsw != ncsw)) {
2300: mutex_exit(&pc->pc_lock);
1.171 ad 2301: if (pcg != NULL) {
2302: pool_put(pc->pc_pcgpool, pcg);
2303: }
1.163 ad 2304: return true;
2305: }
1.162 ad 2306: }
1.102 chs 2307:
1.163 ad 2308: 	/* If we did not allocate an empty group above, take one from the cache. */
1.171 ad 2309: if (pcg == NULL && pc->pc_emptygroups != NULL) {
2310: pcg = pc->pc_emptygroups;
1.163 ad 2311: pc->pc_emptygroups = pcg->pcg_next;
2312: pc->pc_nempty--;
1.134 ad 2313: }
1.130 ad 2314:
1.162 ad 2315: /*
2316: 	 * If there's an empty group, release our full group back
2317: 	 * to the cache.  Install the empty group on the local CPU
2318: * and return.
2319: */
1.163 ad 2320: if (pcg != NULL) {
1.134 ad 2321: KASSERT(pcg->pcg_avail == 0);
1.162 ad 2322: if (__predict_false(cc->cc_previous == &pcg_dummy)) {
1.146 ad 2323: cc->cc_previous = pcg;
2324: } else {
1.162 ad 2325: cur = cc->cc_current;
2326: if (__predict_true(cur != &pcg_dummy)) {
1.163 ad 2327: KASSERT(cur->pcg_avail == cur->pcg_size);
1.146 ad 2328: cur->pcg_next = pc->pc_fullgroups;
2329: pc->pc_fullgroups = cur;
2330: pc->pc_nfull++;
2331: }
2332: cc->cc_current = pcg;
2333: }
1.163 ad 2334: pc->pc_hits++;
1.134 ad 2335: mutex_exit(&pc->pc_lock);
1.162 ad 2336: return true;
1.102 chs 2337: }
1.105 christos 2338:
1.134 ad 2339: /*
1.162 ad 2340: * Nothing available locally or in cache, and we didn't
2341: * allocate an empty group. Take the slow path and destroy
2342: * the object here and now.
1.134 ad 2343: */
2344: pc->pc_misses++;
2345: mutex_exit(&pc->pc_lock);
1.162 ad 2346: splx(s);
2347: pool_cache_destruct_object(pc, object);
1.105 christos 2348:
1.162 ad 2349: return false;
1.134 ad 2350: }
1.102 chs 2351:
1.43 thorpej 2352: /*
1.134 ad 2353: * pool_cache_put{,_paddr}:
1.43 thorpej 2354: *
1.134 ad 2355: *	Put an object back into the pool cache (optionally caching the
2356: * physical address of the object).
1.43 thorpej 2357: */
1.101 thorpej 2358: void
1.134 ad 2359: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2360: {
1.134 ad 2361: pool_cache_cpu_t *cc;
2362: pcg_t *pcg;
2363: int s;
1.101 thorpej 2364:
1.172 yamt 2365: KASSERT(object != NULL);
1.134 ad 2366: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2367:
1.162 ad 2368: /* Lock out interrupts and disable preemption. */
2369: s = splvm();
1.165 yamt 2370: while (/* CONSTCOND */ true) {
1.134 ad 2371: /* If the current group isn't full, release it there. */
1.162 ad 2372: cc = pc->pc_cpus[curcpu()->ci_index];
2373: KASSERT(cc->cc_cache == pc);
1.134 ad 2374: pcg = cc->cc_current;
1.162 ad 2375: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2376: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2377: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2378: pcg->pcg_avail++;
2379: cc->cc_hits++;
1.162 ad 2380: splx(s);
1.134 ad 2381: return;
2382: }
1.43 thorpej 2383:
1.134 ad 2384: /*
1.162 ad 2385: * That failed. If the previous group isn't full, swap
1.134 ad 2386: * it with the current group and try again.
2387: */
2388: pcg = cc->cc_previous;
1.162 ad 2389: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2390: cc->cc_previous = cc->cc_current;
2391: cc->cc_current = pcg;
2392: continue;
2393: }
1.43 thorpej 2394:
1.134 ad 2395: /*
2396: * Can't free to either group: try the slow path.
2397: * If put_slow() releases the object for us, it
1.162 ad 2398: * will return false. Otherwise we need to retry.
1.134 ad 2399: */
1.165 yamt 2400: if (!pool_cache_put_slow(cc, s, object))
2401: break;
2402: }
1.43 thorpej 2403: }
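
/*
 * Release sketch, not part of the original file.  Assumption: the
 * pool_cache_put() convenience wrapper from <sys/pool.h>, which calls
 * pool_cache_put_paddr() with POOL_PADDR_INVALID as the physical address.
 */
static void
foo_free(pool_cache_t pc, void *obj)
{

	pool_cache_put(pc, obj);
}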
2404:
2405: /*
1.196 jym 2406: * pool_cache_transfer:
1.43 thorpej 2407: *
1.134 ad 2408: * Transfer objects from the per-CPU cache to the global cache.
2409: * Run within a cross-call thread.
1.43 thorpej 2410: */
2411: static void
1.196 jym 2412: pool_cache_transfer(pool_cache_t pc)
1.43 thorpej 2413: {
1.134 ad 2414: pool_cache_cpu_t *cc;
2415: pcg_t *prev, *cur, **list;
1.162 ad 2416: int s;
1.134 ad 2417:
1.162 ad 2418: s = splvm();
2419: mutex_enter(&pc->pc_lock);
2420: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2421: cur = cc->cc_current;
1.169 yamt 2422: cc->cc_current = __UNCONST(&pcg_dummy);
1.134 ad 2423: prev = cc->cc_previous;
1.169 yamt 2424: cc->cc_previous = __UNCONST(&pcg_dummy);
1.162 ad 2425: if (cur != &pcg_dummy) {
1.142 ad 2426: if (cur->pcg_avail == cur->pcg_size) {
1.134 ad 2427: list = &pc->pc_fullgroups;
2428: pc->pc_nfull++;
2429: } else if (cur->pcg_avail == 0) {
2430: list = &pc->pc_emptygroups;
2431: pc->pc_nempty++;
2432: } else {
2433: list = &pc->pc_partgroups;
2434: pc->pc_npart++;
2435: }
2436: cur->pcg_next = *list;
2437: *list = cur;
2438: }
1.162 ad 2439: if (prev != &pcg_dummy) {
1.142 ad 2440: if (prev->pcg_avail == prev->pcg_size) {
1.134 ad 2441: list = &pc->pc_fullgroups;
2442: pc->pc_nfull++;
2443: } else if (prev->pcg_avail == 0) {
2444: list = &pc->pc_emptygroups;
2445: pc->pc_nempty++;
2446: } else {
2447: list = &pc->pc_partgroups;
2448: pc->pc_npart++;
2449: }
2450: prev->pcg_next = *list;
2451: *list = prev;
2452: }
2453: mutex_exit(&pc->pc_lock);
2454: splx(s);
1.3 pk 2455: }
1.66 thorpej 2456:
2457: /*
2458: * Pool backend allocators.
2459: *
2460: * Each pool has a backend allocator that handles allocation, deallocation,
2461: * and any additional draining that might be needed.
2462: *
2463: * We provide two standard allocators:
2464: *
2465: * pool_allocator_kmem - the default when no allocator is specified
2466: *
2467: * pool_allocator_nointr - used for pools that will not be accessed
2468: * in interrupt context.
2469: */
2470: void *pool_page_alloc(struct pool *, int);
2471: void pool_page_free(struct pool *, void *);
2472:
1.112 bjh21 2473: #ifdef POOL_SUBPAGE
2474: struct pool_allocator pool_allocator_kmem_fullpage = {
1.192 rmind 2475: .pa_alloc = pool_page_alloc,
2476: .pa_free = pool_page_free,
2477: .pa_pagesz = 0
1.112 bjh21 2478: };
2479: #else
1.66 thorpej 2480: struct pool_allocator pool_allocator_kmem = {
1.191 para 2481: .pa_alloc = pool_page_alloc,
2482: .pa_free = pool_page_free,
2483: .pa_pagesz = 0
1.66 thorpej 2484: };
1.112 bjh21 2485: #endif
1.66 thorpej 2486:
1.112 bjh21 2487: #ifdef POOL_SUBPAGE
2488: struct pool_allocator pool_allocator_nointr_fullpage = {
1.194 para 2489: .pa_alloc = pool_page_alloc,
2490: .pa_free = pool_page_free,
1.192 rmind 2491: .pa_pagesz = 0
1.112 bjh21 2492: };
2493: #else
1.66 thorpej 2494: struct pool_allocator pool_allocator_nointr = {
1.191 para 2495: .pa_alloc = pool_page_alloc,
2496: .pa_free = pool_page_free,
2497: .pa_pagesz = 0
1.66 thorpej 2498: };
1.112 bjh21 2499: #endif
1.66 thorpej 2500:
2501: #ifdef POOL_SUBPAGE
2502: void *pool_subpage_alloc(struct pool *, int);
2503: void pool_subpage_free(struct pool *, void *);
2504:
1.112 bjh21 2505: struct pool_allocator pool_allocator_kmem = {
1.193 he 2506: .pa_alloc = pool_subpage_alloc,
2507: .pa_free = pool_subpage_free,
2508: .pa_pagesz = POOL_SUBPAGE
1.112 bjh21 2509: };
2510:
2511: struct pool_allocator pool_allocator_nointr = {
1.192 rmind 2512: .pa_alloc = pool_subpage_alloc,
2513: .pa_free = pool_subpage_free,
2514: .pa_pagesz = POOL_SUBPAGE
1.66 thorpej 2515: };
2516: #endif /* POOL_SUBPAGE */
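
/*
 * Custom allocator sketch, not part of the original file: a pool can be
 * given its own backend by passing a structure like this to pool_init() or
 * pool_cache_init().  my_page_alloc()/my_page_free() are hypothetical and
 * must match the pa_alloc/pa_free signatures declared above; a pa_pagesz of
 * 0 requests the default page size.
 */
void	*my_page_alloc(struct pool *, int);
void	my_page_free(struct pool *, void *);

static struct pool_allocator my_pool_allocator = {
	.pa_alloc = my_page_alloc,
	.pa_free = my_page_free,
	.pa_pagesz = 0
};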
2517:
1.117 yamt 2518: static void *
2519: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2520: {
1.117 yamt 2521: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2522: void *res;
2523:
1.117 yamt 2524: res = (*pa->pa_alloc)(pp, flags);
2525: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2526: /*
1.117 yamt 2527: * We only run the drain hook here if PR_NOWAIT.
2528: * In other cases, the hook will be run in
2529: * pool_reclaim().
1.66 thorpej 2530: */
1.117 yamt 2531: if (pp->pr_drain_hook != NULL) {
2532: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2533: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2534: }
1.117 yamt 2535: }
2536: return res;
1.66 thorpej 2537: }
2538:
1.117 yamt 2539: static void
1.66 thorpej 2540: pool_allocator_free(struct pool *pp, void *v)
2541: {
2542: struct pool_allocator *pa = pp->pr_alloc;
2543:
2544: (*pa->pa_free)(pp, v);
2545: }
2546:
2547: void *
1.124 yamt 2548: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2549: {
1.192 rmind 2550: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
1.191 para 2551: vmem_addr_t va;
1.192 rmind 2552: int ret;
1.191 para 2553:
1.192 rmind 2554: ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2555: vflags | VM_INSTANTFIT, &va);
1.66 thorpej 2556:
1.192 rmind 2557: return ret ? NULL : (void *)va;
1.66 thorpej 2558: }
2559:
2560: void
1.124 yamt 2561: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2562: {
2563:
1.191 para 2564: uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
1.98 yamt 2565: }
2566:
2567: static void *
1.124 yamt 2568: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2569: {
1.192 rmind 2570: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2571: vmem_addr_t va;
2572: int ret;
1.191 para 2573:
1.192 rmind 2574: ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2575: vflags | VM_INSTANTFIT, &va);
1.98 yamt 2576:
1.192 rmind 2577: return ret ? NULL : (void *)va;
1.98 yamt 2578: }
2579:
2580: static void
1.124 yamt 2581: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2582: {
2583:
1.192 rmind 2584: vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
1.66 thorpej 2585: }
2586:
2587: #ifdef POOL_SUBPAGE
2588: /* Sub-page allocator, for machines with large hardware pages. */
2589: void *
2590: pool_subpage_alloc(struct pool *pp, int flags)
2591: {
1.134 ad 2592: return pool_get(&psppool, flags);
1.66 thorpej 2593: }
2594:
2595: void
2596: pool_subpage_free(struct pool *pp, void *v)
2597: {
2598: pool_put(&psppool, v);
2599: }
2600:
1.112 bjh21 2601: #endif /* POOL_SUBPAGE */
1.141 yamt 2602:
2603: #if defined(DDB)
2604: static bool
2605: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2606: {
2607:
2608: return (uintptr_t)ph->ph_page <= addr &&
2609: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2610: }
2611:
1.143 yamt 2612: static bool
2613: pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2614: {
2615:
2616: return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2617: }
2618:
2619: static bool
2620: pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2621: {
2622: int i;
2623:
2624: if (pcg == NULL) {
2625: return false;
2626: }
1.144 yamt 2627: for (i = 0; i < pcg->pcg_avail; i++) {
1.143 yamt 2628: if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2629: return true;
2630: }
2631: }
2632: return false;
2633: }
2634:
2635: static bool
2636: pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2637: {
2638:
2639: if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2640: unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2641: pool_item_bitmap_t *bitmap =
2642: ph->ph_bitmap + (idx / BITMAP_SIZE);
2643: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2644:
2645: return (*bitmap & mask) == 0;
2646: } else {
2647: struct pool_item *pi;
2648:
2649: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2650: if (pool_in_item(pp, pi, addr)) {
2651: return false;
2652: }
2653: }
2654: return true;
2655: }
2656: }
2657:
1.141 yamt 2658: void
2659: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2660: {
2661: struct pool *pp;
2662:
1.145 ad 2663: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.141 yamt 2664: struct pool_item_header *ph;
2665: uintptr_t item;
1.143 yamt 2666: bool allocated = true;
2667: bool incache = false;
2668: bool incpucache = false;
2669: char cpucachestr[32];
1.141 yamt 2670:
2671: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2672: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2673: if (pool_in_page(pp, ph, addr)) {
2674: goto found;
2675: }
2676: }
2677: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2678: if (pool_in_page(pp, ph, addr)) {
1.143 yamt 2679: allocated =
2680: pool_allocated(pp, ph, addr);
2681: goto found;
2682: }
2683: }
2684: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2685: if (pool_in_page(pp, ph, addr)) {
2686: allocated = false;
1.141 yamt 2687: goto found;
2688: }
2689: }
2690: continue;
2691: } else {
2692: ph = pr_find_pagehead_noalign(pp, (void *)addr);
2693: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2694: continue;
2695: }
1.143 yamt 2696: allocated = pool_allocated(pp, ph, addr);
1.141 yamt 2697: }
2698: found:
1.143 yamt 2699: if (allocated && pp->pr_cache) {
2700: pool_cache_t pc = pp->pr_cache;
2701: struct pool_cache_group *pcg;
2702: int i;
2703:
2704: for (pcg = pc->pc_fullgroups; pcg != NULL;
2705: pcg = pcg->pcg_next) {
2706: if (pool_in_cg(pp, pcg, addr)) {
2707: incache = true;
2708: goto print;
2709: }
2710: }
1.183 ad 2711: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.143 yamt 2712: pool_cache_cpu_t *cc;
2713:
2714: if ((cc = pc->pc_cpus[i]) == NULL) {
2715: continue;
2716: }
2717: if (pool_in_cg(pp, cc->cc_current, addr) ||
2718: pool_in_cg(pp, cc->cc_previous, addr)) {
2719: struct cpu_info *ci =
1.170 ad 2720: cpu_lookup(i);
1.143 yamt 2721:
2722: incpucache = true;
2723: snprintf(cpucachestr,
2724: sizeof(cpucachestr),
2725: "cached by CPU %u",
1.153 martin 2726: ci->ci_index);
1.143 yamt 2727: goto print;
2728: }
2729: }
2730: }
2731: print:
1.141 yamt 2732: item = (uintptr_t)ph->ph_page + ph->ph_off;
2733: item = item + rounddown(addr - item, pp->pr_size);
1.143 yamt 2734: (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
1.141 yamt 2735: (void *)addr, item, (size_t)(addr - item),
1.143 yamt 2736: pp->pr_wchan,
2737: incpucache ? cpucachestr :
2738: incache ? "cached" : allocated ? "allocated" : "free");
1.141 yamt 2739: }
2740: }
2741: #endif /* defined(DDB) */