Annotation of src/sys/kern/subr_pool.c, Revision 1.195
1.195 ! rmind 1: /* $NetBSD: subr_pool.c,v 1.194 2012/02/04 22:11:42 para Exp $ */
1.1 pk 2:
3: /*-
1.183 ad 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010
5: * The NetBSD Foundation, Inc.
1.1 pk 6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 9: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.134 ad 10: * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
1.1 pk 11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: *
21: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31: * POSSIBILITY OF SUCH DAMAGE.
32: */
1.64 lukem 33:
34: #include <sys/cdefs.h>
1.195 ! rmind 35: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.194 2012/02/04 22:11:42 para Exp $");
1.24 scottr 36:
1.141 yamt 37: #include "opt_ddb.h"
1.28 thorpej 38: #include "opt_lockdebug.h"
1.1 pk 39:
40: #include <sys/param.h>
41: #include <sys/systm.h>
1.135 yamt 42: #include <sys/bitops.h>
1.1 pk 43: #include <sys/proc.h>
44: #include <sys/errno.h>
45: #include <sys/kernel.h>
1.191 para 46: #include <sys/vmem.h>
1.1 pk 47: #include <sys/pool.h>
1.20 thorpej 48: #include <sys/syslog.h>
1.125 ad 49: #include <sys/debug.h>
1.134 ad 50: #include <sys/lockdebug.h>
51: #include <sys/xcall.h>
52: #include <sys/cpu.h>
1.145 ad 53: #include <sys/atomic.h>
1.3 pk 54:
1.187 uebayasi 55: #include <uvm/uvm_extern.h>
1.3 pk 56:
1.1 pk 57: /*
58: * Pool resource management utility.
1.3 pk 59: *
1.88 chs 60: * Memory is allocated in pages which are split into pieces according to
61: * the pool item size. Each page is kept on one of three lists in the
62: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
63: * for empty, full and partially-full pages respectively. The individual
64: * pool items are on a linked list headed by `ph_itemlist' in each page
65: * header. The memory for building the page list is either taken from
66: * the allocated pages themselves (for small pool items) or taken from
67: * an internal pool of page headers (`phpool').
1.1 pk 68: */
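
/*
 * Illustrative usage sketch (not part of this file; "struct foo",
 * "foo_pool" and "foopl" are hypothetical names).  A typical consumer
 * creates a pool once and then gets/puts items from it:
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    NULL, IPL_NONE);		(a NULL allocator selects the default)
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...use f...
 *	pool_put(&foo_pool, f);
 *
 *	pool_destroy(&foo_pool);
 */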
69:
1.3 pk 70: /* List of all pools */
1.173 rmind 71: static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.134 ad 72:
1.3 pk 73: /* Private pool for page header structures */
1.97 yamt 74: #define PHPOOL_MAX 8
75: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 76: #define PHPOOL_FREELIST_NELEM(idx) \
77: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
1.3 pk 78:
1.62 bjh21 79: #ifdef POOL_SUBPAGE
80: /* Pool of subpages for use by normal pools. */
81: static struct pool psppool;
82: #endif
83:
1.98 yamt 84: static void *pool_page_alloc_meta(struct pool *, int);
85: static void pool_page_free_meta(struct pool *, void *);
86:
87: /* allocator for pool metadata */
1.134 ad 88: struct pool_allocator pool_allocator_meta = {
1.191 para 89: .pa_alloc = pool_page_alloc_meta,
90: .pa_free = pool_page_free_meta,
91: .pa_pagesz = 0
1.98 yamt 92: };
93:
1.3 pk 94: /* # of seconds to retain page after last use */
95: int pool_inactive_time = 10;
96:
97: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 98: static struct pool *drainpp;
99:
1.134 ad 100: /* This lock protects both pool_head and drainpp. */
101: static kmutex_t pool_head_lock;
102: static kcondvar_t pool_busy;
1.3 pk 103:
1.178 elad 104: /* This lock protects initialization of a potentially shared pool allocator */
105: static kmutex_t pool_allocator_lock;
106:
1.135 yamt 107: typedef uint32_t pool_item_bitmap_t;
108: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
109: #define BITMAP_MASK (BITMAP_SIZE - 1)
1.99 yamt 110:
1.3 pk 111: struct pool_item_header {
112: /* Page headers */
1.88 chs 113: LIST_ENTRY(pool_item_header)
1.3 pk 114: ph_pagelist; /* pool page list */
1.88 chs 115: SPLAY_ENTRY(pool_item_header)
116: ph_node; /* Off-page page headers */
1.128 christos 117: void * ph_page; /* this page's address */
1.151 yamt 118: uint32_t ph_time; /* last referenced */
1.135 yamt 119: uint16_t ph_nmissing; /* # of chunks in use */
1.141 yamt 120: uint16_t ph_off; /* start offset in page */
1.97 yamt 121: union {
122: /* !PR_NOTOUCH */
123: struct {
1.102 chs 124: LIST_HEAD(, pool_item)
1.97 yamt 125: phu_itemlist; /* chunk list for this page */
126: } phu_normal;
127: /* PR_NOTOUCH */
128: struct {
1.141 yamt 129: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 130: } phu_notouch;
131: } ph_u;
1.3 pk 132: };
1.97 yamt 133: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.135 yamt 134: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 135:
1.1 pk 136: struct pool_item {
1.3 pk 137: #ifdef DIAGNOSTIC
1.82 thorpej 138: u_int pi_magic;
1.33 chs 139: #endif
1.134 ad 140: #define PI_MAGIC 0xdeaddeadU
1.3 pk 141: /* Other entries use only this list entry */
1.102 chs 142: LIST_ENTRY(pool_item) pi_list;
1.3 pk 143: };
144:
1.53 thorpej 145: #define POOL_NEEDS_CATCHUP(pp) \
146: ((pp)->pr_nitems < (pp)->pr_minitems)
147:
1.43 thorpej 148: /*
149: * Pool cache management.
150: *
151: * Pool caches provide a way for constructed objects to be cached by the
152: * pool subsystem. This can lead to performance improvements by avoiding
              153: * needless object construction/destruction; construction and destruction
              154: * are deferred until absolutely necessary.
155: *
1.134 ad 156: * Caches are grouped into cache groups. Each cache group references up
              157: * to pcg_size constructed objects.  When a cache allocates an
158: * object from the pool, it calls the object's constructor and places it
159: * into a cache group. When a cache group frees an object back to the
160: * pool, it first calls the object's destructor. This allows the object
161: * to persist in constructed form while freed to the cache.
162: *
163: * The pool references each cache, so that when a pool is drained by the
164: * pagedaemon, it can drain each individual cache as well. Each time a
165: * cache is drained, the most idle cache group is freed to the pool in
166: * its entirety.
1.43 thorpej 167: *
              168: * Pool caches are laid on top of pools.  By layering them, we can avoid
169: * the complexity of cache management for pools which would not benefit
170: * from it.
171: */
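
/*
 * Illustrative sketch of the constructed-object interface (hypothetical
 * names; foo_ctor()/foo_dtor() are caller-supplied constructor and
 * destructor callbacks):
 *
 *	pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
 *	    0, 0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...use f...
 *	pool_cache_put(foo_cache, f);
 */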
172:
1.142 ad 173: static struct pool pcg_normal_pool;
174: static struct pool pcg_large_pool;
1.134 ad 175: static struct pool cache_pool;
176: static struct pool cache_cpu_pool;
1.3 pk 177:
1.189 pooka 178: pool_cache_t pnbuf_cache; /* pathname buffer cache */
179:
1.145 ad 180: /* List of all caches. */
181: TAILQ_HEAD(,pool_cache) pool_cache_head =
182: TAILQ_HEAD_INITIALIZER(pool_cache_head);
183:
1.162 ad 184: int pool_cache_disable; /* global disable for caching */
1.169 yamt 185: static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */
1.145 ad 186:
1.162 ad 187: static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
188: void *);
189: static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
190: void **, paddr_t *, int);
1.134 ad 191: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
192: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
1.175 jym 193: static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
1.134 ad 194: static void pool_cache_xcall(pool_cache_t);
1.3 pk 195:
1.42 thorpej 196: static int pool_catchup(struct pool *);
1.128 christos 197: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 198: struct pool_item_header *);
1.88 chs 199: static void pool_update_curpage(struct pool *);
1.66 thorpej 200:
1.113 yamt 201: static int pool_grow(struct pool *, int);
1.117 yamt 202: static void *pool_allocator_alloc(struct pool *, int);
203: static void pool_allocator_free(struct pool *, void *);
1.3 pk 204:
1.97 yamt 205: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 206: void (*)(const char *, ...));
1.42 thorpej 207: static void pool_print1(struct pool *, const char *,
208: void (*)(const char *, ...));
1.3 pk 209:
1.88 chs 210: static int pool_chk_page(struct pool *, const char *,
211: struct pool_item_header *);
212:
1.135 yamt 213: static inline unsigned int
1.97 yamt 214: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
215: const void *v)
216: {
217: const char *cp = v;
1.135 yamt 218: unsigned int idx;
1.97 yamt 219:
220: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 221: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 222: KASSERT(idx < pp->pr_itemsperpage);
223: return idx;
224: }
225:
1.110 perry 226: static inline void
1.97 yamt 227: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
228: void *obj)
229: {
1.135 yamt 230: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
231: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
232: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 233:
1.135 yamt 234: KASSERT((*bitmap & mask) == 0);
235: *bitmap |= mask;
1.97 yamt 236: }
237:
1.110 perry 238: static inline void *
1.97 yamt 239: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
240: {
1.135 yamt 241: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
242: unsigned int idx;
243: int i;
1.97 yamt 244:
1.135 yamt 245: for (i = 0; ; i++) {
246: int bit;
1.97 yamt 247:
1.135 yamt 248: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
249: bit = ffs32(bitmap[i]);
250: if (bit) {
251: pool_item_bitmap_t mask;
252:
253: bit--;
254: idx = (i * BITMAP_SIZE) + bit;
255: mask = 1 << bit;
256: KASSERT((bitmap[i] & mask) != 0);
257: bitmap[i] &= ~mask;
258: break;
259: }
260: }
261: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 262: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 263: }
264:
1.135 yamt 265: static inline void
1.141 yamt 266: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
1.135 yamt 267: {
268: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
269: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
270: int i;
271:
272: for (i = 0; i < n; i++) {
273: bitmap[i] = (pool_item_bitmap_t)-1;
274: }
275: }
276:
1.110 perry 277: static inline int
1.88 chs 278: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
279: {
1.121 yamt 280:
281: /*
              282: * we consider a pool_item_header with a smaller ph_page to be bigger.
              283: * (this unnatural ordering is for the benefit of pr_find_pagehead_noalign.)
284: */
285:
1.88 chs 286: if (a->ph_page < b->ph_page)
1.121 yamt 287: return (1);
288: else if (a->ph_page > b->ph_page)
1.88 chs 289: return (-1);
290: else
291: return (0);
292: }
293:
294: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
295: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
296:
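/*
 * Look up the page header for an item of a PR_NOALIGN pool: return the
 * header whose page address is the highest one not exceeding the item
 * address (the splay tree is kept in reverse ph_page order, see
 * phtree_compare() above).
 */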
1.141 yamt 297: static inline struct pool_item_header *
298: pr_find_pagehead_noalign(struct pool *pp, void *v)
299: {
300: struct pool_item_header *ph, tmp;
301:
302: tmp.ph_page = (void *)(uintptr_t)v;
303: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
304: if (ph == NULL) {
305: ph = SPLAY_ROOT(&pp->pr_phtree);
306: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
307: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
308: }
309: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
310: }
311:
312: return ph;
313: }
314:
1.3 pk 315: /*
1.121 yamt 316: * Return the pool page header based on item address.
1.3 pk 317: */
1.110 perry 318: static inline struct pool_item_header *
1.121 yamt 319: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 320: {
1.88 chs 321: struct pool_item_header *ph, tmp;
1.3 pk 322:
1.121 yamt 323: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.141 yamt 324: ph = pr_find_pagehead_noalign(pp, v);
1.121 yamt 325: } else {
1.128 christos 326: void *page =
327: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 328:
329: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 330: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 331: } else {
332: tmp.ph_page = page;
333: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
334: }
335: }
1.3 pk 336:
1.121 yamt 337: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 338: ((char *)ph->ph_page <= (char *)v &&
339: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 340: return ph;
1.3 pk 341: }
342:
1.101 thorpej 343: static void
344: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
345: {
346: struct pool_item_header *ph;
347:
348: while ((ph = LIST_FIRST(pq)) != NULL) {
349: LIST_REMOVE(ph, ph_pagelist);
350: pool_allocator_free(pp, ph->ph_page);
1.134 ad 351: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 352: pool_put(pp->pr_phpool, ph);
353: }
354: }
355:
1.3 pk 356: /*
357: * Remove a page from the pool.
358: */
1.110 perry 359: static inline void
1.61 chs 360: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
361: struct pool_pagelist *pq)
1.3 pk 362: {
363:
1.134 ad 364: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 365:
1.3 pk 366: /*
1.7 thorpej 367: * If the page was idle, decrement the idle page count.
1.3 pk 368: */
1.6 thorpej 369: if (ph->ph_nmissing == 0) {
370: #ifdef DIAGNOSTIC
371: if (pp->pr_nidle == 0)
372: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 373: if (pp->pr_nitems < pp->pr_itemsperpage)
374: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 375: #endif
376: pp->pr_nidle--;
377: }
1.7 thorpej 378:
1.20 thorpej 379: pp->pr_nitems -= pp->pr_itemsperpage;
380:
1.7 thorpej 381: /*
1.101 thorpej 382: * Unlink the page from the pool and queue it for release.
1.7 thorpej 383: */
1.88 chs 384: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 385: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
386: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 387: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
388:
1.7 thorpej 389: pp->pr_npages--;
390: pp->pr_npagefree++;
1.6 thorpej 391:
1.88 chs 392: pool_update_curpage(pp);
1.3 pk 393: }
394:
395: /*
1.94 simonb 396: * Initialize the pool subsystem (locks, internal page header and
              397: * cache group pools).
397: */
398: void
1.117 yamt 399: pool_subsystem_init(void)
1.94 simonb 400: {
1.192 rmind 401: size_t size;
1.191 para 402: int idx;
1.94 simonb 403:
1.134 ad 404: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
1.179 mlelstv 405: mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
1.134 ad 406: cv_init(&pool_busy, "poolbusy");
407:
1.191 para 408: /*
409: * Initialize private page header pool and cache magazine pool if we
410: * haven't done so yet.
411: */
412: for (idx = 0; idx < PHPOOL_MAX; idx++) {
413: static char phpool_names[PHPOOL_MAX][6+1+6+1];
414: int nelem;
415: size_t sz;
416:
417: nelem = PHPOOL_FREELIST_NELEM(idx);
418: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
419: "phpool-%d", nelem);
420: sz = sizeof(struct pool_item_header);
421: if (nelem) {
422: sz = offsetof(struct pool_item_header,
423: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
424: }
425: pool_init(&phpool[idx], sz, 0, 0, 0,
426: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.117 yamt 427: }
1.191 para 428: #ifdef POOL_SUBPAGE
429: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
430: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
431: #endif
432:
433: size = sizeof(pcg_t) +
434: (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
435: pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
436: "pcgnormal", &pool_allocator_meta, IPL_VM);
437:
438: size = sizeof(pcg_t) +
439: (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
440: pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
441: "pcglarge", &pool_allocator_meta, IPL_VM);
1.134 ad 442:
1.156 ad 443: pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
1.191 para 444: 0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
1.134 ad 445:
1.156 ad 446: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
1.191 para 447: 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
1.94 simonb 448: }
449:
450: /*
1.3 pk 451: * Initialize the given pool resource structure.
452: *
453: * We export this routine to allow other kernel parts to declare
1.195 ! rmind 454: * static pools that must be initialized before kmem(9) is available.
1.3 pk 455: */
456: void
1.42 thorpej 457: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 458: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 459: {
1.116 simonb 460: struct pool *pp1;
1.92 enami 461: size_t trysize, phsize;
1.134 ad 462: int off, slack;
1.3 pk 463:
1.116 simonb 464: #ifdef DEBUG
465: /*
466: * Check that the pool hasn't already been initialised and
467: * added to the list of all pools.
468: */
1.145 ad 469: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
1.116 simonb 470: if (pp == pp1)
471: panic("pool_init: pool %s already initialised",
472: wchan);
473: }
474: #endif
475:
1.66 thorpej 476: if (palloc == NULL)
477: palloc = &pool_allocator_kmem;
1.112 bjh21 478: #ifdef POOL_SUBPAGE
479: if (size > palloc->pa_pagesz) {
480: if (palloc == &pool_allocator_kmem)
481: palloc = &pool_allocator_kmem_fullpage;
482: else if (palloc == &pool_allocator_nointr)
483: palloc = &pool_allocator_nointr_fullpage;
484: }
1.66 thorpej 485: #endif /* POOL_SUBPAGE */
1.180 mlelstv 486: if (!cold)
487: mutex_enter(&pool_allocator_lock);
1.178 elad 488: if (palloc->pa_refcnt++ == 0) {
1.112 bjh21 489: if (palloc->pa_pagesz == 0)
1.66 thorpej 490: palloc->pa_pagesz = PAGE_SIZE;
491:
492: TAILQ_INIT(&palloc->pa_list);
493:
1.134 ad 494: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 495: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
496: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.4 thorpej 497: }
1.180 mlelstv 498: if (!cold)
499: mutex_exit(&pool_allocator_lock);
1.3 pk 500:
501: if (align == 0)
502: align = ALIGN(1);
1.14 thorpej 503:
1.120 yamt 504: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 505: size = sizeof(struct pool_item);
1.3 pk 506:
1.78 thorpej 507: size = roundup(size, align);
1.66 thorpej 508: #ifdef DIAGNOSTIC
509: if (size > palloc->pa_pagesz)
1.121 yamt 510: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 511: #endif
1.35 pk 512:
1.3 pk 513: /*
514: * Initialize the pool structure.
515: */
1.88 chs 516: LIST_INIT(&pp->pr_emptypages);
517: LIST_INIT(&pp->pr_fullpages);
518: LIST_INIT(&pp->pr_partpages);
1.134 ad 519: pp->pr_cache = NULL;
1.3 pk 520: pp->pr_curpage = NULL;
521: pp->pr_npages = 0;
522: pp->pr_minitems = 0;
523: pp->pr_minpages = 0;
524: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 525: pp->pr_roflags = flags;
526: pp->pr_flags = 0;
1.35 pk 527: pp->pr_size = size;
1.3 pk 528: pp->pr_align = align;
529: pp->pr_wchan = wchan;
1.66 thorpej 530: pp->pr_alloc = palloc;
1.20 thorpej 531: pp->pr_nitems = 0;
532: pp->pr_nout = 0;
533: pp->pr_hardlimit = UINT_MAX;
534: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 535: pp->pr_hardlimit_ratecap.tv_sec = 0;
536: pp->pr_hardlimit_ratecap.tv_usec = 0;
537: pp->pr_hardlimit_warning_last.tv_sec = 0;
538: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 539: pp->pr_drain_hook = NULL;
540: pp->pr_drain_hook_arg = NULL;
1.125 ad 541: pp->pr_freecheck = NULL;
1.3 pk 542:
543: /*
544: * Decide whether to put the page header off page to avoid
1.92 enami 545: * wasting too large a part of the page, or when the item is too big.
              546: * Off-page page headers go into a splay tree, so we can match
              547: * a returned item with its header based on the page address.
              548: * We use 1/16 of the page size and about 8 times the item
              549: * size as the threshold (XXX: tune).
550: *
551: * However, we'll put the header into the page if we can put
552: * it without wasting any items.
553: *
554: * Silently enforce `0 <= ioff < align'.
1.3 pk 555: */
1.92 enami 556: pp->pr_itemoffset = ioff %= align;
557: /* See the comment below about reserved bytes. */
558: trysize = palloc->pa_pagesz - ((align - ioff) % align);
559: phsize = ALIGN(sizeof(struct pool_item_header));
1.121 yamt 560: if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 561: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
562: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 563: /* Use the end of the page for the page header */
1.20 thorpej 564: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 565: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 566: } else {
1.3 pk 567: /* The page header will be taken from our page header pool */
568: pp->pr_phoffset = 0;
1.66 thorpej 569: off = palloc->pa_pagesz;
1.88 chs 570: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 571: }
1.1 pk 572:
1.3 pk 573: /*
574: * Alignment is to take place at `ioff' within the item. This means
575: * we must reserve up to `align - 1' bytes on the page to allow
576: * appropriate positioning of each item.
577: */
578: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 579: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 580: if ((pp->pr_roflags & PR_NOTOUCH)) {
581: int idx;
582:
583: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
584: idx++) {
585: /* nothing */
586: }
587: if (idx >= PHPOOL_MAX) {
588: /*
589: * if you see this panic, consider to tweak
590: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
591: */
592: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
593: pp->pr_wchan, pp->pr_itemsperpage);
594: }
595: pp->pr_phpool = &phpool[idx];
596: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
597: pp->pr_phpool = &phpool[0];
598: }
599: #if defined(DIAGNOSTIC)
600: else {
601: pp->pr_phpool = NULL;
602: }
603: #endif
1.3 pk 604:
605: /*
606: * Use the slack between the chunks and the page header
607: * for "cache coloring".
608: */
609: slack = off - pp->pr_itemsperpage * pp->pr_size;
610: pp->pr_maxcolor = (slack / align) * align;
611: pp->pr_curcolor = 0;
612:
613: pp->pr_nget = 0;
614: pp->pr_nfail = 0;
615: pp->pr_nput = 0;
616: pp->pr_npagealloc = 0;
617: pp->pr_npagefree = 0;
1.1 pk 618: pp->pr_hiwat = 0;
1.8 thorpej 619: pp->pr_nidle = 0;
1.134 ad 620: pp->pr_refcnt = 0;
1.3 pk 621:
1.157 ad 622: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
1.134 ad 623: cv_init(&pp->pr_cv, wchan);
624: pp->pr_ipl = ipl;
1.1 pk 625:
1.145 ad 626: /* Insert into the list of all pools. */
1.181 mlelstv 627: if (!cold)
1.134 ad 628: mutex_enter(&pool_head_lock);
1.145 ad 629: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
630: if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
631: break;
632: }
633: if (pp1 == NULL)
634: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
635: else
636: TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
1.181 mlelstv 637: if (!cold)
1.134 ad 638: mutex_exit(&pool_head_lock);
639:
1.167 skrll 640: /* Insert this into the list of pools using this allocator. */
1.181 mlelstv 641: if (!cold)
1.134 ad 642: mutex_enter(&palloc->pa_lock);
1.145 ad 643: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
1.181 mlelstv 644: if (!cold)
1.134 ad 645: mutex_exit(&palloc->pa_lock);
1.1 pk 646: }
647:
648: /*
              649: * De-commission a pool resource.
650: */
651: void
1.42 thorpej 652: pool_destroy(struct pool *pp)
1.1 pk 653: {
1.101 thorpej 654: struct pool_pagelist pq;
1.3 pk 655: struct pool_item_header *ph;
1.43 thorpej 656:
1.101 thorpej 657: /* Remove from global pool list */
1.134 ad 658: mutex_enter(&pool_head_lock);
659: while (pp->pr_refcnt != 0)
660: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 661: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.101 thorpej 662: if (drainpp == pp)
663: drainpp = NULL;
1.134 ad 664: mutex_exit(&pool_head_lock);
1.101 thorpej 665:
666: /* Remove this pool from its allocator's list of pools. */
1.134 ad 667: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 668: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 669: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 670:
1.178 elad 671: mutex_enter(&pool_allocator_lock);
672: if (--pp->pr_alloc->pa_refcnt == 0)
673: mutex_destroy(&pp->pr_alloc->pa_lock);
674: mutex_exit(&pool_allocator_lock);
675:
1.134 ad 676: mutex_enter(&pp->pr_lock);
1.101 thorpej 677:
1.134 ad 678: KASSERT(pp->pr_cache == NULL);
1.3 pk 679:
680: #ifdef DIAGNOSTIC
1.20 thorpej 681: if (pp->pr_nout != 0) {
1.80 provos 682: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 683: pp->pr_nout);
1.3 pk 684: }
685: #endif
1.1 pk 686:
1.101 thorpej 687: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
688: KASSERT(LIST_EMPTY(&pp->pr_partpages));
689:
1.3 pk 690: /* Remove all pages */
1.101 thorpej 691: LIST_INIT(&pq);
1.88 chs 692: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 693: pr_rmpage(pp, ph, &pq);
694:
1.134 ad 695: mutex_exit(&pp->pr_lock);
1.3 pk 696:
1.101 thorpej 697: pr_pagelist_free(pp, &pq);
1.134 ad 698: cv_destroy(&pp->pr_cv);
699: mutex_destroy(&pp->pr_lock);
1.1 pk 700: }
701:
1.68 thorpej 702: void
703: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
704: {
705:
706: /* XXX no locking -- must be used just after pool_init() */
707: #ifdef DIAGNOSTIC
708: if (pp->pr_drain_hook != NULL)
709: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
710: #endif
711: pp->pr_drain_hook = fn;
712: pp->pr_drain_hook_arg = arg;
713: }
714:
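/*
 * Allocate a page header: either carved out of the page itself
 * (PR_PHINPAGE), or taken from the separate page header pool.
 */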
1.88 chs 715: static struct pool_item_header *
1.128 christos 716: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 717: {
718: struct pool_item_header *ph;
719:
720: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 721: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.134 ad 722: else
1.97 yamt 723: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 724:
725: return (ph);
726: }
1.1 pk 727:
728: /*
1.134 ad 729: * Grab an item from the pool.
1.1 pk 730: */
1.3 pk 731: void *
1.56 sommerfe 732: pool_get(struct pool *pp, int flags)
1.1 pk 733: {
734: struct pool_item *pi;
1.3 pk 735: struct pool_item_header *ph;
1.55 thorpej 736: void *v;
1.1 pk 737:
1.2 pk 738: #ifdef DIAGNOSTIC
1.184 rmind 739: if (pp->pr_itemsperpage == 0)
740: panic("pool_get: pool '%s': pr_itemsperpage is zero, "
741: "pool not initialized?", pp->pr_wchan);
1.185 rmind 742: if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
743: !cold && panicstr == NULL)
1.184 rmind 744: panic("pool '%s' is IPL_NONE, but called from "
745: "interrupt context\n", pp->pr_wchan);
746: #endif
1.155 ad 747: if (flags & PR_WAITOK) {
1.154 yamt 748: ASSERT_SLEEPABLE();
1.155 ad 749: }
1.1 pk 750:
1.134 ad 751: mutex_enter(&pp->pr_lock);
1.20 thorpej 752: startover:
753: /*
754: * Check to see if we've reached the hard limit. If we have,
755: * and we can wait, then wait until an item has been returned to
756: * the pool.
757: */
758: #ifdef DIAGNOSTIC
1.34 thorpej 759: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.134 ad 760: mutex_exit(&pp->pr_lock);
1.20 thorpej 761: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
762: }
763: #endif
1.34 thorpej 764: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 765: if (pp->pr_drain_hook != NULL) {
766: /*
767: * Since the drain hook is going to free things
768: * back to the pool, unlock, call the hook, re-lock,
769: * and check the hardlimit condition again.
770: */
1.134 ad 771: mutex_exit(&pp->pr_lock);
1.68 thorpej 772: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 773: mutex_enter(&pp->pr_lock);
1.68 thorpej 774: if (pp->pr_nout < pp->pr_hardlimit)
775: goto startover;
776: }
777:
1.29 sommerfe 778: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 779: /*
780: * XXX: A warning isn't logged in this case. Should
781: * it be?
782: */
783: pp->pr_flags |= PR_WANTED;
1.134 ad 784: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.20 thorpej 785: goto startover;
786: }
1.31 thorpej 787:
788: /*
789: * Log a message that the hard limit has been hit.
790: */
791: if (pp->pr_hardlimit_warning != NULL &&
792: ratecheck(&pp->pr_hardlimit_warning_last,
793: &pp->pr_hardlimit_ratecap))
794: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 795:
796: pp->pr_nfail++;
797:
1.134 ad 798: mutex_exit(&pp->pr_lock);
1.20 thorpej 799: return (NULL);
800: }
801:
1.3 pk 802: /*
803: * The convention we use is that if `curpage' is not NULL, then
804: * it points at a non-empty bucket. In particular, `curpage'
805: * never points at a page header which has PR_PHINPAGE set and
806: * has no items in its bucket.
807: */
1.20 thorpej 808: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 809: int error;
810:
1.20 thorpej 811: #ifdef DIAGNOSTIC
812: if (pp->pr_nitems != 0) {
1.134 ad 813: mutex_exit(&pp->pr_lock);
1.20 thorpej 814: printf("pool_get: %s: curpage NULL, nitems %u\n",
815: pp->pr_wchan, pp->pr_nitems);
1.80 provos 816: panic("pool_get: nitems inconsistent");
1.20 thorpej 817: }
818: #endif
819:
1.21 thorpej 820: /*
821: * Call the back-end page allocator for more memory.
822: * Release the pool lock, as the back-end page allocator
823: * may block.
824: */
1.113 yamt 825: error = pool_grow(pp, flags);
826: if (error != 0) {
1.21 thorpej 827: /*
1.55 thorpej 828: * We were unable to allocate a page or item
829: * header, but we released the lock during
830: * allocation, so perhaps items were freed
831: * back to the pool. Check for this case.
1.21 thorpej 832: */
833: if (pp->pr_curpage != NULL)
834: goto startover;
1.15 pk 835:
1.117 yamt 836: pp->pr_nfail++;
1.134 ad 837: mutex_exit(&pp->pr_lock);
1.117 yamt 838: return (NULL);
1.1 pk 839: }
1.3 pk 840:
1.20 thorpej 841: /* Start the allocation process over. */
842: goto startover;
1.3 pk 843: }
1.97 yamt 844: if (pp->pr_roflags & PR_NOTOUCH) {
845: #ifdef DIAGNOSTIC
846: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1.134 ad 847: mutex_exit(&pp->pr_lock);
1.97 yamt 848: panic("pool_get: %s: page empty", pp->pr_wchan);
849: }
850: #endif
851: v = pr_item_notouch_get(pp, ph);
852: } else {
1.102 chs 853: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 854: if (__predict_false(v == NULL)) {
1.134 ad 855: mutex_exit(&pp->pr_lock);
1.97 yamt 856: panic("pool_get: %s: page empty", pp->pr_wchan);
857: }
1.20 thorpej 858: #ifdef DIAGNOSTIC
1.97 yamt 859: if (__predict_false(pp->pr_nitems == 0)) {
1.134 ad 860: mutex_exit(&pp->pr_lock);
1.97 yamt 861: printf("pool_get: %s: items on itemlist, nitems %u\n",
862: pp->pr_wchan, pp->pr_nitems);
863: panic("pool_get: nitems inconsistent");
864: }
1.65 enami 865: #endif
1.56 sommerfe 866:
1.65 enami 867: #ifdef DIAGNOSTIC
1.97 yamt 868: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
869: panic("pool_get(%s): free list modified: "
870: "magic=%x; page %p; item addr %p\n",
871: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
872: }
1.3 pk 873: #endif
874:
1.97 yamt 875: /*
876: * Remove from item list.
877: */
1.102 chs 878: LIST_REMOVE(pi, pi_list);
1.97 yamt 879: }
1.20 thorpej 880: pp->pr_nitems--;
881: pp->pr_nout++;
1.6 thorpej 882: if (ph->ph_nmissing == 0) {
883: #ifdef DIAGNOSTIC
1.34 thorpej 884: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 885: panic("pool_get: nidle inconsistent");
886: #endif
887: pp->pr_nidle--;
1.88 chs 888:
889: /*
890: * This page was previously empty. Move it to the list of
891: * partially-full pages. This page is already curpage.
892: */
893: LIST_REMOVE(ph, ph_pagelist);
894: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 895: }
1.3 pk 896: ph->ph_nmissing++;
1.97 yamt 897: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 898: #ifdef DIAGNOSTIC
1.97 yamt 899: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 900: !LIST_EMPTY(&ph->ph_itemlist))) {
1.134 ad 901: mutex_exit(&pp->pr_lock);
1.21 thorpej 902: panic("pool_get: %s: nmissing inconsistent",
903: pp->pr_wchan);
904: }
905: #endif
1.3 pk 906: /*
1.88 chs 907: * This page is now full. Move it to the full list
908: * and select a new current page.
1.3 pk 909: */
1.88 chs 910: LIST_REMOVE(ph, ph_pagelist);
911: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
912: pool_update_curpage(pp);
1.1 pk 913: }
1.3 pk 914:
915: pp->pr_nget++;
1.20 thorpej 916:
917: /*
918: * If we have a low water mark and we are now below that low
919: * water mark, add more items to the pool.
920: */
1.53 thorpej 921: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 922: /*
923: * XXX: Should we log a warning? Should we set up a timeout
924: * to try again in a second or so? The latter could break
925: * a caller's assumptions about interrupt protection, etc.
926: */
927: }
928:
1.134 ad 929: mutex_exit(&pp->pr_lock);
1.125 ad 930: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
931: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 932: return (v);
933: }
934:
935: /*
1.43 thorpej 936: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 937: */
1.43 thorpej 938: static void
1.101 thorpej 939: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 940: {
941: struct pool_item *pi = v;
1.3 pk 942: struct pool_item_header *ph;
943:
1.134 ad 944: KASSERT(mutex_owned(&pp->pr_lock));
1.125 ad 945: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 946: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 947:
1.30 thorpej 948: #ifdef DIAGNOSTIC
1.34 thorpej 949: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 950: printf("pool %s: putting with none out\n",
951: pp->pr_wchan);
952: panic("pool_put");
953: }
954: #endif
1.3 pk 955:
1.121 yamt 956: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.3 pk 957: panic("pool_put: %s: page header missing", pp->pr_wchan);
958: }
1.28 thorpej 959:
1.3 pk 960: /*
961: * Return to item list.
962: */
1.97 yamt 963: if (pp->pr_roflags & PR_NOTOUCH) {
964: pr_item_notouch_put(pp, ph, v);
965: } else {
1.2 pk 966: #ifdef DIAGNOSTIC
1.97 yamt 967: pi->pi_magic = PI_MAGIC;
1.3 pk 968: #endif
1.32 chs 969: #ifdef DEBUG
1.97 yamt 970: {
971: int i, *ip = v;
1.32 chs 972:
1.97 yamt 973: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
974: *ip++ = PI_MAGIC;
975: }
1.32 chs 976: }
977: #endif
978:
1.102 chs 979: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 980: }
1.79 thorpej 981: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 982: ph->ph_nmissing--;
983: pp->pr_nput++;
1.20 thorpej 984: pp->pr_nitems++;
985: pp->pr_nout--;
1.3 pk 986:
987: /* Cancel "pool empty" condition if it exists */
988: if (pp->pr_curpage == NULL)
989: pp->pr_curpage = ph;
990:
991: if (pp->pr_flags & PR_WANTED) {
992: pp->pr_flags &= ~PR_WANTED;
1.134 ad 993: cv_broadcast(&pp->pr_cv);
1.3 pk 994: }
995:
996: /*
1.88 chs 997: * If this page is now empty, do one of two things:
1.21 thorpej 998: *
1.88 chs 999: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1000: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1001: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1002: * CLAIM.
1.21 thorpej 1003: *
1.88 chs 1004: * (2) Otherwise, move the page to the empty page list.
1005: *
1006: * Either way, select a new current page (so we use a partially-full
1007: * page if one is available).
1.3 pk 1008: */
1009: if (ph->ph_nmissing == 0) {
1.6 thorpej 1010: pp->pr_nidle++;
1.90 thorpej 1011: if (pp->pr_npages > pp->pr_minpages &&
1.152 yamt 1012: pp->pr_npages > pp->pr_maxpages) {
1.101 thorpej 1013: pr_rmpage(pp, ph, pq);
1.3 pk 1014: } else {
1.88 chs 1015: LIST_REMOVE(ph, ph_pagelist);
1016: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1017:
1.21 thorpej 1018: /*
1019: * Update the timestamp on the page. A page must
1020: * be idle for some period of time before it can
1021: * be reclaimed by the pagedaemon. This minimizes
1022: * ping-pong'ing for memory.
1.151 yamt 1023: *
1024: * note for 64-bit time_t: truncating to 32-bit is not
1025: * a problem for our usage.
1.21 thorpej 1026: */
1.151 yamt 1027: ph->ph_time = time_uptime;
1.1 pk 1028: }
1.88 chs 1029: pool_update_curpage(pp);
1.1 pk 1030: }
1.88 chs 1031:
1.21 thorpej 1032: /*
1.88 chs 1033: * If the page was previously completely full, move it to the
1034: * partially-full list and make it the current page. The next
1035: * allocation will get the item from this page, instead of
1036: * further fragmenting the pool.
1.21 thorpej 1037: */
1038: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1039: LIST_REMOVE(ph, ph_pagelist);
1040: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1041: pp->pr_curpage = ph;
1042: }
1.43 thorpej 1043: }
1044:
1.56 sommerfe 1045: void
1046: pool_put(struct pool *pp, void *v)
1047: {
1.101 thorpej 1048: struct pool_pagelist pq;
1049:
1050: LIST_INIT(&pq);
1.56 sommerfe 1051:
1.134 ad 1052: mutex_enter(&pp->pr_lock);
1.101 thorpej 1053: pool_do_put(pp, v, &pq);
1.134 ad 1054: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1055:
1.102 chs 1056: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1057: }
1.57 sommerfe 1058:
1.74 thorpej 1059: /*
1.113 yamt 1060: * pool_grow: grow a pool by a page.
1061: *
1062: * => called with pool locked.
1063: * => unlock and relock the pool.
1064: * => return with pool locked.
1065: */
1066:
1067: static int
1068: pool_grow(struct pool *pp, int flags)
1069: {
1070: struct pool_item_header *ph = NULL;
1071: char *cp;
1072:
1.134 ad 1073: mutex_exit(&pp->pr_lock);
1.113 yamt 1074: cp = pool_allocator_alloc(pp, flags);
1075: if (__predict_true(cp != NULL)) {
1076: ph = pool_alloc_item_header(pp, cp, flags);
1077: }
1078: if (__predict_false(cp == NULL || ph == NULL)) {
1079: if (cp != NULL) {
1080: pool_allocator_free(pp, cp);
1081: }
1.134 ad 1082: mutex_enter(&pp->pr_lock);
1.113 yamt 1083: return ENOMEM;
1084: }
1085:
1.134 ad 1086: mutex_enter(&pp->pr_lock);
1.113 yamt 1087: pool_prime_page(pp, cp, ph);
1088: pp->pr_npagealloc++;
1089: return 0;
1090: }
1091:
1092: /*
1.74 thorpej 1093: * Add N items to the pool.
1094: */
1095: int
1096: pool_prime(struct pool *pp, int n)
1097: {
1.75 simonb 1098: int newpages;
1.113 yamt 1099: int error = 0;
1.74 thorpej 1100:
1.134 ad 1101: mutex_enter(&pp->pr_lock);
1.74 thorpej 1102:
1103: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1104:
1105: while (newpages-- > 0) {
1.113 yamt 1106: error = pool_grow(pp, PR_NOWAIT);
1107: if (error) {
1.74 thorpej 1108: break;
1109: }
1110: pp->pr_minpages++;
1111: }
1112:
1113: if (pp->pr_minpages >= pp->pr_maxpages)
1114: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1115:
1.134 ad 1116: mutex_exit(&pp->pr_lock);
1.113 yamt 1117: return error;
1.74 thorpej 1118: }
1.55 thorpej 1119:
1120: /*
1.3 pk 1121: * Add a page worth of items to the pool.
1.21 thorpej 1122: *
1123: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1124: */
1.55 thorpej 1125: static void
1.128 christos 1126: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1127: {
1128: struct pool_item *pi;
1.128 christos 1129: void *cp = storage;
1.125 ad 1130: const unsigned int align = pp->pr_align;
1131: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1132: int n;
1.36 pk 1133:
1.134 ad 1134: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1135:
1.66 thorpej 1136: #ifdef DIAGNOSTIC
1.121 yamt 1137: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1.150 skrll 1138: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1139: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1140: #endif
1.3 pk 1141:
1142: /*
1143: * Insert page header.
1144: */
1.88 chs 1145: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1146: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1147: ph->ph_page = storage;
1148: ph->ph_nmissing = 0;
1.151 yamt 1149: ph->ph_time = time_uptime;
1.88 chs 1150: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1151: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1152:
1.6 thorpej 1153: pp->pr_nidle++;
1154:
1.3 pk 1155: /*
1156: * Color this page.
1157: */
1.141 yamt 1158: ph->ph_off = pp->pr_curcolor;
1159: cp = (char *)cp + ph->ph_off;
1.3 pk 1160: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1161: pp->pr_curcolor = 0;
1162:
1163: /*
              1164: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1165: */
1166: if (ioff != 0)
1.128 christos 1167: cp = (char *)cp + align - ioff;
1.3 pk 1168:
1.125 ad 1169: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1170:
1.3 pk 1171: /*
1172: * Insert remaining chunks on the bucket list.
1173: */
1174: n = pp->pr_itemsperpage;
1.20 thorpej 1175: pp->pr_nitems += n;
1.3 pk 1176:
1.97 yamt 1177: if (pp->pr_roflags & PR_NOTOUCH) {
1.141 yamt 1178: pr_item_notouch_init(pp, ph);
1.97 yamt 1179: } else {
1180: while (n--) {
1181: pi = (struct pool_item *)cp;
1.78 thorpej 1182:
1.97 yamt 1183: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1184:
1.97 yamt 1185: /* Insert on page list */
1.102 chs 1186: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1187: #ifdef DIAGNOSTIC
1.97 yamt 1188: pi->pi_magic = PI_MAGIC;
1.3 pk 1189: #endif
1.128 christos 1190: cp = (char *)cp + pp->pr_size;
1.125 ad 1191:
1192: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1193: }
1.3 pk 1194: }
1195:
1196: /*
1197: * If the pool was depleted, point at the new page.
1198: */
1199: if (pp->pr_curpage == NULL)
1200: pp->pr_curpage = ph;
1201:
1202: if (++pp->pr_npages > pp->pr_hiwat)
1203: pp->pr_hiwat = pp->pr_npages;
1204: }
1205:
1.20 thorpej 1206: /*
1.52 thorpej 1207: * Used by pool_get() when nitems drops below the low water mark. This
1.88 chs 1208: * is used to catch up pr_nitems with the low water mark.
1.20 thorpej 1209: *
1.21 thorpej 1210: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1211: *
1.73 thorpej 1212: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1213: * with it locked.
1214: */
1215: static int
1.42 thorpej 1216: pool_catchup(struct pool *pp)
1.20 thorpej 1217: {
1218: int error = 0;
1219:
1.54 thorpej 1220: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1221: error = pool_grow(pp, PR_NOWAIT);
1222: if (error) {
1.20 thorpej 1223: break;
1224: }
1225: }
1.113 yamt 1226: return error;
1.20 thorpej 1227: }
1228:
1.88 chs 1229: static void
1230: pool_update_curpage(struct pool *pp)
1231: {
1232:
1233: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1234: if (pp->pr_curpage == NULL) {
1235: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1236: }
1.168 yamt 1237: KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1238: (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1.88 chs 1239: }
1240:
1.3 pk 1241: void
1.42 thorpej 1242: pool_setlowat(struct pool *pp, int n)
1.3 pk 1243: {
1.15 pk 1244:
1.134 ad 1245: mutex_enter(&pp->pr_lock);
1.21 thorpej 1246:
1.3 pk 1247: pp->pr_minitems = n;
1.15 pk 1248: pp->pr_minpages = (n == 0)
1249: ? 0
1.18 thorpej 1250: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1251:
1252: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1253: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1254: /*
1255: * XXX: Should we log a warning? Should we set up a timeout
1256: * to try again in a second or so? The latter could break
1257: * a caller's assumptions about interrupt protection, etc.
1258: */
1259: }
1.21 thorpej 1260:
1.134 ad 1261: mutex_exit(&pp->pr_lock);
1.3 pk 1262: }
1263:
1264: void
1.42 thorpej 1265: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1266: {
1.15 pk 1267:
1.134 ad 1268: mutex_enter(&pp->pr_lock);
1.21 thorpej 1269:
1.15 pk 1270: pp->pr_maxpages = (n == 0)
1271: ? 0
1.18 thorpej 1272: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1273:
1.134 ad 1274: mutex_exit(&pp->pr_lock);
1.3 pk 1275: }
1276:
1.20 thorpej 1277: void
1.42 thorpej 1278: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1279: {
1280:
1.134 ad 1281: mutex_enter(&pp->pr_lock);
1.20 thorpej 1282:
1283: pp->pr_hardlimit = n;
1284: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1285: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1286: pp->pr_hardlimit_warning_last.tv_sec = 0;
1287: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1288:
1289: /*
1.21 thorpej 1290: * In-line version of pool_sethiwat(), because we don't want to
1291: * release the lock.
1.20 thorpej 1292: */
1293: pp->pr_maxpages = (n == 0)
1294: ? 0
1295: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1296:
1.134 ad 1297: mutex_exit(&pp->pr_lock);
1.20 thorpej 1298: }
1.3 pk 1299:
1300: /*
              1301: * Release all completely idle pages that have not been used recently.
1.184 rmind 1302: *
1303: * Might be called from interrupt context.
1.3 pk 1304: */
1.66 thorpej 1305: int
1.56 sommerfe 1306: pool_reclaim(struct pool *pp)
1.3 pk 1307: {
1308: struct pool_item_header *ph, *phnext;
1.61 chs 1309: struct pool_pagelist pq;
1.151 yamt 1310: uint32_t curtime;
1.134 ad 1311: bool klock;
1312: int rv;
1.3 pk 1313:
1.184 rmind 1314: if (cpu_intr_p() || cpu_softintr_p()) {
1315: KASSERT(pp->pr_ipl != IPL_NONE);
1316: }
1317:
1.68 thorpej 1318: if (pp->pr_drain_hook != NULL) {
1319: /*
1320: * The drain hook must be called with the pool unlocked.
1321: */
1322: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1323: }
1324:
1.134 ad 1325: /*
1.157 ad 1326: * XXXSMP Because we do not want to cause non-MPSAFE code
1327: * to block.
1.134 ad 1328: */
1329: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1330: pp->pr_ipl == IPL_SOFTSERIAL) {
1331: KERNEL_LOCK(1, NULL);
1332: klock = true;
1333: } else
1334: klock = false;
1335:
1336: /* Reclaim items from the pool's cache (if any). */
1337: if (pp->pr_cache != NULL)
1338: pool_cache_invalidate(pp->pr_cache);
1339:
1340: if (mutex_tryenter(&pp->pr_lock) == 0) {
1341: if (klock) {
1342: KERNEL_UNLOCK_ONE(NULL);
1343: }
1.66 thorpej 1344: return (0);
1.134 ad 1345: }
1.68 thorpej 1346:
1.88 chs 1347: LIST_INIT(&pq);
1.43 thorpej 1348:
1.151 yamt 1349: curtime = time_uptime;
1.21 thorpej 1350:
1.88 chs 1351: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1352: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1353:
1354: /* Check our minimum page claim */
1355: if (pp->pr_npages <= pp->pr_minpages)
1356: break;
1357:
1.88 chs 1358: KASSERT(ph->ph_nmissing == 0);
1.191 para 1359: if (curtime - ph->ph_time < pool_inactive_time)
1.88 chs 1360: continue;
1.21 thorpej 1361:
1.88 chs 1362: /*
1363: * If freeing this page would put us below
1364: * the low water mark, stop now.
1365: */
1366: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1367: pp->pr_minitems)
1368: break;
1.21 thorpej 1369:
1.88 chs 1370: pr_rmpage(pp, ph, &pq);
1.3 pk 1371: }
1372:
1.134 ad 1373: mutex_exit(&pp->pr_lock);
1374:
1375: if (LIST_EMPTY(&pq))
1376: rv = 0;
1377: else {
1378: pr_pagelist_free(pp, &pq);
1379: rv = 1;
1380: }
1381:
1382: if (klock) {
1383: KERNEL_UNLOCK_ONE(NULL);
1384: }
1.66 thorpej 1385:
1.134 ad 1386: return (rv);
1.3 pk 1387: }
1388:
1389: /*
1.134 ad 1390: * Drain pools, one at a time. This is a two stage process;
1391: * drain_start kicks off a cross call to drain CPU-level caches
1392: * if the pool has an associated pool_cache. drain_end waits
1393: * for those cross calls to finish, and then drains the cache
1394: * (if any) and pool.
1.131 ad 1395: *
1.134 ad 1396: * Note, must never be called from interrupt context.
1.3 pk 1397: */
1398: void
1.134 ad 1399: pool_drain_start(struct pool **ppp, uint64_t *wp)
1.3 pk 1400: {
1401: struct pool *pp;
1.134 ad 1402:
1.145 ad 1403: KASSERT(!TAILQ_EMPTY(&pool_head));
1.3 pk 1404:
1.61 chs 1405: pp = NULL;
1.134 ad 1406:
1407: /* Find next pool to drain, and add a reference. */
1408: mutex_enter(&pool_head_lock);
1409: do {
1410: if (drainpp == NULL) {
1.145 ad 1411: drainpp = TAILQ_FIRST(&pool_head);
1.134 ad 1412: }
1413: if (drainpp != NULL) {
1414: pp = drainpp;
1.145 ad 1415: drainpp = TAILQ_NEXT(pp, pr_poollist);
1.134 ad 1416: }
1417: /*
1418: * Skip completely idle pools. We depend on at least
1419: * one pool in the system being active.
1420: */
1421: } while (pp == NULL || pp->pr_npages == 0);
1422: pp->pr_refcnt++;
1423: mutex_exit(&pool_head_lock);
1424:
1425: /* If there is a pool_cache, drain CPU level caches. */
1426: *ppp = pp;
1427: if (pp->pr_cache != NULL) {
1428: *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
1429: pp->pr_cache, NULL);
1430: }
1431: }
1432:
1.186 pooka 1433: bool
1.134 ad 1434: pool_drain_end(struct pool *pp, uint64_t where)
1435: {
1.186 pooka 1436: bool reclaimed;
1.134 ad 1437:
1438: if (pp == NULL)
1.186 pooka 1439: return false;
1.134 ad 1440:
1441: KASSERT(pp->pr_refcnt > 0);
1442:
1443: /* Wait for remote draining to complete. */
1444: if (pp->pr_cache != NULL)
1445: xc_wait(where);
1446:
              1447: /* Drain the cache (if any) and the pool. */
1.186 pooka 1448: reclaimed = pool_reclaim(pp);
1.134 ad 1449:
1450: /* Finally, unlock the pool. */
1451: mutex_enter(&pool_head_lock);
1452: pp->pr_refcnt--;
1453: cv_broadcast(&pool_busy);
1454: mutex_exit(&pool_head_lock);
1.186 pooka 1455:
1456: return reclaimed;
1.3 pk 1457: }
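
/*
 * Illustrative sketch of the intended two-stage use (e.g. from the
 * pagedaemon); "where" carries the cross-call ticket between the two
 * calls:
 *
 *	struct pool *pp;
 *	uint64_t where;
 *
 *	pool_drain_start(&pp, &where);
 *	(void)pool_drain_end(pp, where);
 */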
1458:
1459: /*
1460: * Diagnostic helpers.
1461: */
1.21 thorpej 1462:
1.25 thorpej 1463: void
1.108 yamt 1464: pool_printall(const char *modif, void (*pr)(const char *, ...))
1465: {
1466: struct pool *pp;
1467:
1.145 ad 1468: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.108 yamt 1469: pool_printit(pp, modif, pr);
1470: }
1471: }
1472:
1473: void
1.42 thorpej 1474: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1475: {
1476:
1477: if (pp == NULL) {
1478: (*pr)("Must specify a pool to print.\n");
1479: return;
1480: }
1481:
1482: pool_print1(pp, modif, pr);
1483: }
1484:
1.21 thorpej 1485: static void
1.124 yamt 1486: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1487: void (*pr)(const char *, ...))
1.88 chs 1488: {
1489: struct pool_item_header *ph;
1490: #ifdef DIAGNOSTIC
1491: struct pool_item *pi;
1492: #endif
1493:
1494: LIST_FOREACH(ph, pl, ph_pagelist) {
1.151 yamt 1495: (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1496: ph->ph_page, ph->ph_nmissing, ph->ph_time);
1.88 chs 1497: #ifdef DIAGNOSTIC
1.97 yamt 1498: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1499: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1500: if (pi->pi_magic != PI_MAGIC) {
1501: (*pr)("\t\t\titem %p, magic 0x%x\n",
1502: pi, pi->pi_magic);
1503: }
1.88 chs 1504: }
1505: }
1506: #endif
1507: }
1508: }
1509:
1510: static void
1.42 thorpej 1511: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1512: {
1.25 thorpej 1513: struct pool_item_header *ph;
1.134 ad 1514: pool_cache_t pc;
1515: pcg_t *pcg;
1516: pool_cache_cpu_t *cc;
1517: uint64_t cpuhit, cpumiss;
1.44 thorpej 1518: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1519: char c;
1520:
1521: while ((c = *modif++) != '\0') {
1522: if (c == 'l')
1523: print_log = 1;
1524: if (c == 'p')
1525: print_pagelist = 1;
1.44 thorpej 1526: if (c == 'c')
1527: print_cache = 1;
1.25 thorpej 1528: }
1529:
1.134 ad 1530: if ((pc = pp->pr_cache) != NULL) {
1531: (*pr)("POOL CACHE");
1532: } else {
1533: (*pr)("POOL");
1534: }
1535:
1536: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1537: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1538: pp->pr_roflags);
1.66 thorpej 1539: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1540: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1541: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1542: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1543: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1544:
1.134 ad 1545: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1546: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1547: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1548: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1549:
1550: if (print_pagelist == 0)
1551: goto skip_pagelist;
1552:
1.88 chs 1553: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1554: (*pr)("\n\tempty page list:\n");
1.97 yamt 1555: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1556: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1557: (*pr)("\n\tfull page list:\n");
1.97 yamt 1558: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1559: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1560: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1561: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1562:
1.25 thorpej 1563: if (pp->pr_curpage == NULL)
1564: (*pr)("\tno current page\n");
1565: else
1566: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1567:
1568: skip_pagelist:
1569: if (print_log == 0)
1570: goto skip_log;
1571:
1572: (*pr)("\n");
1.3 pk 1573:
1.25 thorpej 1574: skip_log:
1.44 thorpej 1575:
1.102 chs 1576: #define PR_GROUPLIST(pcg) \
1577: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1.142 ad 1578: for (i = 0; i < pcg->pcg_size; i++) { \
1.102 chs 1579: if (pcg->pcg_objects[i].pcgo_pa != \
1580: POOL_PADDR_INVALID) { \
1581: (*pr)("\t\t\t%p, 0x%llx\n", \
1582: pcg->pcg_objects[i].pcgo_va, \
1583: (unsigned long long) \
1584: pcg->pcg_objects[i].pcgo_pa); \
1585: } else { \
1586: (*pr)("\t\t\t%p\n", \
1587: pcg->pcg_objects[i].pcgo_va); \
1588: } \
1589: }
1590:
1.134 ad 1591: if (pc != NULL) {
1592: cpuhit = 0;
1593: cpumiss = 0;
1.183 ad 1594: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.134 ad 1595: if ((cc = pc->pc_cpus[i]) == NULL)
1596: continue;
1597: cpuhit += cc->cc_hits;
1598: cpumiss += cc->cc_misses;
1599: }
1600: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1601: (*pr)("\tcache layer hits %llu misses %llu\n",
1602: pc->pc_hits, pc->pc_misses);
1603: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1604: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1605: pc->pc_contended);
1606: (*pr)("\tcache layer empty groups %u full groups %u\n",
1607: pc->pc_nempty, pc->pc_nfull);
1608: if (print_cache) {
1609: (*pr)("\tfull cache groups:\n");
1610: for (pcg = pc->pc_fullgroups; pcg != NULL;
1611: pcg = pcg->pcg_next) {
1612: PR_GROUPLIST(pcg);
1613: }
1614: (*pr)("\tempty cache groups:\n");
1615: for (pcg = pc->pc_emptygroups; pcg != NULL;
1616: pcg = pcg->pcg_next) {
1617: PR_GROUPLIST(pcg);
1618: }
1.103 chs 1619: }
1.44 thorpej 1620: }
1.102 chs 1621: #undef PR_GROUPLIST
1.88 chs 1622: }
1623:
1624: static int
1625: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1626: {
1627: struct pool_item *pi;
1.128 christos 1628: void *page;
1.88 chs 1629: int n;
1630:
1.121 yamt 1631: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1632: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1633: if (page != ph->ph_page &&
1634: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1635: if (label != NULL)
1636: printf("%s: ", label);
1637: printf("pool(%p:%s): page inconsistency: page %p;"
1638: " at page head addr %p (p %p)\n", pp,
1639: pp->pr_wchan, ph->ph_page,
1640: ph, page);
1641: return 1;
1642: }
1.88 chs 1643: }
1.3 pk 1644:
1.97 yamt 1645: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1646: return 0;
1647:
1.102 chs 1648: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1649: pi != NULL;
1.102 chs 1650: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1651:
1652: #ifdef DIAGNOSTIC
1653: if (pi->pi_magic != PI_MAGIC) {
1654: if (label != NULL)
1655: printf("%s: ", label);
1656: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1657: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1658: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1659: n, pi);
1.88 chs 1660: panic("pool");
1661: }
1662: #endif
1.121 yamt 1663: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1664: continue;
1665: }
1.128 christos 1666: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1667: if (page == ph->ph_page)
1668: continue;
1669:
1670: if (label != NULL)
1671: printf("%s: ", label);
1672: printf("pool(%p:%s): page inconsistency: page %p;"
1673: " item ordinal %d; addr %p (p %p)\n", pp,
1674: pp->pr_wchan, ph->ph_page,
1675: n, pi, page);
1676: return 1;
1677: }
1678: return 0;
1.3 pk 1679: }
1680:
1.88 chs 1681:
1.3 pk 1682: int
1.42 thorpej 1683: pool_chk(struct pool *pp, const char *label)
1.3 pk 1684: {
1685: struct pool_item_header *ph;
1686: int r = 0;
1687:
1.134 ad 1688: mutex_enter(&pp->pr_lock);
1.88 chs 1689: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1690: r = pool_chk_page(pp, label, ph);
1691: if (r) {
1692: goto out;
1693: }
1694: }
1695: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1696: r = pool_chk_page(pp, label, ph);
1697: if (r) {
1.3 pk 1698: goto out;
1699: }
1.88 chs 1700: }
1701: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1702: r = pool_chk_page(pp, label, ph);
1703: if (r) {
1.3 pk 1704: goto out;
1705: }
1706: }
1.88 chs 1707:
1.3 pk 1708: out:
1.134 ad 1709: mutex_exit(&pp->pr_lock);
1.3 pk 1710: return (r);
1.43 thorpej 1711: }
1712:
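/*
 * Illustrative sketch only (not part of this revision): a diagnostic
 * caller could use pool_chk() to verify a pool's page lists after a
 * suspect operation.  "somepool" is a hypothetical pool.
 */
#if 0
static void
somepool_selfcheck(void)
{

	/* pool_chk() returns non-zero if an inconsistency was found. */
	if (pool_chk(&somepool, __func__) != 0)
		printf("somepool: page list inconsistency detected\n");
}
#endif
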
1713: /*
1714: * pool_cache_init:
1715: *
1716: * Initialize a pool cache.
1.134 ad 1717: */
1718: pool_cache_t
1719: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1720: const char *wchan, struct pool_allocator *palloc, int ipl,
1721: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1722: {
1723: pool_cache_t pc;
1724:
1725: pc = pool_get(&cache_pool, PR_WAITOK);
1726: if (pc == NULL)
1727: return NULL;
1728:
1729: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1730: palloc, ipl, ctor, dtor, arg);
1731:
1732: return pc;
1733: }
1734:
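/*
 * Illustrative sketch only (not part of this revision): a typical
 * consumer creates a cache for its fixed-size objects and allocates
 * from it with the pool_cache_get()/pool_cache_put() wrappers from
 * <sys/pool.h>.  "struct frobber" and the frob_* names are hypothetical.
 */
#if 0
static pool_cache_t frob_cache;

void
frob_subsystem_init(void)
{

	/* No ctor/dtor, default allocator, never used from interrupt. */
	frob_cache = pool_cache_init(sizeof(struct frobber), 0, 0, 0,
	    "frobpl", NULL, IPL_NONE, NULL, NULL, NULL);
}

struct frobber *
frob_alloc(void)
{

	/* PR_WAITOK: may sleep until memory becomes available. */
	return pool_cache_get(frob_cache, PR_WAITOK);
}

void
frob_free(struct frobber *f)
{

	pool_cache_put(frob_cache, f);
}
#endif
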
1735: /*
1736: * pool_cache_bootstrap:
1.43 thorpej 1737: *
1.134 ad 1738: * Kernel-private version of pool_cache_init(). The caller
1739: * provides initial storage.
1.43 thorpej 1740: */
1741: void
1.134 ad 1742: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1743: u_int align_offset, u_int flags, const char *wchan,
1744: struct pool_allocator *palloc, int ipl,
1745: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 1746: void *arg)
1747: {
1.134 ad 1748: CPU_INFO_ITERATOR cii;
1.145 ad 1749: pool_cache_t pc1;
1.134 ad 1750: struct cpu_info *ci;
1751: struct pool *pp;
1752:
1753: pp = &pc->pc_pool;
1754: if (palloc == NULL && ipl == IPL_NONE)
1755: palloc = &pool_allocator_nointr;
1756: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.157 ad 1757: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1.43 thorpej 1758:
1.134 ad 1759: if (ctor == NULL) {
1760: ctor = (int (*)(void *, void *, int))nullop;
1761: }
1762: if (dtor == NULL) {
1763: dtor = (void (*)(void *, void *))nullop;
1764: }
1.43 thorpej 1765:
1.134 ad 1766: pc->pc_emptygroups = NULL;
1767: pc->pc_fullgroups = NULL;
1768: pc->pc_partgroups = NULL;
1.43 thorpej 1769: pc->pc_ctor = ctor;
1770: pc->pc_dtor = dtor;
1771: pc->pc_arg = arg;
1.134 ad 1772: pc->pc_hits = 0;
1.48 thorpej 1773: pc->pc_misses = 0;
1.134 ad 1774: pc->pc_nempty = 0;
1775: pc->pc_npart = 0;
1776: pc->pc_nfull = 0;
1777: pc->pc_contended = 0;
1778: pc->pc_refcnt = 0;
1.136 yamt 1779: pc->pc_freecheck = NULL;
1.134 ad 1780:
1.142 ad 1781: if ((flags & PR_LARGECACHE) != 0) {
1782: pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1.163 ad 1783: pc->pc_pcgpool = &pcg_large_pool;
1.142 ad 1784: } else {
1785: pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1.163 ad 1786: pc->pc_pcgpool = &pcg_normal_pool;
1.142 ad 1787: }
1788:
1.134 ad 1789: /* Allocate per-CPU caches. */
1790: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1791: pc->pc_ncpu = 0;
1.139 ad 1792: if (ncpu < 2) {
1.137 ad 1793: /* XXX For sparc: boot CPU is not attached yet. */
1794: pool_cache_cpu_init1(curcpu(), pc);
1795: } else {
1796: for (CPU_INFO_FOREACH(cii, ci)) {
1797: pool_cache_cpu_init1(ci, pc);
1798: }
1.134 ad 1799: }
1.145 ad 1800:
1801: /* Add to list of all pools. */
1802: if (__predict_true(!cold))
1.134 ad 1803: mutex_enter(&pool_head_lock);
1.145 ad 1804: TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1805: if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1806: break;
1807: }
1808: if (pc1 == NULL)
1809: TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1810: else
1811: TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1812: if (__predict_true(!cold))
1.134 ad 1813: mutex_exit(&pool_head_lock);
1.145 ad 1814:
1815: membar_sync();
1816: pp->pr_cache = pc;
1.43 thorpej 1817: }
1818:
1819: /*
1820: * pool_cache_destroy:
1821: *
1822: * Destroy a pool cache.
1823: */
1824: void
1.134 ad 1825: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 1826: {
1.191 para 1827:
1828: pool_cache_bootstrap_destroy(pc);
1829: pool_put(&cache_pool, pc);
1830: }
1831:
1832: /*
1833: * pool_cache_bootstrap_destroy:
1834: *
                     1835:  * 	Destroy a pool cache initialized with pool_cache_bootstrap();
                     1836:  *	does not free the caller-provided storage.
1836: */
1837: void
1838: pool_cache_bootstrap_destroy(pool_cache_t pc)
1839: {
1.134 ad 1840: struct pool *pp = &pc->pc_pool;
1.175 jym 1841: u_int i;
1.134 ad 1842:
1843: /* Remove it from the global list. */
1844: mutex_enter(&pool_head_lock);
1845: while (pc->pc_refcnt != 0)
1846: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 1847: TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1.134 ad 1848: mutex_exit(&pool_head_lock);
1.43 thorpej 1849:
1850: /* First, invalidate the entire cache. */
1851: pool_cache_invalidate(pc);
1852:
1.134 ad 1853: /* Disassociate it from the pool. */
1854: mutex_enter(&pp->pr_lock);
1855: pp->pr_cache = NULL;
1856: mutex_exit(&pp->pr_lock);
1857:
1858: /* Destroy per-CPU data */
1.183 ad 1859: for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1.175 jym 1860: pool_cache_invalidate_cpu(pc, i);
1.134 ad 1861:
1862: /* Finally, destroy it. */
1863: mutex_destroy(&pc->pc_lock);
1864: pool_destroy(pp);
1865: }
1866:
1867: /*
1868: * pool_cache_cpu_init1:
1869: *
1870: * Called for each pool_cache whenever a new CPU is attached.
1871: */
1872: static void
1873: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1874: {
1875: pool_cache_cpu_t *cc;
1.137 ad 1876: int index;
1.134 ad 1877:
1.137 ad 1878: index = ci->ci_index;
1879:
1.183 ad 1880: KASSERT(index < __arraycount(pc->pc_cpus));
1.134 ad 1881:
1.137 ad 1882: if ((cc = pc->pc_cpus[index]) != NULL) {
1883: KASSERT(cc->cc_cpuindex == index);
1.134 ad 1884: return;
1885: }
1886:
1887: /*
1888: * The first CPU is 'free'. This needs to be the case for
1889: * bootstrap - we may not be able to allocate yet.
1890: */
1891: if (pc->pc_ncpu == 0) {
1892: cc = &pc->pc_cpu0;
1893: pc->pc_ncpu = 1;
1894: } else {
1895: mutex_enter(&pc->pc_lock);
1896: pc->pc_ncpu++;
1897: mutex_exit(&pc->pc_lock);
1898: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1899: }
1900:
1901: cc->cc_ipl = pc->pc_pool.pr_ipl;
1902: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1903: cc->cc_cache = pc;
1.137 ad 1904: cc->cc_cpuindex = index;
1.134 ad 1905: cc->cc_hits = 0;
1906: cc->cc_misses = 0;
1.169 yamt 1907: cc->cc_current = __UNCONST(&pcg_dummy);
1908: cc->cc_previous = __UNCONST(&pcg_dummy);
1.134 ad 1909:
1.137 ad 1910: pc->pc_cpus[index] = cc;
1.43 thorpej 1911: }
1912:
1.134 ad 1913: /*
1914: * pool_cache_cpu_init:
1915: *
1916: * Called whenever a new CPU is attached.
1917: */
1918: void
1919: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 1920: {
1.134 ad 1921: pool_cache_t pc;
1922:
1923: mutex_enter(&pool_head_lock);
1.145 ad 1924: TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1.134 ad 1925: pc->pc_refcnt++;
1926: mutex_exit(&pool_head_lock);
1.43 thorpej 1927:
1.134 ad 1928: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 1929:
1.134 ad 1930: mutex_enter(&pool_head_lock);
1931: pc->pc_refcnt--;
1932: cv_broadcast(&pool_busy);
1933: }
1934: mutex_exit(&pool_head_lock);
1.43 thorpej 1935: }
1936:
1.134 ad 1937: /*
1938: * pool_cache_reclaim:
1939: *
1940: * Reclaim memory from a pool cache.
1941: */
1942: bool
1943: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 1944: {
1945:
1.134 ad 1946: return pool_reclaim(&pc->pc_pool);
1947: }
1.43 thorpej 1948:
1.136 yamt 1949: static void
1950: pool_cache_destruct_object1(pool_cache_t pc, void *object)
1951: {
1952:
1953: (*pc->pc_dtor)(pc->pc_arg, object);
1954: pool_put(&pc->pc_pool, object);
1955: }
1956:
1.134 ad 1957: /*
1958: * pool_cache_destruct_object:
1959: *
1960: * Force destruction of an object and its release back into
1961: * the pool.
1962: */
1963: void
1964: pool_cache_destruct_object(pool_cache_t pc, void *object)
1965: {
1966:
1.136 yamt 1967: FREECHECK_IN(&pc->pc_freecheck, object);
1968:
1969: pool_cache_destruct_object1(pc, object);
1.43 thorpej 1970: }
1971:
1.134 ad 1972: /*
1973: * pool_cache_invalidate_groups:
1974: *
1975: * Invalidate a chain of groups and destruct all objects.
1976: */
1.102 chs 1977: static void
1.134 ad 1978: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 1979: {
1.134 ad 1980: void *object;
1981: pcg_t *next;
1982: int i;
1983:
1984: for (; pcg != NULL; pcg = next) {
1985: next = pcg->pcg_next;
1986:
1987: for (i = 0; i < pcg->pcg_avail; i++) {
1988: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 1989: pool_cache_destruct_object1(pc, object);
1.134 ad 1990: }
1.102 chs 1991:
1.142 ad 1992: if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
1993: pool_put(&pcg_large_pool, pcg);
1994: } else {
1995: KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
1996: pool_put(&pcg_normal_pool, pcg);
1997: }
1.102 chs 1998: }
1999: }
2000:
1.43 thorpej 2001: /*
1.134 ad 2002: * pool_cache_invalidate:
1.43 thorpej 2003: *
1.134 ad 2004: * Invalidate a pool cache (destruct and release all of the
2005: * cached objects). Does not reclaim objects from the pool.
1.176 thorpej 2006: *
                     2007:  *	Note: for pool caches that provide constructed objects, it is
                     2008:  *	assumed that another level of synchronization is provided between
                     2009:  *	the input to the constructor and the cache invalidation.
1.43 thorpej 2010: */
1.134 ad 2011: void
2012: pool_cache_invalidate(pool_cache_t pc)
2013: {
2014: pcg_t *full, *empty, *part;
1.182 rmind 2015: #if 0
1.176 thorpej 2016: uint64_t where;
2017:
1.177 jym 2018: if (ncpu < 2 || !mp_online) {
1.176 thorpej 2019: /*
2020: * We might be called early enough in the boot process
2021: * for the CPU data structures to not be fully initialized.
2022: * In this case, simply gather the local CPU's cache now
2023: * since it will be the only one running.
2024: */
2025: pool_cache_xcall(pc);
2026: } else {
2027: /*
2028: * Gather all of the CPU-specific caches into the
2029: * global cache.
2030: */
2031: where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
2032: xc_wait(where);
2033: }
1.182 rmind 2034: #endif
1.134 ad 2035: mutex_enter(&pc->pc_lock);
2036: full = pc->pc_fullgroups;
2037: empty = pc->pc_emptygroups;
2038: part = pc->pc_partgroups;
2039: pc->pc_fullgroups = NULL;
2040: pc->pc_emptygroups = NULL;
2041: pc->pc_partgroups = NULL;
2042: pc->pc_nfull = 0;
2043: pc->pc_nempty = 0;
2044: pc->pc_npart = 0;
2045: mutex_exit(&pc->pc_lock);
2046:
2047: pool_cache_invalidate_groups(pc, full);
2048: pool_cache_invalidate_groups(pc, empty);
2049: pool_cache_invalidate_groups(pc, part);
2050: }
2051:
1.175 jym 2052: /*
2053: * pool_cache_invalidate_cpu:
2054: *
                     2055:  *	Invalidate all CPU-bound cached objects in the pool cache, the CPU
                     2056:  *	being identified by its associated index.
                     2057:  *	It is the caller's responsibility to ensure that no operation is
2058: * taking place on this pool cache while doing this invalidation.
2059: * WARNING: as no inter-CPU locking is enforced, trying to invalidate
2060: * pool cached objects from a CPU different from the one currently running
                     2061:  *	may result in undefined behaviour.
2062: */
2063: static void
2064: pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2065: {
2066: pool_cache_cpu_t *cc;
2067: pcg_t *pcg;
2068:
2069: if ((cc = pc->pc_cpus[index]) == NULL)
2070: return;
2071:
2072: if ((pcg = cc->cc_current) != &pcg_dummy) {
2073: pcg->pcg_next = NULL;
2074: pool_cache_invalidate_groups(pc, pcg);
2075: }
2076: if ((pcg = cc->cc_previous) != &pcg_dummy) {
2077: pcg->pcg_next = NULL;
2078: pool_cache_invalidate_groups(pc, pcg);
2079: }
2080: if (cc != &pc->pc_cpu0)
2081: pool_put(&cache_cpu_pool, cc);
2082:
2083: }
2084:
1.134 ad 2085: void
2086: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2087: {
2088:
2089: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2090: }
2091:
2092: void
2093: pool_cache_setlowat(pool_cache_t pc, int n)
2094: {
2095:
2096: pool_setlowat(&pc->pc_pool, n);
2097: }
2098:
2099: void
2100: pool_cache_sethiwat(pool_cache_t pc, int n)
2101: {
2102:
2103: pool_sethiwat(&pc->pc_pool, n);
2104: }
2105:
2106: void
2107: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2108: {
2109:
2110: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2111: }
2112:
1.162 ad 2113: static bool __noinline
2114: pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
1.134 ad 2115: paddr_t *pap, int flags)
1.43 thorpej 2116: {
1.134 ad 2117: pcg_t *pcg, *cur;
2118: uint64_t ncsw;
2119: pool_cache_t pc;
1.43 thorpej 2120: void *object;
1.58 thorpej 2121:
1.168 yamt 2122: KASSERT(cc->cc_current->pcg_avail == 0);
2123: KASSERT(cc->cc_previous->pcg_avail == 0);
2124:
1.134 ad 2125: pc = cc->cc_cache;
2126: cc->cc_misses++;
1.43 thorpej 2127:
1.134 ad 2128: /*
2129: * Nothing was available locally. Try and grab a group
2130: * from the cache.
2131: */
1.162 ad 2132: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2133: ncsw = curlwp->l_ncsw;
2134: mutex_enter(&pc->pc_lock);
2135: pc->pc_contended++;
1.43 thorpej 2136:
1.134 ad 2137: /*
2138: * If we context switched while locking, then
2139: * our view of the per-CPU data is invalid:
2140: * retry.
2141: */
2142: if (curlwp->l_ncsw != ncsw) {
2143: mutex_exit(&pc->pc_lock);
1.162 ad 2144: return true;
1.43 thorpej 2145: }
1.102 chs 2146: }
1.43 thorpej 2147:
1.162 ad 2148: if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
1.43 thorpej 2149: /*
1.134 ad 2150: * If there's a full group, release our empty
2151: * group back to the cache. Install the full
2152: * group as cc_current and return.
1.43 thorpej 2153: */
1.162 ad 2154: if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
1.134 ad 2155: KASSERT(cur->pcg_avail == 0);
2156: cur->pcg_next = pc->pc_emptygroups;
2157: pc->pc_emptygroups = cur;
2158: pc->pc_nempty++;
1.87 thorpej 2159: }
1.142 ad 2160: KASSERT(pcg->pcg_avail == pcg->pcg_size);
1.134 ad 2161: cc->cc_current = pcg;
2162: pc->pc_fullgroups = pcg->pcg_next;
2163: pc->pc_hits++;
2164: pc->pc_nfull--;
2165: mutex_exit(&pc->pc_lock);
1.162 ad 2166: return true;
1.134 ad 2167: }
2168:
2169: /*
2170: * Nothing available locally or in cache. Take the slow
2171: * path: fetch a new object from the pool and construct
2172: * it.
2173: */
2174: pc->pc_misses++;
2175: mutex_exit(&pc->pc_lock);
1.162 ad 2176: splx(s);
1.134 ad 2177:
2178: object = pool_get(&pc->pc_pool, flags);
2179: *objectp = object;
1.162 ad 2180: if (__predict_false(object == NULL))
2181: return false;
1.125 ad 2182:
1.162 ad 2183: if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
1.134 ad 2184: pool_put(&pc->pc_pool, object);
2185: *objectp = NULL;
1.162 ad 2186: return false;
1.43 thorpej 2187: }
2188:
1.134 ad 2189: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2190: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2191:
1.134 ad 2192: if (pap != NULL) {
2193: #ifdef POOL_VTOPHYS
2194: *pap = POOL_VTOPHYS(object);
2195: #else
2196: *pap = POOL_PADDR_INVALID;
2197: #endif
1.102 chs 2198: }
1.43 thorpej 2199:
1.125 ad 2200: FREECHECK_OUT(&pc->pc_freecheck, object);
1.162 ad 2201: return false;
1.43 thorpej 2202: }
2203:
2204: /*
1.134 ad 2205: * pool_cache_get{,_paddr}:
1.43 thorpej 2206: *
1.134 ad 2207: * Get an object from a pool cache (optionally returning
2208: * the physical address of the object).
1.43 thorpej 2209: */
1.134 ad 2210: void *
2211: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2212: {
1.134 ad 2213: pool_cache_cpu_t *cc;
2214: pcg_t *pcg;
2215: void *object;
1.60 thorpej 2216: int s;
1.43 thorpej 2217:
1.184 rmind 2218: KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
1.185 rmind 2219: (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
1.190 jym 2220: "pool '%s' is IPL_NONE, but called from interrupt context\n",
2221: pc->pc_pool.pr_wchan);
1.184 rmind 2222:
1.155 ad 2223: if (flags & PR_WAITOK) {
1.154 yamt 2224: ASSERT_SLEEPABLE();
1.155 ad 2225: }
1.125 ad 2226:
1.162 ad 2227: /* Lock out interrupts and disable preemption. */
2228: s = splvm();
1.165 yamt 2229: while (/* CONSTCOND */ true) {
1.134 ad 2230: /* Try and allocate an object from the current group. */
1.162 ad 2231: cc = pc->pc_cpus[curcpu()->ci_index];
2232: KASSERT(cc->cc_cache == pc);
1.134 ad 2233: pcg = cc->cc_current;
1.162 ad 2234: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2235: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
1.162 ad 2236: if (__predict_false(pap != NULL))
1.134 ad 2237: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
1.148 yamt 2238: #if defined(DIAGNOSTIC)
1.134 ad 2239: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
1.163 ad 2240: KASSERT(pcg->pcg_avail < pcg->pcg_size);
1.134 ad 2241: KASSERT(object != NULL);
1.163 ad 2242: #endif
1.134 ad 2243: cc->cc_hits++;
1.162 ad 2244: splx(s);
1.134 ad 2245: FREECHECK_OUT(&pc->pc_freecheck, object);
2246: return object;
1.43 thorpej 2247: }
2248:
2249: /*
1.134 ad 2250: * That failed. If the previous group isn't empty, swap
2251: * it with the current group and allocate from there.
1.43 thorpej 2252: */
1.134 ad 2253: pcg = cc->cc_previous;
1.162 ad 2254: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2255: cc->cc_previous = cc->cc_current;
2256: cc->cc_current = pcg;
2257: continue;
1.43 thorpej 2258: }
2259:
1.134 ad 2260: /*
2261: * Can't allocate from either group: try the slow path.
2262: * If get_slow() allocated an object for us, or if
1.162 ad 2263: * no more objects are available, it will return false.
1.134 ad 2264: * Otherwise, we need to retry.
2265: */
1.165 yamt 2266: if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2267: break;
2268: }
1.43 thorpej 2269:
1.134 ad 2270: return object;
1.51 thorpej 2271: }
2272:
1.162 ad 2273: static bool __noinline
2274: pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
1.51 thorpej 2275: {
1.163 ad 2276: pcg_t *pcg, *cur;
1.134 ad 2277: uint64_t ncsw;
2278: pool_cache_t pc;
1.51 thorpej 2279:
1.168 yamt 2280: KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2281: KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2282:
1.134 ad 2283: pc = cc->cc_cache;
1.171 ad 2284: pcg = NULL;
1.134 ad 2285: cc->cc_misses++;
1.43 thorpej 2286:
1.171 ad 2287: /*
2288: * If there are no empty groups in the cache then allocate one
2289: * while still unlocked.
2290: */
2291: if (__predict_false(pc->pc_emptygroups == NULL)) {
2292: if (__predict_true(!pool_cache_disable)) {
2293: pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2294: }
2295: if (__predict_true(pcg != NULL)) {
2296: pcg->pcg_avail = 0;
2297: pcg->pcg_size = pc->pc_pcgsize;
2298: }
2299: }
2300:
1.162 ad 2301: /* Lock the cache. */
2302: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.164 ad 2303: ncsw = curlwp->l_ncsw;
1.134 ad 2304: mutex_enter(&pc->pc_lock);
2305: pc->pc_contended++;
1.162 ad 2306:
1.163 ad 2307: /*
2308: * If we context switched while locking, then our view of
2309: * the per-CPU data is invalid: retry.
2310: */
2311: if (__predict_false(curlwp->l_ncsw != ncsw)) {
2312: mutex_exit(&pc->pc_lock);
1.171 ad 2313: if (pcg != NULL) {
2314: pool_put(pc->pc_pcgpool, pcg);
2315: }
1.163 ad 2316: return true;
2317: }
1.162 ad 2318: }
1.102 chs 2319:
1.163     ad       2320: 	/* If we did not allocate an empty group above, take one from the cache. */
1.171 ad 2321: if (pcg == NULL && pc->pc_emptygroups != NULL) {
2322: pcg = pc->pc_emptygroups;
1.163 ad 2323: pc->pc_emptygroups = pcg->pcg_next;
2324: pc->pc_nempty--;
1.134 ad 2325: }
1.130 ad 2326:
1.162 ad 2327: /*
                     2328: 	 * If there's an empty group, release our full group back
                     2329: 	 * to the cache.  Install the empty group on the local CPU
2330: * and return.
2331: */
1.163 ad 2332: if (pcg != NULL) {
1.134 ad 2333: KASSERT(pcg->pcg_avail == 0);
1.162 ad 2334: if (__predict_false(cc->cc_previous == &pcg_dummy)) {
1.146 ad 2335: cc->cc_previous = pcg;
2336: } else {
1.162 ad 2337: cur = cc->cc_current;
2338: if (__predict_true(cur != &pcg_dummy)) {
1.163 ad 2339: KASSERT(cur->pcg_avail == cur->pcg_size);
1.146 ad 2340: cur->pcg_next = pc->pc_fullgroups;
2341: pc->pc_fullgroups = cur;
2342: pc->pc_nfull++;
2343: }
2344: cc->cc_current = pcg;
2345: }
1.163 ad 2346: pc->pc_hits++;
1.134 ad 2347: mutex_exit(&pc->pc_lock);
1.162 ad 2348: return true;
1.102 chs 2349: }
1.105 christos 2350:
1.134 ad 2351: /*
1.162 ad 2352: * Nothing available locally or in cache, and we didn't
2353: * allocate an empty group. Take the slow path and destroy
2354: * the object here and now.
1.134 ad 2355: */
2356: pc->pc_misses++;
2357: mutex_exit(&pc->pc_lock);
1.162 ad 2358: splx(s);
2359: pool_cache_destruct_object(pc, object);
1.105 christos 2360:
1.162 ad 2361: return false;
1.134 ad 2362: }
1.102 chs 2363:
1.43 thorpej 2364: /*
1.134 ad 2365: * pool_cache_put{,_paddr}:
1.43 thorpej 2366: *
1.134 ad 2367: * Put an object back to the pool cache (optionally caching the
2368: * physical address of the object).
1.43 thorpej 2369: */
1.101 thorpej 2370: void
1.134 ad 2371: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2372: {
1.134 ad 2373: pool_cache_cpu_t *cc;
2374: pcg_t *pcg;
2375: int s;
1.101 thorpej 2376:
1.172 yamt 2377: KASSERT(object != NULL);
1.134 ad 2378: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2379:
1.162 ad 2380: /* Lock out interrupts and disable preemption. */
2381: s = splvm();
1.165 yamt 2382: while (/* CONSTCOND */ true) {
1.134 ad 2383: /* If the current group isn't full, release it there. */
1.162 ad 2384: cc = pc->pc_cpus[curcpu()->ci_index];
2385: KASSERT(cc->cc_cache == pc);
1.134 ad 2386: pcg = cc->cc_current;
1.162 ad 2387: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2388: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2389: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2390: pcg->pcg_avail++;
2391: cc->cc_hits++;
1.162 ad 2392: splx(s);
1.134 ad 2393: return;
2394: }
1.43 thorpej 2395:
1.134 ad 2396: /*
1.162 ad 2397: * That failed. If the previous group isn't full, swap
1.134 ad 2398: * it with the current group and try again.
2399: */
2400: pcg = cc->cc_previous;
1.162 ad 2401: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2402: cc->cc_previous = cc->cc_current;
2403: cc->cc_current = pcg;
2404: continue;
2405: }
1.43 thorpej 2406:
1.134 ad 2407: /*
2408: * Can't free to either group: try the slow path.
2409: * If put_slow() releases the object for us, it
1.162 ad 2410: * will return false. Otherwise we need to retry.
1.134 ad 2411: */
1.165 yamt 2412: if (!pool_cache_put_slow(cc, s, object))
2413: break;
2414: }
1.43 thorpej 2415: }
2416:
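/*
 * Illustrative sketch only (not part of this revision): a driver that
 * needs the physical address of a cached object (e.g. to hand to a DMA
 * engine) can use the _paddr variants directly.  "desc_cache" is a
 * hypothetical pool_cache_t created with pool_cache_init().
 */
#if 0
static int
desc_setup(void)
{
	paddr_t pa;
	void *desc;

	desc = pool_cache_get_paddr(desc_cache, PR_NOWAIT, &pa);
	if (desc == NULL)
		return ENOMEM;
	/* ... program the hardware with "pa", touch "desc" from the CPU ... */
	pool_cache_put_paddr(desc_cache, desc, pa);
	return 0;
}
#endif
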
2417: /*
1.134 ad 2418: * pool_cache_xcall:
1.43 thorpej 2419: *
1.134 ad 2420: * Transfer objects from the per-CPU cache to the global cache.
2421: * Run within a cross-call thread.
1.43 thorpej 2422: */
2423: static void
1.134 ad 2424: pool_cache_xcall(pool_cache_t pc)
1.43 thorpej 2425: {
1.134 ad 2426: pool_cache_cpu_t *cc;
2427: pcg_t *prev, *cur, **list;
1.162 ad 2428: int s;
1.134 ad 2429:
1.162 ad 2430: s = splvm();
2431: mutex_enter(&pc->pc_lock);
2432: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2433: cur = cc->cc_current;
1.169 yamt 2434: cc->cc_current = __UNCONST(&pcg_dummy);
1.134 ad 2435: prev = cc->cc_previous;
1.169 yamt 2436: cc->cc_previous = __UNCONST(&pcg_dummy);
1.162 ad 2437: if (cur != &pcg_dummy) {
1.142 ad 2438: if (cur->pcg_avail == cur->pcg_size) {
1.134 ad 2439: list = &pc->pc_fullgroups;
2440: pc->pc_nfull++;
2441: } else if (cur->pcg_avail == 0) {
2442: list = &pc->pc_emptygroups;
2443: pc->pc_nempty++;
2444: } else {
2445: list = &pc->pc_partgroups;
2446: pc->pc_npart++;
2447: }
2448: cur->pcg_next = *list;
2449: *list = cur;
2450: }
1.162 ad 2451: if (prev != &pcg_dummy) {
1.142 ad 2452: if (prev->pcg_avail == prev->pcg_size) {
1.134 ad 2453: list = &pc->pc_fullgroups;
2454: pc->pc_nfull++;
2455: } else if (prev->pcg_avail == 0) {
2456: list = &pc->pc_emptygroups;
2457: pc->pc_nempty++;
2458: } else {
2459: list = &pc->pc_partgroups;
2460: pc->pc_npart++;
2461: }
2462: prev->pcg_next = *list;
2463: *list = prev;
2464: }
2465: mutex_exit(&pc->pc_lock);
2466: splx(s);
1.3 pk 2467: }
1.66 thorpej 2468:
2469: /*
2470: * Pool backend allocators.
2471: *
2472: * Each pool has a backend allocator that handles allocation, deallocation,
2473: * and any additional draining that might be needed.
2474: *
2475: * We provide two standard allocators:
2476: *
2477: * pool_allocator_kmem - the default when no allocator is specified
2478: *
2479: * pool_allocator_nointr - used for pools that will not be accessed
2480: * in interrupt context.
2481: */
2482: void *pool_page_alloc(struct pool *, int);
2483: void pool_page_free(struct pool *, void *);
2484:
1.112 bjh21 2485: #ifdef POOL_SUBPAGE
2486: struct pool_allocator pool_allocator_kmem_fullpage = {
1.192 rmind 2487: .pa_alloc = pool_page_alloc,
2488: .pa_free = pool_page_free,
2489: .pa_pagesz = 0
1.112 bjh21 2490: };
2491: #else
1.66 thorpej 2492: struct pool_allocator pool_allocator_kmem = {
1.191 para 2493: .pa_alloc = pool_page_alloc,
2494: .pa_free = pool_page_free,
2495: .pa_pagesz = 0
1.66 thorpej 2496: };
1.112 bjh21 2497: #endif
1.66 thorpej 2498:
1.112 bjh21 2499: #ifdef POOL_SUBPAGE
2500: struct pool_allocator pool_allocator_nointr_fullpage = {
1.194 para 2501: .pa_alloc = pool_page_alloc,
2502: .pa_free = pool_page_free,
1.192 rmind 2503: .pa_pagesz = 0
1.112 bjh21 2504: };
2505: #else
1.66 thorpej 2506: struct pool_allocator pool_allocator_nointr = {
1.191 para 2507: .pa_alloc = pool_page_alloc,
2508: .pa_free = pool_page_free,
2509: .pa_pagesz = 0
1.66 thorpej 2510: };
1.112 bjh21 2511: #endif
1.66 thorpej 2512:
2513: #ifdef POOL_SUBPAGE
2514: void *pool_subpage_alloc(struct pool *, int);
2515: void pool_subpage_free(struct pool *, void *);
2516:
1.112 bjh21 2517: struct pool_allocator pool_allocator_kmem = {
1.193 he 2518: .pa_alloc = pool_subpage_alloc,
2519: .pa_free = pool_subpage_free,
2520: .pa_pagesz = POOL_SUBPAGE
1.112 bjh21 2521: };
2522:
2523: struct pool_allocator pool_allocator_nointr = {
1.192 rmind 2524: .pa_alloc = pool_subpage_alloc,
2525: .pa_free = pool_subpage_free,
2526: .pa_pagesz = POOL_SUBPAGE
1.66 thorpej 2527: };
2528: #endif /* POOL_SUBPAGE */
2529:
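/*
 * Illustrative sketch only (not part of this revision): a pool can
 * supply its own backend allocator by filling in a struct pool_allocator
 * and passing it to pool_init().  The my_* names and "struct my_item"
 * are hypothetical; pa_pagesz == 0 selects the default page size.
 */
#if 0
static void *
my_page_alloc(struct pool *pp, int flags)
{

	/* Delegate to the standard page allocator in this sketch. */
	return pool_page_alloc(pp, flags);
}

static void
my_page_free(struct pool *pp, void *v)
{

	pool_page_free(pp, v);
}

static struct pool_allocator my_allocator = {
	.pa_alloc = my_page_alloc,
	.pa_free = my_page_free,
	.pa_pagesz = 0
};

static struct pool my_pool;

void
my_pool_attach(void)
{

	pool_init(&my_pool, sizeof(struct my_item), 0, 0, 0,
	    "mypl", &my_allocator, IPL_VM);
}
#endif
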
1.117 yamt 2530: static void *
2531: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2532: {
1.117 yamt 2533: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2534: void *res;
2535:
1.117 yamt 2536: res = (*pa->pa_alloc)(pp, flags);
2537: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2538: /*
1.117 yamt 2539: * We only run the drain hook here if PR_NOWAIT.
2540: * In other cases, the hook will be run in
2541: * pool_reclaim().
1.66 thorpej 2542: */
1.117 yamt 2543: if (pp->pr_drain_hook != NULL) {
2544: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2545: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2546: }
1.117 yamt 2547: }
2548: return res;
1.66 thorpej 2549: }
2550:
1.117 yamt 2551: static void
1.66 thorpej 2552: pool_allocator_free(struct pool *pp, void *v)
2553: {
2554: struct pool_allocator *pa = pp->pr_alloc;
2555:
2556: (*pa->pa_free)(pp, v);
2557: }
2558:
2559: void *
1.124 yamt 2560: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2561: {
1.192 rmind 2562: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
1.191 para 2563: vmem_addr_t va;
1.192 rmind 2564: int ret;
1.191 para 2565:
1.192 rmind 2566: ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2567: vflags | VM_INSTANTFIT, &va);
1.66 thorpej 2568:
1.192 rmind 2569: return ret ? NULL : (void *)va;
1.66 thorpej 2570: }
2571:
2572: void
1.124 yamt 2573: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2574: {
2575:
1.191 para 2576: uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
1.98 yamt 2577: }
2578:
2579: static void *
1.124 yamt 2580: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2581: {
1.192 rmind 2582: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2583: vmem_addr_t va;
2584: int ret;
1.191 para 2585:
1.192 rmind 2586: ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2587: vflags | VM_INSTANTFIT, &va);
1.98 yamt 2588:
1.192 rmind 2589: return ret ? NULL : (void *)va;
1.98 yamt 2590: }
2591:
2592: static void
1.124 yamt 2593: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2594: {
2595:
1.192 rmind 2596: vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
1.66 thorpej 2597: }
2598:
2599: #ifdef POOL_SUBPAGE
2600: /* Sub-page allocator, for machines with large hardware pages. */
2601: void *
2602: pool_subpage_alloc(struct pool *pp, int flags)
2603: {
1.134 ad 2604: return pool_get(&psppool, flags);
1.66 thorpej 2605: }
2606:
2607: void
2608: pool_subpage_free(struct pool *pp, void *v)
2609: {
2610: pool_put(&psppool, v);
2611: }
2612:
1.112 bjh21 2613: #endif /* POOL_SUBPAGE */
1.141 yamt 2614:
2615: #if defined(DDB)
2616: static bool
2617: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2618: {
2619:
2620: return (uintptr_t)ph->ph_page <= addr &&
2621: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2622: }
2623:
1.143 yamt 2624: static bool
2625: pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2626: {
2627:
2628: return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2629: }
2630:
2631: static bool
2632: pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2633: {
2634: int i;
2635:
2636: if (pcg == NULL) {
2637: return false;
2638: }
1.144 yamt 2639: for (i = 0; i < pcg->pcg_avail; i++) {
1.143 yamt 2640: if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2641: return true;
2642: }
2643: }
2644: return false;
2645: }
2646:
2647: static bool
2648: pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2649: {
2650:
2651: if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2652: unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2653: pool_item_bitmap_t *bitmap =
2654: ph->ph_bitmap + (idx / BITMAP_SIZE);
2655: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2656:
2657: return (*bitmap & mask) == 0;
2658: } else {
2659: struct pool_item *pi;
2660:
2661: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2662: if (pool_in_item(pp, pi, addr)) {
2663: return false;
2664: }
2665: }
2666: return true;
2667: }
2668: }
2669:
1.141 yamt 2670: void
2671: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2672: {
2673: struct pool *pp;
2674:
1.145 ad 2675: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.141 yamt 2676: struct pool_item_header *ph;
2677: uintptr_t item;
1.143 yamt 2678: bool allocated = true;
2679: bool incache = false;
2680: bool incpucache = false;
2681: char cpucachestr[32];
1.141 yamt 2682:
2683: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2684: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2685: if (pool_in_page(pp, ph, addr)) {
2686: goto found;
2687: }
2688: }
2689: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2690: if (pool_in_page(pp, ph, addr)) {
1.143 yamt 2691: allocated =
2692: pool_allocated(pp, ph, addr);
2693: goto found;
2694: }
2695: }
2696: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2697: if (pool_in_page(pp, ph, addr)) {
2698: allocated = false;
1.141 yamt 2699: goto found;
2700: }
2701: }
2702: continue;
2703: } else {
2704: ph = pr_find_pagehead_noalign(pp, (void *)addr);
2705: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2706: continue;
2707: }
1.143 yamt 2708: allocated = pool_allocated(pp, ph, addr);
1.141 yamt 2709: }
2710: found:
1.143 yamt 2711: if (allocated && pp->pr_cache) {
2712: pool_cache_t pc = pp->pr_cache;
2713: struct pool_cache_group *pcg;
2714: int i;
2715:
2716: for (pcg = pc->pc_fullgroups; pcg != NULL;
2717: pcg = pcg->pcg_next) {
2718: if (pool_in_cg(pp, pcg, addr)) {
2719: incache = true;
2720: goto print;
2721: }
2722: }
1.183 ad 2723: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.143 yamt 2724: pool_cache_cpu_t *cc;
2725:
2726: if ((cc = pc->pc_cpus[i]) == NULL) {
2727: continue;
2728: }
2729: if (pool_in_cg(pp, cc->cc_current, addr) ||
2730: pool_in_cg(pp, cc->cc_previous, addr)) {
2731: struct cpu_info *ci =
1.170 ad 2732: cpu_lookup(i);
1.143 yamt 2733:
2734: incpucache = true;
2735: snprintf(cpucachestr,
2736: sizeof(cpucachestr),
2737: "cached by CPU %u",
1.153 martin 2738: ci->ci_index);
1.143 yamt 2739: goto print;
2740: }
2741: }
2742: }
2743: print:
1.141 yamt 2744: item = (uintptr_t)ph->ph_page + ph->ph_off;
2745: item = item + rounddown(addr - item, pp->pr_size);
1.143 yamt 2746: (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
1.141 yamt 2747: (void *)addr, item, (size_t)(addr - item),
1.143 yamt 2748: pp->pr_wchan,
2749: incpucache ? cpucachestr :
2750: incache ? "cached" : allocated ? "allocated" : "free");
1.141 yamt 2751: }
2752: }
2753: #endif /* defined(DDB) */