Annotation of src/sys/kern/subr_pool.c, Revision 1.201.2.1
1.201.2.1! tls 1: /* $NetBSD: subr_pool.c,v 1.203 2014/06/13 19:09:07 joerg Exp $ */
1.1 pk 2:
3: /*-
1.201.2.1! tls 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014
1.183 ad 5: * The NetBSD Foundation, Inc.
1.1 pk 6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 9: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.134 ad 10: * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
1.1 pk 11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: *
21: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31: * POSSIBILITY OF SUCH DAMAGE.
32: */
1.64 lukem 33:
34: #include <sys/cdefs.h>
1.201.2.1! tls 35: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.203 2014/06/13 19:09:07 joerg Exp $");
1.24 scottr 36:
1.141 yamt 37: #include "opt_ddb.h"
1.28 thorpej 38: #include "opt_lockdebug.h"
1.1 pk 39:
40: #include <sys/param.h>
41: #include <sys/systm.h>
1.201.2.1! tls 42: #include <sys/sysctl.h>
1.135 yamt 43: #include <sys/bitops.h>
1.1 pk 44: #include <sys/proc.h>
45: #include <sys/errno.h>
46: #include <sys/kernel.h>
1.191 para 47: #include <sys/vmem.h>
1.1 pk 48: #include <sys/pool.h>
1.20 thorpej 49: #include <sys/syslog.h>
1.125 ad 50: #include <sys/debug.h>
1.134 ad 51: #include <sys/lockdebug.h>
52: #include <sys/xcall.h>
53: #include <sys/cpu.h>
1.145 ad 54: #include <sys/atomic.h>
1.3 pk 55:
1.187 uebayasi 56: #include <uvm/uvm_extern.h>
1.3 pk 57:
1.1 pk 58: /*
59: * Pool resource management utility.
1.3 pk 60: *
1.88 chs 61: * Memory is allocated in pages which are split into pieces according to
62: * the pool item size. Each page is kept on one of three lists in the
63: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
64: * for empty, full and partially-full pages respectively. The individual
65: * pool items are on a linked list headed by `ph_itemlist' in each page
66: * header. The memory for building the page list is either taken from
67: * the allocated pages themselves (for small pool items) or taken from
68: * an internal pool of page headers (`phpool').
1.1 pk 69: */
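
/*
 * Illustrative usage sketch (not compiled): a typical consumer declares
 * a pool, initializes it once, then gets and puts fixed-size items.
 * "struct foo" and the "foopl" wait channel are hypothetical names;
 * passing a NULL allocator selects the default pool_allocator_kmem
 * (see pool_init() below).
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	... use f ...
 *	pool_put(&foo_pool, f);
 *
 *	pool_destroy(&foo_pool);
 */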
70:
1.201.2.1! tls 71: /* List of all pools. Not static, as it is needed by 'vmstat -i' */
! 72: TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.134 ad 73:
1.3 pk 74: /* Private pool for page header structures */
1.97 yamt 75: #define PHPOOL_MAX 8
76: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 77: #define PHPOOL_FREELIST_NELEM(idx) \
78: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
1.3 pk 79:
1.62 bjh21 80: #ifdef POOL_SUBPAGE
81: /* Pool of subpages for use by normal pools. */
82: static struct pool psppool;
83: #endif
84:
1.98 yamt 85: static void *pool_page_alloc_meta(struct pool *, int);
86: static void pool_page_free_meta(struct pool *, void *);
87:
88: /* allocator for pool metadata */
1.134 ad 89: struct pool_allocator pool_allocator_meta = {
1.191 para 90: .pa_alloc = pool_page_alloc_meta,
91: .pa_free = pool_page_free_meta,
92: .pa_pagesz = 0
1.98 yamt 93: };
94:
1.3 pk 95: /* # of seconds to retain page after last use */
96: int pool_inactive_time = 10;
97:
98: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 99: static struct pool *drainpp;
100:
1.134 ad 101: /* This lock protects both pool_head and drainpp. */
102: static kmutex_t pool_head_lock;
103: static kcondvar_t pool_busy;
1.3 pk 104:
1.178 elad 105: /* This lock protects initialization of a potentially shared pool allocator */
106: static kmutex_t pool_allocator_lock;
107:
1.135 yamt 108: typedef uint32_t pool_item_bitmap_t;
109: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
110: #define BITMAP_MASK (BITMAP_SIZE - 1)
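
/*
 * Worked example (illustrative): with a 32-bit pool_item_bitmap_t,
 * item index 37 lives in bitmap word 37 / BITMAP_SIZE = 1, at bit
 * 37 & BITMAP_MASK = 5, i.e. mask 0x20; see pr_item_notouch_put()
 * and pr_item_notouch_get() below.
 */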
1.99 yamt 111:
1.3 pk 112: struct pool_item_header {
113: /* Page headers */
1.88 chs 114: LIST_ENTRY(pool_item_header)
1.3 pk 115: ph_pagelist; /* pool page list */
1.88 chs 116: SPLAY_ENTRY(pool_item_header)
117: ph_node; /* Off-page page headers */
1.128 christos 118: void * ph_page; /* this page's address */
1.151 yamt 119: uint32_t ph_time; /* last referenced */
1.135 yamt 120: uint16_t ph_nmissing; /* # of chunks in use */
1.141 yamt 121: uint16_t ph_off; /* start offset in page */
1.97 yamt 122: union {
123: /* !PR_NOTOUCH */
124: struct {
1.102 chs 125: LIST_HEAD(, pool_item)
1.97 yamt 126: phu_itemlist; /* chunk list for this page */
127: } phu_normal;
128: /* PR_NOTOUCH */
129: struct {
1.141 yamt 130: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 131: } phu_notouch;
132: } ph_u;
1.3 pk 133: };
1.97 yamt 134: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.135 yamt 135: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 136:
1.1 pk 137: struct pool_item {
1.3 pk 138: #ifdef DIAGNOSTIC
1.82 thorpej 139: u_int pi_magic;
1.33 chs 140: #endif
1.134 ad 141: #define PI_MAGIC 0xdeaddeadU
1.3 pk 142: /* Other entries use only this list entry */
1.102 chs 143: LIST_ENTRY(pool_item) pi_list;
1.3 pk 144: };
145:
1.53 thorpej 146: #define POOL_NEEDS_CATCHUP(pp) \
147: ((pp)->pr_nitems < (pp)->pr_minitems)
148:
1.43 thorpej 149: /*
150: * Pool cache management.
151: *
152: * Pool caches provide a way for constructed objects to be cached by the
153: * pool subsystem. This can lead to performance improvements by avoiding
 154: * needless object construction/destruction; destruction is deferred
 155: * until absolutely necessary.
156: *
1.134 ad 157: * Caches are grouped into cache groups. Each cache group references up
158: * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
159: * object from the pool, it calls the object's constructor and places it
160: * into a cache group. When a cache group frees an object back to the
161: * pool, it first calls the object's destructor. This allows the object
162: * to persist in constructed form while freed to the cache.
163: *
164: * The pool references each cache, so that when a pool is drained by the
165: * pagedaemon, it can drain each individual cache as well. Each time a
166: * cache is drained, the most idle cache group is freed to the pool in
167: * its entirety.
1.43 thorpej 168: *
169: * Pool caches are layed on top of pools. By layering them, we can avoid
170: * the complexity of cache management for pools which would not benefit
171: * from it.
172: */
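
/*
 * Illustrative usage sketch (not compiled): a cache wraps a pool and
 * keeps constructed objects around between uses.  "struct foo",
 * foo_ctor(), foo_dtor() and the "foocache" wait channel are
 * hypothetical names; a NULL ctor/dtor is treated as a no-op (see
 * pool_cache_bootstrap() below).
 *
 *	pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
 *	    0, 0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	... use f ...
 *	pool_cache_put(foo_cache, f);
 */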
173:
1.142 ad 174: static struct pool pcg_normal_pool;
175: static struct pool pcg_large_pool;
1.134 ad 176: static struct pool cache_pool;
177: static struct pool cache_cpu_pool;
1.3 pk 178:
1.189 pooka 179: pool_cache_t pnbuf_cache; /* pathname buffer cache */
180:
1.145 ad 181: /* List of all caches. */
182: TAILQ_HEAD(,pool_cache) pool_cache_head =
183: TAILQ_HEAD_INITIALIZER(pool_cache_head);
184:
1.162 ad 185: int pool_cache_disable; /* global disable for caching */
1.169 yamt 186: static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */
1.145 ad 187:
1.162 ad 188: static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
189: void *);
190: static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
191: void **, paddr_t *, int);
1.134 ad 192: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
193: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
1.175 jym 194: static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
1.196 jym 195: static void pool_cache_transfer(pool_cache_t);
1.3 pk 196:
1.42 thorpej 197: static int pool_catchup(struct pool *);
1.128 christos 198: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 199: struct pool_item_header *);
1.88 chs 200: static void pool_update_curpage(struct pool *);
1.66 thorpej 201:
1.113 yamt 202: static int pool_grow(struct pool *, int);
1.117 yamt 203: static void *pool_allocator_alloc(struct pool *, int);
204: static void pool_allocator_free(struct pool *, void *);
1.3 pk 205:
1.97 yamt 206: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.199 christos 207: void (*)(const char *, ...) __printflike(1, 2));
1.42 thorpej 208: static void pool_print1(struct pool *, const char *,
1.199 christos 209: void (*)(const char *, ...) __printflike(1, 2));
1.3 pk 210:
1.88 chs 211: static int pool_chk_page(struct pool *, const char *,
212: struct pool_item_header *);
213:
1.135 yamt 214: static inline unsigned int
1.97 yamt 215: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
216: const void *v)
217: {
218: const char *cp = v;
1.135 yamt 219: unsigned int idx;
1.97 yamt 220:
221: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 222: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 223: KASSERT(idx < pp->pr_itemsperpage);
224: return idx;
225: }
226:
1.110 perry 227: static inline void
1.97 yamt 228: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
229: void *obj)
230: {
1.135 yamt 231: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
232: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
233: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 234:
1.135 yamt 235: KASSERT((*bitmap & mask) == 0);
236: *bitmap |= mask;
1.97 yamt 237: }
238:
1.110 perry 239: static inline void *
1.97 yamt 240: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
241: {
1.135 yamt 242: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
243: unsigned int idx;
244: int i;
1.97 yamt 245:
1.135 yamt 246: for (i = 0; ; i++) {
247: int bit;
1.97 yamt 248:
1.135 yamt 249: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
250: bit = ffs32(bitmap[i]);
251: if (bit) {
252: pool_item_bitmap_t mask;
253:
254: bit--;
255: idx = (i * BITMAP_SIZE) + bit;
256: mask = 1 << bit;
257: KASSERT((bitmap[i] & mask) != 0);
258: bitmap[i] &= ~mask;
259: break;
260: }
261: }
262: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 263: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 264: }
265:
1.135 yamt 266: static inline void
1.141 yamt 267: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
1.135 yamt 268: {
269: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
270: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
271: int i;
272:
273: for (i = 0; i < n; i++) {
274: bitmap[i] = (pool_item_bitmap_t)-1;
275: }
276: }
277:
1.110 perry 278: static inline int
1.88 chs 279: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
280: {
1.121 yamt 281:
282: /*
 283: * we consider a pool_item_header with a smaller ph_page to be bigger.
284: * (this unnatural ordering is for the benefit of pr_find_pagehead.)
285: */
286:
1.88 chs 287: if (a->ph_page < b->ph_page)
1.121 yamt 288: return (1);
289: else if (a->ph_page > b->ph_page)
1.88 chs 290: return (-1);
291: else
292: return (0);
293: }
294:
295: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
296: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
297:
1.141 yamt 298: static inline struct pool_item_header *
299: pr_find_pagehead_noalign(struct pool *pp, void *v)
300: {
301: struct pool_item_header *ph, tmp;
302:
303: tmp.ph_page = (void *)(uintptr_t)v;
304: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
305: if (ph == NULL) {
306: ph = SPLAY_ROOT(&pp->pr_phtree);
307: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
308: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
309: }
310: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
311: }
312:
313: return ph;
314: }
315:
1.3 pk 316: /*
1.121 yamt 317: * Return the pool page header based on item address.
1.3 pk 318: */
1.110 perry 319: static inline struct pool_item_header *
1.121 yamt 320: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 321: {
1.88 chs 322: struct pool_item_header *ph, tmp;
1.3 pk 323:
1.121 yamt 324: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.141 yamt 325: ph = pr_find_pagehead_noalign(pp, v);
1.121 yamt 326: } else {
1.128 christos 327: void *page =
328: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 329:
330: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 331: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 332: } else {
333: tmp.ph_page = page;
334: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
335: }
336: }
1.3 pk 337:
1.121 yamt 338: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 339: ((char *)ph->ph_page <= (char *)v &&
340: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 341: return ph;
1.3 pk 342: }
343:
1.101 thorpej 344: static void
345: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
346: {
347: struct pool_item_header *ph;
348:
349: while ((ph = LIST_FIRST(pq)) != NULL) {
350: LIST_REMOVE(ph, ph_pagelist);
351: pool_allocator_free(pp, ph->ph_page);
1.134 ad 352: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 353: pool_put(pp->pr_phpool, ph);
354: }
355: }
356:
1.3 pk 357: /*
358: * Remove a page from the pool.
359: */
1.110 perry 360: static inline void
1.61 chs 361: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
362: struct pool_pagelist *pq)
1.3 pk 363: {
364:
1.134 ad 365: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 366:
1.3 pk 367: /*
1.7 thorpej 368: * If the page was idle, decrement the idle page count.
1.3 pk 369: */
1.6 thorpej 370: if (ph->ph_nmissing == 0) {
371: #ifdef DIAGNOSTIC
372: if (pp->pr_nidle == 0)
373: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 374: if (pp->pr_nitems < pp->pr_itemsperpage)
375: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 376: #endif
377: pp->pr_nidle--;
378: }
1.7 thorpej 379:
1.20 thorpej 380: pp->pr_nitems -= pp->pr_itemsperpage;
381:
1.7 thorpej 382: /*
1.101 thorpej 383: * Unlink the page from the pool and queue it for release.
1.7 thorpej 384: */
1.88 chs 385: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 386: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
387: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 388: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
389:
1.7 thorpej 390: pp->pr_npages--;
391: pp->pr_npagefree++;
1.6 thorpej 392:
1.88 chs 393: pool_update_curpage(pp);
1.3 pk 394: }
395:
396: /*
1.94 simonb 397: * Initialize all the pools listed in the "pools" link set.
398: */
399: void
1.117 yamt 400: pool_subsystem_init(void)
1.94 simonb 401: {
1.192 rmind 402: size_t size;
1.191 para 403: int idx;
1.94 simonb 404:
1.134 ad 405: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
1.179 mlelstv 406: mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
1.134 ad 407: cv_init(&pool_busy, "poolbusy");
408:
1.191 para 409: /*
410: * Initialize private page header pool and cache magazine pool if we
411: * haven't done so yet.
412: */
413: for (idx = 0; idx < PHPOOL_MAX; idx++) {
414: static char phpool_names[PHPOOL_MAX][6+1+6+1];
415: int nelem;
416: size_t sz;
417:
418: nelem = PHPOOL_FREELIST_NELEM(idx);
419: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
420: "phpool-%d", nelem);
421: sz = sizeof(struct pool_item_header);
422: if (nelem) {
423: sz = offsetof(struct pool_item_header,
424: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
425: }
426: pool_init(&phpool[idx], sz, 0, 0, 0,
427: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.117 yamt 428: }
1.191 para 429: #ifdef POOL_SUBPAGE
430: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
431: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
432: #endif
433:
434: size = sizeof(pcg_t) +
435: (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
436: pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
437: "pcgnormal", &pool_allocator_meta, IPL_VM);
438:
439: size = sizeof(pcg_t) +
440: (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
441: pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
442: "pcglarge", &pool_allocator_meta, IPL_VM);
1.134 ad 443:
1.156 ad 444: pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
1.191 para 445: 0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
1.134 ad 446:
1.156 ad 447: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
1.191 para 448: 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
1.94 simonb 449: }
450:
451: /*
1.3 pk 452: * Initialize the given pool resource structure.
453: *
454: * We export this routine to allow other kernel parts to declare
1.195 rmind 455: * static pools that must be initialized before kmem(9) is available.
1.3 pk 456: */
457: void
1.42 thorpej 458: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 459: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 460: {
1.116 simonb 461: struct pool *pp1;
1.92 enami 462: size_t trysize, phsize;
1.134 ad 463: int off, slack;
1.3 pk 464:
1.116 simonb 465: #ifdef DEBUG
1.198 christos 466: if (__predict_true(!cold))
467: mutex_enter(&pool_head_lock);
1.116 simonb 468: /*
469: * Check that the pool hasn't already been initialised and
470: * added to the list of all pools.
471: */
1.145 ad 472: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
1.116 simonb 473: if (pp == pp1)
474: panic("pool_init: pool %s already initialised",
475: wchan);
476: }
1.198 christos 477: if (__predict_true(!cold))
478: mutex_exit(&pool_head_lock);
1.116 simonb 479: #endif
480:
1.66 thorpej 481: if (palloc == NULL)
482: palloc = &pool_allocator_kmem;
1.112 bjh21 483: #ifdef POOL_SUBPAGE
484: if (size > palloc->pa_pagesz) {
485: if (palloc == &pool_allocator_kmem)
486: palloc = &pool_allocator_kmem_fullpage;
487: else if (palloc == &pool_allocator_nointr)
488: palloc = &pool_allocator_nointr_fullpage;
489: }
1.66 thorpej 490: #endif /* POOL_SUBPAGE */
1.180 mlelstv 491: if (!cold)
492: mutex_enter(&pool_allocator_lock);
1.178 elad 493: if (palloc->pa_refcnt++ == 0) {
1.112 bjh21 494: if (palloc->pa_pagesz == 0)
1.66 thorpej 495: palloc->pa_pagesz = PAGE_SIZE;
496:
497: TAILQ_INIT(&palloc->pa_list);
498:
1.134 ad 499: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 500: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
501: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.4 thorpej 502: }
1.180 mlelstv 503: if (!cold)
504: mutex_exit(&pool_allocator_lock);
1.3 pk 505:
506: if (align == 0)
507: align = ALIGN(1);
1.14 thorpej 508:
1.120 yamt 509: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 510: size = sizeof(struct pool_item);
1.3 pk 511:
1.78 thorpej 512: size = roundup(size, align);
1.66 thorpej 513: #ifdef DIAGNOSTIC
514: if (size > palloc->pa_pagesz)
1.121 yamt 515: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 516: #endif
1.35 pk 517:
1.3 pk 518: /*
519: * Initialize the pool structure.
520: */
1.88 chs 521: LIST_INIT(&pp->pr_emptypages);
522: LIST_INIT(&pp->pr_fullpages);
523: LIST_INIT(&pp->pr_partpages);
1.134 ad 524: pp->pr_cache = NULL;
1.3 pk 525: pp->pr_curpage = NULL;
526: pp->pr_npages = 0;
527: pp->pr_minitems = 0;
528: pp->pr_minpages = 0;
529: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 530: pp->pr_roflags = flags;
531: pp->pr_flags = 0;
1.35 pk 532: pp->pr_size = size;
1.3 pk 533: pp->pr_align = align;
534: pp->pr_wchan = wchan;
1.66 thorpej 535: pp->pr_alloc = palloc;
1.20 thorpej 536: pp->pr_nitems = 0;
537: pp->pr_nout = 0;
538: pp->pr_hardlimit = UINT_MAX;
539: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 540: pp->pr_hardlimit_ratecap.tv_sec = 0;
541: pp->pr_hardlimit_ratecap.tv_usec = 0;
542: pp->pr_hardlimit_warning_last.tv_sec = 0;
543: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 544: pp->pr_drain_hook = NULL;
545: pp->pr_drain_hook_arg = NULL;
1.125 ad 546: pp->pr_freecheck = NULL;
1.3 pk 547:
548: /*
 549: * Decide whether to put the page header off-page, to avoid
1.92 enami 550: * wasting too large a part of the page or when the item is too big.
 551: * Off-page page headers go into a splay tree, so we can match
 552: * a returned item with its header based on the page address.
 553: * We use 1/16 of the page size and about 8 times the item
 554: * size as the threshold (XXX: tune)
555: *
556: * However, we'll put the header into the page if we can put
557: * it without wasting any items.
558: *
559: * Silently enforce `0 <= ioff < align'.
1.3 pk 560: */
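	/*
	 * Worked example (illustrative only; the header size is
	 * arch-dependent): with a 4096-byte page and a page header of
	 * roughly 64 bytes, items smaller than 4096 / 16 = 256 bytes
	 * keep the header in-page.  A 1024-byte item does not:
	 * 4096 / 1024 = 4 but (4096 - 64) / 1024 = 3, so an item would
	 * be lost and the header comes from phpool instead.  A
	 * 1000-byte item does: both divisions yield 4, so the header
	 * fits in the slack.
	 */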
1.92 enami 561: pp->pr_itemoffset = ioff %= align;
562: /* See the comment below about reserved bytes. */
563: trysize = palloc->pa_pagesz - ((align - ioff) % align);
564: phsize = ALIGN(sizeof(struct pool_item_header));
1.201 para 565: if (pp->pr_roflags & PR_PHINPAGE ||
566: ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 567: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
1.201 para 568: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
1.3 pk 569: /* Use the end of the page for the page header */
1.20 thorpej 570: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 571: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 572: } else {
1.3 pk 573: /* The page header will be taken from our page header pool */
574: pp->pr_phoffset = 0;
1.66 thorpej 575: off = palloc->pa_pagesz;
1.88 chs 576: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 577: }
1.1 pk 578:
1.3 pk 579: /*
580: * Alignment is to take place at `ioff' within the item. This means
581: * we must reserve up to `align - 1' bytes on the page to allow
582: * appropriate positioning of each item.
583: */
584: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 585: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 586: if ((pp->pr_roflags & PR_NOTOUCH)) {
587: int idx;
588:
589: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
590: idx++) {
591: /* nothing */
592: }
593: if (idx >= PHPOOL_MAX) {
594: /*
 595: * if you see this panic, consider tweaking
596: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
597: */
598: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
599: pp->pr_wchan, pp->pr_itemsperpage);
600: }
601: pp->pr_phpool = &phpool[idx];
602: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
603: pp->pr_phpool = &phpool[0];
604: }
605: #if defined(DIAGNOSTIC)
606: else {
607: pp->pr_phpool = NULL;
608: }
609: #endif
1.3 pk 610:
611: /*
612: * Use the slack between the chunks and the page header
613: * for "cache coloring".
614: */
615: slack = off - pp->pr_itemsperpage * pp->pr_size;
616: pp->pr_maxcolor = (slack / align) * align;
617: pp->pr_curcolor = 0;
618:
619: pp->pr_nget = 0;
620: pp->pr_nfail = 0;
621: pp->pr_nput = 0;
622: pp->pr_npagealloc = 0;
623: pp->pr_npagefree = 0;
1.1 pk 624: pp->pr_hiwat = 0;
1.8 thorpej 625: pp->pr_nidle = 0;
1.134 ad 626: pp->pr_refcnt = 0;
1.3 pk 627:
1.157 ad 628: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
1.134 ad 629: cv_init(&pp->pr_cv, wchan);
630: pp->pr_ipl = ipl;
1.1 pk 631:
1.145 ad 632: /* Insert into the list of all pools. */
1.181 mlelstv 633: if (!cold)
1.134 ad 634: mutex_enter(&pool_head_lock);
1.145 ad 635: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
636: if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
637: break;
638: }
639: if (pp1 == NULL)
640: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
641: else
642: TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
1.181 mlelstv 643: if (!cold)
1.134 ad 644: mutex_exit(&pool_head_lock);
645:
1.167 skrll 646: /* Insert this into the list of pools using this allocator. */
1.181 mlelstv 647: if (!cold)
1.134 ad 648: mutex_enter(&palloc->pa_lock);
1.145 ad 649: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
1.181 mlelstv 650: if (!cold)
1.134 ad 651: mutex_exit(&palloc->pa_lock);
1.1 pk 652: }
653:
654: /*
 655: * De-commission a pool resource.
656: */
657: void
1.42 thorpej 658: pool_destroy(struct pool *pp)
1.1 pk 659: {
1.101 thorpej 660: struct pool_pagelist pq;
1.3 pk 661: struct pool_item_header *ph;
1.43 thorpej 662:
1.101 thorpej 663: /* Remove from global pool list */
1.134 ad 664: mutex_enter(&pool_head_lock);
665: while (pp->pr_refcnt != 0)
666: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 667: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.101 thorpej 668: if (drainpp == pp)
669: drainpp = NULL;
1.134 ad 670: mutex_exit(&pool_head_lock);
1.101 thorpej 671:
672: /* Remove this pool from its allocator's list of pools. */
1.134 ad 673: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 674: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 675: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 676:
1.178 elad 677: mutex_enter(&pool_allocator_lock);
678: if (--pp->pr_alloc->pa_refcnt == 0)
679: mutex_destroy(&pp->pr_alloc->pa_lock);
680: mutex_exit(&pool_allocator_lock);
681:
1.134 ad 682: mutex_enter(&pp->pr_lock);
1.101 thorpej 683:
1.134 ad 684: KASSERT(pp->pr_cache == NULL);
1.3 pk 685:
686: #ifdef DIAGNOSTIC
1.20 thorpej 687: if (pp->pr_nout != 0) {
1.80 provos 688: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 689: pp->pr_nout);
1.3 pk 690: }
691: #endif
1.1 pk 692:
1.101 thorpej 693: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
694: KASSERT(LIST_EMPTY(&pp->pr_partpages));
695:
1.3 pk 696: /* Remove all pages */
1.101 thorpej 697: LIST_INIT(&pq);
1.88 chs 698: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 699: pr_rmpage(pp, ph, &pq);
700:
1.134 ad 701: mutex_exit(&pp->pr_lock);
1.3 pk 702:
1.101 thorpej 703: pr_pagelist_free(pp, &pq);
1.134 ad 704: cv_destroy(&pp->pr_cv);
705: mutex_destroy(&pp->pr_lock);
1.1 pk 706: }
707:
1.68 thorpej 708: void
709: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
710: {
711:
712: /* XXX no locking -- must be used just after pool_init() */
713: #ifdef DIAGNOSTIC
714: if (pp->pr_drain_hook != NULL)
715: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
716: #endif
717: pp->pr_drain_hook = fn;
718: pp->pr_drain_hook_arg = arg;
719: }
720:
1.88 chs 721: static struct pool_item_header *
1.128 christos 722: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 723: {
724: struct pool_item_header *ph;
725:
726: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 727: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.134 ad 728: else
1.97 yamt 729: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 730:
731: return (ph);
732: }
1.1 pk 733:
734: /*
1.134 ad 735: * Grab an item from the pool.
1.1 pk 736: */
1.3 pk 737: void *
1.56 sommerfe 738: pool_get(struct pool *pp, int flags)
1.1 pk 739: {
740: struct pool_item *pi;
1.3 pk 741: struct pool_item_header *ph;
1.55 thorpej 742: void *v;
1.1 pk 743:
1.2 pk 744: #ifdef DIAGNOSTIC
1.184 rmind 745: if (pp->pr_itemsperpage == 0)
746: panic("pool_get: pool '%s': pr_itemsperpage is zero, "
747: "pool not initialized?", pp->pr_wchan);
1.185 rmind 748: if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
749: !cold && panicstr == NULL)
1.184 rmind 750: panic("pool '%s' is IPL_NONE, but called from "
751: "interrupt context\n", pp->pr_wchan);
752: #endif
1.155 ad 753: if (flags & PR_WAITOK) {
1.154 yamt 754: ASSERT_SLEEPABLE();
1.155 ad 755: }
1.1 pk 756:
1.134 ad 757: mutex_enter(&pp->pr_lock);
1.20 thorpej 758: startover:
759: /*
760: * Check to see if we've reached the hard limit. If we have,
761: * and we can wait, then wait until an item has been returned to
762: * the pool.
763: */
764: #ifdef DIAGNOSTIC
1.34 thorpej 765: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.134 ad 766: mutex_exit(&pp->pr_lock);
1.20 thorpej 767: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
768: }
769: #endif
1.34 thorpej 770: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 771: if (pp->pr_drain_hook != NULL) {
772: /*
773: * Since the drain hook is going to free things
774: * back to the pool, unlock, call the hook, re-lock,
775: * and check the hardlimit condition again.
776: */
1.134 ad 777: mutex_exit(&pp->pr_lock);
1.68 thorpej 778: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 779: mutex_enter(&pp->pr_lock);
1.68 thorpej 780: if (pp->pr_nout < pp->pr_hardlimit)
781: goto startover;
782: }
783:
1.29 sommerfe 784: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 785: /*
786: * XXX: A warning isn't logged in this case. Should
787: * it be?
788: */
789: pp->pr_flags |= PR_WANTED;
1.134 ad 790: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.20 thorpej 791: goto startover;
792: }
1.31 thorpej 793:
794: /*
795: * Log a message that the hard limit has been hit.
796: */
797: if (pp->pr_hardlimit_warning != NULL &&
798: ratecheck(&pp->pr_hardlimit_warning_last,
799: &pp->pr_hardlimit_ratecap))
800: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 801:
802: pp->pr_nfail++;
803:
1.134 ad 804: mutex_exit(&pp->pr_lock);
1.20 thorpej 805: return (NULL);
806: }
807:
1.3 pk 808: /*
809: * The convention we use is that if `curpage' is not NULL, then
810: * it points at a non-empty bucket. In particular, `curpage'
811: * never points at a page header which has PR_PHINPAGE set and
812: * has no items in its bucket.
813: */
1.20 thorpej 814: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 815: int error;
816:
1.20 thorpej 817: #ifdef DIAGNOSTIC
818: if (pp->pr_nitems != 0) {
1.134 ad 819: mutex_exit(&pp->pr_lock);
1.20 thorpej 820: printf("pool_get: %s: curpage NULL, nitems %u\n",
821: pp->pr_wchan, pp->pr_nitems);
1.80 provos 822: panic("pool_get: nitems inconsistent");
1.20 thorpej 823: }
824: #endif
825:
1.21 thorpej 826: /*
827: * Call the back-end page allocator for more memory.
828: * Release the pool lock, as the back-end page allocator
829: * may block.
830: */
1.113 yamt 831: error = pool_grow(pp, flags);
832: if (error != 0) {
1.21 thorpej 833: /*
1.55 thorpej 834: * We were unable to allocate a page or item
835: * header, but we released the lock during
836: * allocation, so perhaps items were freed
837: * back to the pool. Check for this case.
1.21 thorpej 838: */
839: if (pp->pr_curpage != NULL)
840: goto startover;
1.15 pk 841:
1.117 yamt 842: pp->pr_nfail++;
1.134 ad 843: mutex_exit(&pp->pr_lock);
1.117 yamt 844: return (NULL);
1.1 pk 845: }
1.3 pk 846:
1.20 thorpej 847: /* Start the allocation process over. */
848: goto startover;
1.3 pk 849: }
1.97 yamt 850: if (pp->pr_roflags & PR_NOTOUCH) {
851: #ifdef DIAGNOSTIC
852: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1.134 ad 853: mutex_exit(&pp->pr_lock);
1.97 yamt 854: panic("pool_get: %s: page empty", pp->pr_wchan);
855: }
856: #endif
857: v = pr_item_notouch_get(pp, ph);
858: } else {
1.102 chs 859: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 860: if (__predict_false(v == NULL)) {
1.134 ad 861: mutex_exit(&pp->pr_lock);
1.97 yamt 862: panic("pool_get: %s: page empty", pp->pr_wchan);
863: }
1.20 thorpej 864: #ifdef DIAGNOSTIC
1.97 yamt 865: if (__predict_false(pp->pr_nitems == 0)) {
1.134 ad 866: mutex_exit(&pp->pr_lock);
1.97 yamt 867: printf("pool_get: %s: items on itemlist, nitems %u\n",
868: pp->pr_wchan, pp->pr_nitems);
869: panic("pool_get: nitems inconsistent");
870: }
1.65 enami 871: #endif
1.56 sommerfe 872:
1.65 enami 873: #ifdef DIAGNOSTIC
1.97 yamt 874: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
875: panic("pool_get(%s): free list modified: "
876: "magic=%x; page %p; item addr %p\n",
877: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
878: }
1.3 pk 879: #endif
880:
1.97 yamt 881: /*
882: * Remove from item list.
883: */
1.102 chs 884: LIST_REMOVE(pi, pi_list);
1.97 yamt 885: }
1.20 thorpej 886: pp->pr_nitems--;
887: pp->pr_nout++;
1.6 thorpej 888: if (ph->ph_nmissing == 0) {
889: #ifdef DIAGNOSTIC
1.34 thorpej 890: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 891: panic("pool_get: nidle inconsistent");
892: #endif
893: pp->pr_nidle--;
1.88 chs 894:
895: /*
896: * This page was previously empty. Move it to the list of
897: * partially-full pages. This page is already curpage.
898: */
899: LIST_REMOVE(ph, ph_pagelist);
900: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 901: }
1.3 pk 902: ph->ph_nmissing++;
1.97 yamt 903: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 904: #ifdef DIAGNOSTIC
1.97 yamt 905: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 906: !LIST_EMPTY(&ph->ph_itemlist))) {
1.134 ad 907: mutex_exit(&pp->pr_lock);
1.21 thorpej 908: panic("pool_get: %s: nmissing inconsistent",
909: pp->pr_wchan);
910: }
911: #endif
1.3 pk 912: /*
1.88 chs 913: * This page is now full. Move it to the full list
914: * and select a new current page.
1.3 pk 915: */
1.88 chs 916: LIST_REMOVE(ph, ph_pagelist);
917: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
918: pool_update_curpage(pp);
1.1 pk 919: }
1.3 pk 920:
921: pp->pr_nget++;
1.20 thorpej 922:
923: /*
924: * If we have a low water mark and we are now below that low
925: * water mark, add more items to the pool.
926: */
1.53 thorpej 927: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 928: /*
929: * XXX: Should we log a warning? Should we set up a timeout
930: * to try again in a second or so? The latter could break
931: * a caller's assumptions about interrupt protection, etc.
932: */
933: }
934:
1.134 ad 935: mutex_exit(&pp->pr_lock);
1.125 ad 936: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
937: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 938: return (v);
939: }
940:
941: /*
1.43 thorpej 942: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 943: */
1.43 thorpej 944: static void
1.101 thorpej 945: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 946: {
947: struct pool_item *pi = v;
1.3 pk 948: struct pool_item_header *ph;
949:
1.134 ad 950: KASSERT(mutex_owned(&pp->pr_lock));
1.125 ad 951: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 952: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 953:
1.30 thorpej 954: #ifdef DIAGNOSTIC
1.34 thorpej 955: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 956: printf("pool %s: putting with none out\n",
957: pp->pr_wchan);
958: panic("pool_put");
959: }
960: #endif
1.3 pk 961:
1.121 yamt 962: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.3 pk 963: panic("pool_put: %s: page header missing", pp->pr_wchan);
964: }
1.28 thorpej 965:
1.3 pk 966: /*
967: * Return to item list.
968: */
1.97 yamt 969: if (pp->pr_roflags & PR_NOTOUCH) {
970: pr_item_notouch_put(pp, ph, v);
971: } else {
1.2 pk 972: #ifdef DIAGNOSTIC
1.97 yamt 973: pi->pi_magic = PI_MAGIC;
1.3 pk 974: #endif
1.32 chs 975: #ifdef DEBUG
1.97 yamt 976: {
977: int i, *ip = v;
1.32 chs 978:
1.97 yamt 979: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
980: *ip++ = PI_MAGIC;
981: }
1.32 chs 982: }
983: #endif
984:
1.102 chs 985: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 986: }
1.79 thorpej 987: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 988: ph->ph_nmissing--;
989: pp->pr_nput++;
1.20 thorpej 990: pp->pr_nitems++;
991: pp->pr_nout--;
1.3 pk 992:
993: /* Cancel "pool empty" condition if it exists */
994: if (pp->pr_curpage == NULL)
995: pp->pr_curpage = ph;
996:
997: if (pp->pr_flags & PR_WANTED) {
998: pp->pr_flags &= ~PR_WANTED;
1.134 ad 999: cv_broadcast(&pp->pr_cv);
1.3 pk 1000: }
1001:
1002: /*
1.88 chs 1003: * If this page is now empty, do one of two things:
1.21 thorpej 1004: *
1.88 chs 1005: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1006: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1007: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1008: * CLAIM.
1.21 thorpej 1009: *
1.88 chs 1010: * (2) Otherwise, move the page to the empty page list.
1011: *
1012: * Either way, select a new current page (so we use a partially-full
1013: * page if one is available).
1.3 pk 1014: */
1015: if (ph->ph_nmissing == 0) {
1.6 thorpej 1016: pp->pr_nidle++;
1.90 thorpej 1017: if (pp->pr_npages > pp->pr_minpages &&
1.152 yamt 1018: pp->pr_npages > pp->pr_maxpages) {
1.101 thorpej 1019: pr_rmpage(pp, ph, pq);
1.3 pk 1020: } else {
1.88 chs 1021: LIST_REMOVE(ph, ph_pagelist);
1022: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1023:
1.21 thorpej 1024: /*
1025: * Update the timestamp on the page. A page must
1026: * be idle for some period of time before it can
1027: * be reclaimed by the pagedaemon. This minimizes
1028: * ping-pong'ing for memory.
1.151 yamt 1029: *
1030: * note for 64-bit time_t: truncating to 32-bit is not
1031: * a problem for our usage.
1.21 thorpej 1032: */
1.151 yamt 1033: ph->ph_time = time_uptime;
1.1 pk 1034: }
1.88 chs 1035: pool_update_curpage(pp);
1.1 pk 1036: }
1.88 chs 1037:
1.21 thorpej 1038: /*
1.88 chs 1039: * If the page was previously completely full, move it to the
1040: * partially-full list and make it the current page. The next
1041: * allocation will get the item from this page, instead of
1042: * further fragmenting the pool.
1.21 thorpej 1043: */
1044: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1045: LIST_REMOVE(ph, ph_pagelist);
1046: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1047: pp->pr_curpage = ph;
1048: }
1.43 thorpej 1049: }
1050:
1.56 sommerfe 1051: void
1052: pool_put(struct pool *pp, void *v)
1053: {
1.101 thorpej 1054: struct pool_pagelist pq;
1055:
1056: LIST_INIT(&pq);
1.56 sommerfe 1057:
1.134 ad 1058: mutex_enter(&pp->pr_lock);
1.101 thorpej 1059: pool_do_put(pp, v, &pq);
1.134 ad 1060: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1061:
1.102 chs 1062: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1063: }
1.57 sommerfe 1064:
1.74 thorpej 1065: /*
1.113 yamt 1066: * pool_grow: grow a pool by a page.
1067: *
1068: * => called with pool locked.
1069: * => unlock and relock the pool.
1070: * => return with pool locked.
1071: */
1072:
1073: static int
1074: pool_grow(struct pool *pp, int flags)
1075: {
1076: struct pool_item_header *ph = NULL;
1077: char *cp;
1078:
1.134 ad 1079: mutex_exit(&pp->pr_lock);
1.113 yamt 1080: cp = pool_allocator_alloc(pp, flags);
1081: if (__predict_true(cp != NULL)) {
1082: ph = pool_alloc_item_header(pp, cp, flags);
1083: }
1084: if (__predict_false(cp == NULL || ph == NULL)) {
1085: if (cp != NULL) {
1086: pool_allocator_free(pp, cp);
1087: }
1.134 ad 1088: mutex_enter(&pp->pr_lock);
1.113 yamt 1089: return ENOMEM;
1090: }
1091:
1.134 ad 1092: mutex_enter(&pp->pr_lock);
1.113 yamt 1093: pool_prime_page(pp, cp, ph);
1094: pp->pr_npagealloc++;
1095: return 0;
1096: }
1097:
1098: /*
1.74 thorpej 1099: * Add N items to the pool.
1100: */
1101: int
1102: pool_prime(struct pool *pp, int n)
1103: {
1.75 simonb 1104: int newpages;
1.113 yamt 1105: int error = 0;
1.74 thorpej 1106:
1.134 ad 1107: mutex_enter(&pp->pr_lock);
1.74 thorpej 1108:
1109: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1110:
1111: while (newpages-- > 0) {
1.113 yamt 1112: error = pool_grow(pp, PR_NOWAIT);
1113: if (error) {
1.74 thorpej 1114: break;
1115: }
1116: pp->pr_minpages++;
1117: }
1118:
1119: if (pp->pr_minpages >= pp->pr_maxpages)
1120: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1121:
1.134 ad 1122: mutex_exit(&pp->pr_lock);
1.113 yamt 1123: return error;
1.74 thorpej 1124: }
1.55 thorpej 1125:
1126: /*
1.3 pk 1127: * Add a page worth of items to the pool.
1.21 thorpej 1128: *
1129: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1130: */
1.55 thorpej 1131: static void
1.128 christos 1132: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1133: {
1134: struct pool_item *pi;
1.128 christos 1135: void *cp = storage;
1.125 ad 1136: const unsigned int align = pp->pr_align;
1137: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1138: int n;
1.36 pk 1139:
1.134 ad 1140: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1141:
1.66 thorpej 1142: #ifdef DIAGNOSTIC
1.121 yamt 1143: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1.150 skrll 1144: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1145: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1146: #endif
1.3 pk 1147:
1148: /*
1149: * Insert page header.
1150: */
1.88 chs 1151: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1152: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1153: ph->ph_page = storage;
1154: ph->ph_nmissing = 0;
1.151 yamt 1155: ph->ph_time = time_uptime;
1.88 chs 1156: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1157: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1158:
1.6 thorpej 1159: pp->pr_nidle++;
1160:
1.3 pk 1161: /*
1162: * Color this page.
1163: */
1.141 yamt 1164: ph->ph_off = pp->pr_curcolor;
1165: cp = (char *)cp + ph->ph_off;
1.3 pk 1166: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1167: pp->pr_curcolor = 0;
1168:
1169: /*
 1170: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1171: */
1172: if (ioff != 0)
1.128 christos 1173: cp = (char *)cp + align - ioff;
1.3 pk 1174:
1.125 ad 1175: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1176:
1.3 pk 1177: /*
1178: * Insert remaining chunks on the bucket list.
1179: */
1180: n = pp->pr_itemsperpage;
1.20 thorpej 1181: pp->pr_nitems += n;
1.3 pk 1182:
1.97 yamt 1183: if (pp->pr_roflags & PR_NOTOUCH) {
1.141 yamt 1184: pr_item_notouch_init(pp, ph);
1.97 yamt 1185: } else {
1186: while (n--) {
1187: pi = (struct pool_item *)cp;
1.78 thorpej 1188:
1.97 yamt 1189: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1190:
1.97 yamt 1191: /* Insert on page list */
1.102 chs 1192: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1193: #ifdef DIAGNOSTIC
1.97 yamt 1194: pi->pi_magic = PI_MAGIC;
1.3 pk 1195: #endif
1.128 christos 1196: cp = (char *)cp + pp->pr_size;
1.125 ad 1197:
1198: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1199: }
1.3 pk 1200: }
1201:
1202: /*
1203: * If the pool was depleted, point at the new page.
1204: */
1205: if (pp->pr_curpage == NULL)
1206: pp->pr_curpage = ph;
1207:
1208: if (++pp->pr_npages > pp->pr_hiwat)
1209: pp->pr_hiwat = pp->pr_npages;
1210: }
1211:
1.20 thorpej 1212: /*
1.52 thorpej 1213: * Used by pool_get() when nitems drops below the low water mark. This
1.88 chs 1214: * is used to catch pr_nitems up to the low water mark.
1.20 thorpej 1215: *
1.21 thorpej 1216: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1217: *
1.73 thorpej 1218: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1219: * with it locked.
1220: */
1221: static int
1.42 thorpej 1222: pool_catchup(struct pool *pp)
1.20 thorpej 1223: {
1224: int error = 0;
1225:
1.54 thorpej 1226: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1227: error = pool_grow(pp, PR_NOWAIT);
1228: if (error) {
1.20 thorpej 1229: break;
1230: }
1231: }
1.113 yamt 1232: return error;
1.20 thorpej 1233: }
1234:
1.88 chs 1235: static void
1236: pool_update_curpage(struct pool *pp)
1237: {
1238:
1239: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1240: if (pp->pr_curpage == NULL) {
1241: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1242: }
1.168 yamt 1243: KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1244: (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1.88 chs 1245: }
1246:
1.3 pk 1247: void
1.42 thorpej 1248: pool_setlowat(struct pool *pp, int n)
1.3 pk 1249: {
1.15 pk 1250:
1.134 ad 1251: mutex_enter(&pp->pr_lock);
1.21 thorpej 1252:
1.3 pk 1253: pp->pr_minitems = n;
1.15 pk 1254: pp->pr_minpages = (n == 0)
1255: ? 0
1.18 thorpej 1256: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1257:
1258: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1259: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1260: /*
1261: * XXX: Should we log a warning? Should we set up a timeout
1262: * to try again in a second or so? The latter could break
1263: * a caller's assumptions about interrupt protection, etc.
1264: */
1265: }
1.21 thorpej 1266:
1.134 ad 1267: mutex_exit(&pp->pr_lock);
1.3 pk 1268: }
1269:
1270: void
1.42 thorpej 1271: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1272: {
1.15 pk 1273:
1.134 ad 1274: mutex_enter(&pp->pr_lock);
1.21 thorpej 1275:
1.15 pk 1276: pp->pr_maxpages = (n == 0)
1277: ? 0
1.18 thorpej 1278: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1279:
1.134 ad 1280: mutex_exit(&pp->pr_lock);
1.3 pk 1281: }
1282:
1.20 thorpej 1283: void
1.42 thorpej 1284: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1285: {
1286:
1.134 ad 1287: mutex_enter(&pp->pr_lock);
1.20 thorpej 1288:
1289: pp->pr_hardlimit = n;
1290: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1291: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1292: pp->pr_hardlimit_warning_last.tv_sec = 0;
1293: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1294:
1295: /*
1.21 thorpej 1296: * In-line version of pool_sethiwat(), because we don't want to
1297: * release the lock.
1.20 thorpej 1298: */
1299: pp->pr_maxpages = (n == 0)
1300: ? 0
1301: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1302:
1.134 ad 1303: mutex_exit(&pp->pr_lock);
1.20 thorpej 1304: }
1.3 pk 1305:
1306: /*
1307: * Release all complete pages that have not been used recently.
1.184 rmind 1308: *
1.197 jym 1309: * Must not be called from interrupt context.
1.3 pk 1310: */
1.66 thorpej 1311: int
1.56 sommerfe 1312: pool_reclaim(struct pool *pp)
1.3 pk 1313: {
1314: struct pool_item_header *ph, *phnext;
1.61 chs 1315: struct pool_pagelist pq;
1.151 yamt 1316: uint32_t curtime;
1.134 ad 1317: bool klock;
1318: int rv;
1.3 pk 1319:
1.197 jym 1320: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.184 rmind 1321:
1.68 thorpej 1322: if (pp->pr_drain_hook != NULL) {
1323: /*
1324: * The drain hook must be called with the pool unlocked.
1325: */
1326: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1327: }
1328:
1.134 ad 1329: /*
1.157 ad 1330: * XXXSMP Because we do not want to cause non-MPSAFE code
1331: * to block.
1.134 ad 1332: */
1333: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1334: pp->pr_ipl == IPL_SOFTSERIAL) {
1335: KERNEL_LOCK(1, NULL);
1336: klock = true;
1337: } else
1338: klock = false;
1339:
1340: /* Reclaim items from the pool's cache (if any). */
1341: if (pp->pr_cache != NULL)
1342: pool_cache_invalidate(pp->pr_cache);
1343:
1344: if (mutex_tryenter(&pp->pr_lock) == 0) {
1345: if (klock) {
1346: KERNEL_UNLOCK_ONE(NULL);
1347: }
1.66 thorpej 1348: return (0);
1.134 ad 1349: }
1.68 thorpej 1350:
1.88 chs 1351: LIST_INIT(&pq);
1.43 thorpej 1352:
1.151 yamt 1353: curtime = time_uptime;
1.21 thorpej 1354:
1.88 chs 1355: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1356: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1357:
1358: /* Check our minimum page claim */
1359: if (pp->pr_npages <= pp->pr_minpages)
1360: break;
1361:
1.88 chs 1362: KASSERT(ph->ph_nmissing == 0);
1.191 para 1363: if (curtime - ph->ph_time < pool_inactive_time)
1.88 chs 1364: continue;
1.21 thorpej 1365:
1.88 chs 1366: /*
1367: * If freeing this page would put us below
1368: * the low water mark, stop now.
1369: */
1370: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1371: pp->pr_minitems)
1372: break;
1.21 thorpej 1373:
1.88 chs 1374: pr_rmpage(pp, ph, &pq);
1.3 pk 1375: }
1376:
1.134 ad 1377: mutex_exit(&pp->pr_lock);
1378:
1379: if (LIST_EMPTY(&pq))
1380: rv = 0;
1381: else {
1382: pr_pagelist_free(pp, &pq);
1383: rv = 1;
1384: }
1385:
1386: if (klock) {
1387: KERNEL_UNLOCK_ONE(NULL);
1388: }
1.66 thorpej 1389:
1.134 ad 1390: return (rv);
1.3 pk 1391: }
1392:
1393: /*
1.197 jym 1394: * Drain pools, one at a time. The drained pool is returned in *ppp.
1.131 ad 1395: *
1.134 ad 1396: * Note, must never be called from interrupt context.
1.3 pk 1397: */
1.197 jym 1398: bool
1399: pool_drain(struct pool **ppp)
1.3 pk 1400: {
1.197 jym 1401: bool reclaimed;
1.3 pk 1402: struct pool *pp;
1.134 ad 1403:
1.145 ad 1404: KASSERT(!TAILQ_EMPTY(&pool_head));
1.3 pk 1405:
1.61 chs 1406: pp = NULL;
1.134 ad 1407:
1408: /* Find next pool to drain, and add a reference. */
1409: mutex_enter(&pool_head_lock);
1410: do {
1411: if (drainpp == NULL) {
1.145 ad 1412: drainpp = TAILQ_FIRST(&pool_head);
1.134 ad 1413: }
1414: if (drainpp != NULL) {
1415: pp = drainpp;
1.145 ad 1416: drainpp = TAILQ_NEXT(pp, pr_poollist);
1.134 ad 1417: }
1418: /*
1419: * Skip completely idle pools. We depend on at least
1420: * one pool in the system being active.
1421: */
1422: } while (pp == NULL || pp->pr_npages == 0);
1423: pp->pr_refcnt++;
1424: mutex_exit(&pool_head_lock);
1425:
 1426: /* Drain the cache (if any) and the pool. */
1.186 pooka 1427: reclaimed = pool_reclaim(pp);
1.134 ad 1428:
1429: /* Finally, unlock the pool. */
1430: mutex_enter(&pool_head_lock);
1431: pp->pr_refcnt--;
1432: cv_broadcast(&pool_busy);
1433: mutex_exit(&pool_head_lock);
1.186 pooka 1434:
1.197 jym 1435: if (ppp != NULL)
1436: *ppp = pp;
1437:
1.186 pooka 1438: return reclaimed;
1.3 pk 1439: }
1440:
1441: /*
1442: * Diagnostic helpers.
1443: */
1.21 thorpej 1444:
1.25 thorpej 1445: void
1.108 yamt 1446: pool_printall(const char *modif, void (*pr)(const char *, ...))
1447: {
1448: struct pool *pp;
1449:
1.145 ad 1450: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.108 yamt 1451: pool_printit(pp, modif, pr);
1452: }
1453: }
1454:
1455: void
1.42 thorpej 1456: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1457: {
1458:
1459: if (pp == NULL) {
1460: (*pr)("Must specify a pool to print.\n");
1461: return;
1462: }
1463:
1464: pool_print1(pp, modif, pr);
1465: }
1466:
1.21 thorpej 1467: static void
1.124 yamt 1468: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1469: void (*pr)(const char *, ...))
1.88 chs 1470: {
1471: struct pool_item_header *ph;
1472: #ifdef DIAGNOSTIC
1473: struct pool_item *pi;
1474: #endif
1475:
1476: LIST_FOREACH(ph, pl, ph_pagelist) {
1.151 yamt 1477: (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1478: ph->ph_page, ph->ph_nmissing, ph->ph_time);
1.88 chs 1479: #ifdef DIAGNOSTIC
1.97 yamt 1480: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1481: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1482: if (pi->pi_magic != PI_MAGIC) {
1483: (*pr)("\t\t\titem %p, magic 0x%x\n",
1484: pi, pi->pi_magic);
1485: }
1.88 chs 1486: }
1487: }
1488: #endif
1489: }
1490: }
1491:
1492: static void
1.42 thorpej 1493: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1494: {
1.25 thorpej 1495: struct pool_item_header *ph;
1.134 ad 1496: pool_cache_t pc;
1497: pcg_t *pcg;
1498: pool_cache_cpu_t *cc;
1499: uint64_t cpuhit, cpumiss;
1.44 thorpej 1500: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1501: char c;
1502:
1503: while ((c = *modif++) != '\0') {
1504: if (c == 'l')
1505: print_log = 1;
1506: if (c == 'p')
1507: print_pagelist = 1;
1.44 thorpej 1508: if (c == 'c')
1509: print_cache = 1;
1.25 thorpej 1510: }
1511:
1.134 ad 1512: if ((pc = pp->pr_cache) != NULL) {
1513: (*pr)("POOL CACHE");
1514: } else {
1515: (*pr)("POOL");
1516: }
1517:
1518: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1519: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1520: pp->pr_roflags);
1.66 thorpej 1521: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1522: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1523: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1524: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1525: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1526:
1.134 ad 1527: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1528: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1529: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1530: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1531:
1532: if (print_pagelist == 0)
1533: goto skip_pagelist;
1534:
1.88 chs 1535: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1536: (*pr)("\n\tempty page list:\n");
1.97 yamt 1537: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1538: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1539: (*pr)("\n\tfull page list:\n");
1.97 yamt 1540: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1541: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1542: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1543: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1544:
1.25 thorpej 1545: if (pp->pr_curpage == NULL)
1546: (*pr)("\tno current page\n");
1547: else
1548: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1549:
1550: skip_pagelist:
1551: if (print_log == 0)
1552: goto skip_log;
1553:
1554: (*pr)("\n");
1.3 pk 1555:
1.25 thorpej 1556: skip_log:
1.44 thorpej 1557:
1.102 chs 1558: #define PR_GROUPLIST(pcg) \
1559: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1.142 ad 1560: for (i = 0; i < pcg->pcg_size; i++) { \
1.102 chs 1561: if (pcg->pcg_objects[i].pcgo_pa != \
1562: POOL_PADDR_INVALID) { \
1563: (*pr)("\t\t\t%p, 0x%llx\n", \
1564: pcg->pcg_objects[i].pcgo_va, \
1565: (unsigned long long) \
1566: pcg->pcg_objects[i].pcgo_pa); \
1567: } else { \
1568: (*pr)("\t\t\t%p\n", \
1569: pcg->pcg_objects[i].pcgo_va); \
1570: } \
1571: }
1572:
1.134 ad 1573: if (pc != NULL) {
1574: cpuhit = 0;
1575: cpumiss = 0;
1.183 ad 1576: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.134 ad 1577: if ((cc = pc->pc_cpus[i]) == NULL)
1578: continue;
1579: cpuhit += cc->cc_hits;
1580: cpumiss += cc->cc_misses;
1581: }
1582: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1583: (*pr)("\tcache layer hits %llu misses %llu\n",
1584: pc->pc_hits, pc->pc_misses);
1585: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1586: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1587: pc->pc_contended);
1588: (*pr)("\tcache layer empty groups %u full groups %u\n",
1589: pc->pc_nempty, pc->pc_nfull);
1590: if (print_cache) {
1591: (*pr)("\tfull cache groups:\n");
1592: for (pcg = pc->pc_fullgroups; pcg != NULL;
1593: pcg = pcg->pcg_next) {
1594: PR_GROUPLIST(pcg);
1595: }
1596: (*pr)("\tempty cache groups:\n");
1597: for (pcg = pc->pc_emptygroups; pcg != NULL;
1598: pcg = pcg->pcg_next) {
1599: PR_GROUPLIST(pcg);
1600: }
1.103 chs 1601: }
1.44 thorpej 1602: }
1.102 chs 1603: #undef PR_GROUPLIST
1.88 chs 1604: }
1605:
1606: static int
1607: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1608: {
1609: struct pool_item *pi;
1.128 christos 1610: void *page;
1.88 chs 1611: int n;
1612:
1.121 yamt 1613: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1614: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1615: if (page != ph->ph_page &&
1616: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1617: if (label != NULL)
1618: printf("%s: ", label);
1619: printf("pool(%p:%s): page inconsistency: page %p;"
1620: " at page head addr %p (p %p)\n", pp,
1621: pp->pr_wchan, ph->ph_page,
1622: ph, page);
1623: return 1;
1624: }
1.88 chs 1625: }
1.3 pk 1626:
1.97 yamt 1627: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1628: return 0;
1629:
1.102 chs 1630: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1631: pi != NULL;
1.102 chs 1632: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1633:
1634: #ifdef DIAGNOSTIC
1635: if (pi->pi_magic != PI_MAGIC) {
1636: if (label != NULL)
1637: printf("%s: ", label);
1638: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1639: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1640: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1641: n, pi);
1.88 chs 1642: panic("pool");
1643: }
1644: #endif
1.121 yamt 1645: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1646: continue;
1647: }
1.128 christos 1648: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1649: if (page == ph->ph_page)
1650: continue;
1651:
1652: if (label != NULL)
1653: printf("%s: ", label);
1654: printf("pool(%p:%s): page inconsistency: page %p;"
1655: " item ordinal %d; addr %p (p %p)\n", pp,
1656: pp->pr_wchan, ph->ph_page,
1657: n, pi, page);
1658: return 1;
1659: }
1660: return 0;
1.3 pk 1661: }
1662:
1.88 chs 1663:
1.3 pk 1664: int
1.42 thorpej 1665: pool_chk(struct pool *pp, const char *label)
1.3 pk 1666: {
1667: struct pool_item_header *ph;
1668: int r = 0;
1669:
1.134 ad 1670: mutex_enter(&pp->pr_lock);
1.88 chs 1671: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1672: r = pool_chk_page(pp, label, ph);
1673: if (r) {
1674: goto out;
1675: }
1676: }
1677: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1678: r = pool_chk_page(pp, label, ph);
1679: if (r) {
1.3 pk 1680: goto out;
1681: }
1.88 chs 1682: }
1683: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1684: r = pool_chk_page(pp, label, ph);
1685: if (r) {
1.3 pk 1686: goto out;
1687: }
1688: }
1.88 chs 1689:
1.3 pk 1690: out:
1.134 ad 1691: mutex_exit(&pp->pr_lock);
1.3 pk 1692: return (r);
1.43 thorpej 1693: }
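
/*
 * A hedged usage sketch: pool_chk() takes pr_lock and walks the empty,
 * full and partial page lists, returning non-zero at the first
 * inconsistency reported by pool_chk_page().  The MYDRV_DEBUG option,
 * softc layout and pool name below are hypothetical.
 */
#ifdef MYDRV_DEBUG
	/* Verify the receive-buffer pool before tearing it down. */
	if (pool_chk(&sc->sc_rxpool, "mydrv rx") != 0)
		printf("%s: rx pool inconsistent\n", device_xname(sc->sc_dev));
#endif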
1694:
1695: /*
1696: * pool_cache_init:
1697: *
1698: * Initialize a pool cache.
1.134 ad 1699: */
1700: pool_cache_t
1701: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1702: const char *wchan, struct pool_allocator *palloc, int ipl,
1703: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1704: {
1705: pool_cache_t pc;
1706:
1707: pc = pool_get(&cache_pool, PR_WAITOK);
1708: if (pc == NULL)
1709: return NULL;
1710:
1711: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1712: palloc, ipl, ctor, dtor, arg);
1713:
1714: return pc;
1715: }
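
/*
 * A minimal sketch of a subsystem creating a cache through this
 * interface.  Passing a NULL allocator with IPL_NONE selects
 * pool_allocator_nointr (see pool_cache_bootstrap() below).  The
 * struct frob type, frob_ctor()/frob_dtor() and the "frobpl" wait
 * channel are hypothetical.
 */
static pool_cache_t frob_cache;

static int
frob_ctor(void *arg, void *obj, int flags)
{
	struct frob *f = obj;

	/* Expensive, cacheable initialization done once per object. */
	memset(f, 0, sizeof(*f));
	return 0;
}

static void
frob_dtor(void *arg, void *obj)
{

	/* Undo whatever frob_ctor() set up. */
}

static void
frob_subsystem_init(void)
{

	frob_cache = pool_cache_init(sizeof(struct frob), 0, 0, 0,
	    "frobpl", NULL, IPL_NONE, frob_ctor, frob_dtor, NULL);
}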
1716:
1717: /*
1718: * pool_cache_bootstrap:
1.43 thorpej 1719: *
1.134 ad 1720: * Kernel-private version of pool_cache_init(). The caller
1721: * provides initial storage.
1.43 thorpej 1722: */
1723: void
1.134 ad 1724: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1725: u_int align_offset, u_int flags, const char *wchan,
1726: struct pool_allocator *palloc, int ipl,
1727: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 1728: void *arg)
1729: {
1.134 ad 1730: CPU_INFO_ITERATOR cii;
1.145 ad 1731: pool_cache_t pc1;
1.134 ad 1732: struct cpu_info *ci;
1733: struct pool *pp;
1734:
1735: pp = &pc->pc_pool;
1736: if (palloc == NULL && ipl == IPL_NONE)
1737: palloc = &pool_allocator_nointr;
1738: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.157 ad 1739: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1.43 thorpej 1740:
1.134 ad 1741: if (ctor == NULL) {
1742: ctor = (int (*)(void *, void *, int))nullop;
1743: }
1744: if (dtor == NULL) {
1745: dtor = (void (*)(void *, void *))nullop;
1746: }
1.43 thorpej 1747:
1.134 ad 1748: pc->pc_emptygroups = NULL;
1749: pc->pc_fullgroups = NULL;
1750: pc->pc_partgroups = NULL;
1.43 thorpej 1751: pc->pc_ctor = ctor;
1752: pc->pc_dtor = dtor;
1753: pc->pc_arg = arg;
1.134 ad 1754: pc->pc_hits = 0;
1.48 thorpej 1755: pc->pc_misses = 0;
1.134 ad 1756: pc->pc_nempty = 0;
1757: pc->pc_npart = 0;
1758: pc->pc_nfull = 0;
1759: pc->pc_contended = 0;
1760: pc->pc_refcnt = 0;
1.136 yamt 1761: pc->pc_freecheck = NULL;
1.134 ad 1762:
1.142 ad 1763: if ((flags & PR_LARGECACHE) != 0) {
1764: pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1.163 ad 1765: pc->pc_pcgpool = &pcg_large_pool;
1.142 ad 1766: } else {
1767: pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1.163 ad 1768: pc->pc_pcgpool = &pcg_normal_pool;
1.142 ad 1769: }
1770:
1.134 ad 1771: /* Allocate per-CPU caches. */
1772: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1773: pc->pc_ncpu = 0;
1.139 ad 1774: if (ncpu < 2) {
1.137 ad 1775: /* XXX For sparc: boot CPU is not attached yet. */
1776: pool_cache_cpu_init1(curcpu(), pc);
1777: } else {
1778: for (CPU_INFO_FOREACH(cii, ci)) {
1779: pool_cache_cpu_init1(ci, pc);
1780: }
1.134 ad 1781: }
1.145 ad 1782:
1783: /* Add to list of all pools. */
1784: if (__predict_true(!cold))
1.134 ad 1785: mutex_enter(&pool_head_lock);
1.145 ad 1786: TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1787: if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1788: break;
1789: }
1790: if (pc1 == NULL)
1791: TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1792: else
1793: TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1794: if (__predict_true(!cold))
1.134 ad 1795: mutex_exit(&pool_head_lock);
1.145 ad 1796:
1797: membar_sync();
1798: pp->pr_cache = pc;
1.43 thorpej 1799: }
1800:
1801: /*
1802: * pool_cache_destroy:
1803: *
1804: * Destroy a pool cache.
1805: */
1806: void
1.134 ad 1807: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 1808: {
1.191 para 1809:
1810: pool_cache_bootstrap_destroy(pc);
1811: pool_put(&cache_pool, pc);
1812: }
1813:
1814: /*
1815: * pool_cache_bootstrap_destroy:
1816: *
1817:  * Destroy a pool cache initialized with pool_cache_bootstrap().
1818: */
1819: void
1820: pool_cache_bootstrap_destroy(pool_cache_t pc)
1821: {
1.134 ad 1822: struct pool *pp = &pc->pc_pool;
1.175 jym 1823: u_int i;
1.134 ad 1824:
1825: /* Remove it from the global list. */
1826: mutex_enter(&pool_head_lock);
1827: while (pc->pc_refcnt != 0)
1828: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 1829: TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1.134 ad 1830: mutex_exit(&pool_head_lock);
1.43 thorpej 1831:
1832: /* First, invalidate the entire cache. */
1833: pool_cache_invalidate(pc);
1834:
1.134 ad 1835: /* Disassociate it from the pool. */
1836: mutex_enter(&pp->pr_lock);
1837: pp->pr_cache = NULL;
1838: mutex_exit(&pp->pr_lock);
1839:
1840: /* Destroy per-CPU data */
1.183 ad 1841: for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1.175 jym 1842: pool_cache_invalidate_cpu(pc, i);
1.134 ad 1843:
1844: /* Finally, destroy it. */
1845: mutex_destroy(&pc->pc_lock);
1846: pool_destroy(pp);
1847: }
1848:
1849: /*
1850: * pool_cache_cpu_init1:
1851: *
1852: * Called for each pool_cache whenever a new CPU is attached.
1853: */
1854: static void
1855: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1856: {
1857: pool_cache_cpu_t *cc;
1.137 ad 1858: int index;
1.134 ad 1859:
1.137 ad 1860: index = ci->ci_index;
1861:
1.183 ad 1862: KASSERT(index < __arraycount(pc->pc_cpus));
1.134 ad 1863:
1.137 ad 1864: if ((cc = pc->pc_cpus[index]) != NULL) {
1865: KASSERT(cc->cc_cpuindex == index);
1.134 ad 1866: return;
1867: }
1868:
1869: /*
1870: * The first CPU is 'free'. This needs to be the case for
1871: * bootstrap - we may not be able to allocate yet.
1872: */
1873: if (pc->pc_ncpu == 0) {
1874: cc = &pc->pc_cpu0;
1875: pc->pc_ncpu = 1;
1876: } else {
1877: mutex_enter(&pc->pc_lock);
1878: pc->pc_ncpu++;
1879: mutex_exit(&pc->pc_lock);
1880: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1881: }
1882:
1883: cc->cc_ipl = pc->pc_pool.pr_ipl;
1884: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1885: cc->cc_cache = pc;
1.137 ad 1886: cc->cc_cpuindex = index;
1.134 ad 1887: cc->cc_hits = 0;
1888: cc->cc_misses = 0;
1.169 yamt 1889: cc->cc_current = __UNCONST(&pcg_dummy);
1890: cc->cc_previous = __UNCONST(&pcg_dummy);
1.134 ad 1891:
1.137 ad 1892: pc->pc_cpus[index] = cc;
1.43 thorpej 1893: }
1894:
1.134 ad 1895: /*
1896: * pool_cache_cpu_init:
1897: *
1898: * Called whenever a new CPU is attached.
1899: */
1900: void
1901: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 1902: {
1.134 ad 1903: pool_cache_t pc;
1904:
1905: mutex_enter(&pool_head_lock);
1.145 ad 1906: TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1.134 ad 1907: pc->pc_refcnt++;
1908: mutex_exit(&pool_head_lock);
1.43 thorpej 1909:
1.134 ad 1910: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 1911:
1.134 ad 1912: mutex_enter(&pool_head_lock);
1913: pc->pc_refcnt--;
1914: cv_broadcast(&pool_busy);
1915: }
1916: mutex_exit(&pool_head_lock);
1.43 thorpej 1917: }
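
/*
 * Per the comment above, this runs for every pool_cache each time a
 * CPU is attached.  A hedged sketch of a call site in a hypothetical
 * attachment hook, once the new CPU's ci_index is valid
 * (pool_cache_cpu_init1() keys the per-CPU slot off ci_index).
 */
static void
mydrv_cpu_attached(struct cpu_info *ci)
{

	/* Give every existing pool cache a per-CPU structure for ci. */
	pool_cache_cpu_init(ci);
}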
1918:
1.134 ad 1919: /*
1920: * pool_cache_reclaim:
1921: *
1922: * Reclaim memory from a pool cache.
1923: */
1924: bool
1925: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 1926: {
1927:
1.134 ad 1928: return pool_reclaim(&pc->pc_pool);
1929: }
1.43 thorpej 1930:
1.136 yamt 1931: static void
1932: pool_cache_destruct_object1(pool_cache_t pc, void *object)
1933: {
1934:
1935: (*pc->pc_dtor)(pc->pc_arg, object);
1936: pool_put(&pc->pc_pool, object);
1937: }
1938:
1.134 ad 1939: /*
1940: * pool_cache_destruct_object:
1941: *
1942: * Force destruction of an object and its release back into
1943: * the pool.
1944: */
1945: void
1946: pool_cache_destruct_object(pool_cache_t pc, void *object)
1947: {
1948:
1.136 yamt 1949: FREECHECK_IN(&pc->pc_freecheck, object);
1950:
1951: pool_cache_destruct_object1(pc, object);
1.43 thorpej 1952: }
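
/*
 * A short sketch of when a consumer might prefer this over
 * pool_cache_put(): the object is no longer reusable in its
 * constructed form, so destruct it now instead of caching it.
 * frob_cache, f and frob_is_stale() are hypothetical.
 */
	if (frob_is_stale(f))
		pool_cache_destruct_object(frob_cache, f);
	else
		pool_cache_put(frob_cache, f);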
1953:
1.134 ad 1954: /*
1955: * pool_cache_invalidate_groups:
1956: *
1957: * Invalidate a chain of groups and destruct all objects.
1958: */
1.102 chs 1959: static void
1.134 ad 1960: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 1961: {
1.134 ad 1962: void *object;
1963: pcg_t *next;
1964: int i;
1965:
1966: for (; pcg != NULL; pcg = next) {
1967: next = pcg->pcg_next;
1968:
1969: for (i = 0; i < pcg->pcg_avail; i++) {
1970: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 1971: pool_cache_destruct_object1(pc, object);
1.134 ad 1972: }
1.102 chs 1973:
1.142 ad 1974: if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
1975: pool_put(&pcg_large_pool, pcg);
1976: } else {
1977: KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
1978: pool_put(&pcg_normal_pool, pcg);
1979: }
1.102 chs 1980: }
1981: }
1982:
1.43 thorpej 1983: /*
1.134 ad 1984: * pool_cache_invalidate:
1.43 thorpej 1985: *
1.134 ad 1986: * Invalidate a pool cache (destruct and release all of the
1987: * cached objects). Does not reclaim objects from the pool.
1.176 thorpej 1988: *
1989: * Note: For pool caches that provide constructed objects, there
1990: * is an assumption that another level of synchronization is occurring
1991: * between the input to the constructor and the cache invalidation.
1.196 jym 1992: *
1993: * Invalidation is a costly process and should not be called from
1994: * interrupt context.
1.43 thorpej 1995: */
1.134 ad 1996: void
1997: pool_cache_invalidate(pool_cache_t pc)
1998: {
1.196 jym 1999: uint64_t where;
1.134 ad 2000: pcg_t *full, *empty, *part;
1.196 jym 2001:
2002: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.176 thorpej 2003:
1.177 jym 2004: if (ncpu < 2 || !mp_online) {
1.176 thorpej 2005: /*
2006: * We might be called early enough in the boot process
2007: * for the CPU data structures to not be fully initialized.
1.196 jym 2008: * In this case, transfer the content of the local CPU's
2009: * cache back into global cache as only this CPU is currently
2010: * running.
1.176 thorpej 2011: */
1.196 jym 2012: pool_cache_transfer(pc);
1.176 thorpej 2013: } else {
2014: /*
1.196 jym 2015: * Signal all CPUs that they must transfer their local
2016: * cache back to the global pool then wait for the xcall to
2017: * complete.
1.176 thorpej 2018: */
1.196 jym 2019: where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
2020: pc, NULL);
1.176 thorpej 2021: xc_wait(where);
2022: }
1.196 jym 2023:
2024: /* Empty pool caches, then invalidate objects */
1.134 ad 2025: mutex_enter(&pc->pc_lock);
2026: full = pc->pc_fullgroups;
2027: empty = pc->pc_emptygroups;
2028: part = pc->pc_partgroups;
2029: pc->pc_fullgroups = NULL;
2030: pc->pc_emptygroups = NULL;
2031: pc->pc_partgroups = NULL;
2032: pc->pc_nfull = 0;
2033: pc->pc_nempty = 0;
2034: pc->pc_npart = 0;
2035: mutex_exit(&pc->pc_lock);
2036:
2037: pool_cache_invalidate_groups(pc, full);
2038: pool_cache_invalidate_groups(pc, empty);
2039: pool_cache_invalidate_groups(pc, part);
2040: }
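
/*
 * Given the note above about constructor input, a typical reason to
 * invalidate is that the state baked into constructed objects has
 * changed.  A hedged sketch; frob_cache and frob_set_template() are
 * hypothetical, and the caller provides the synchronization the
 * comment requires.
 */
	/* New constructor input: flush the cache so that future
	 * pool_cache_get() calls reconstruct objects from the new state. */
	frob_set_template(new_template);
	pool_cache_invalidate(frob_cache);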
2041:
1.175 jym 2042: /*
2043: * pool_cache_invalidate_cpu:
2044: *
2045:  * Invalidate all CPU-bound cached objects in the pool cache, the CPU
2046:  * being identified by its associated index.
2047:  * It is the caller's responsibility to ensure that no operation is
2048:  * taking place on this pool cache while the invalidation runs.
2049:  * WARNING: as no inter-CPU locking is enforced, trying to invalidate
2050:  * pool-cached objects from a CPU other than the one currently running
2051:  * may result in undefined behaviour.
2052: */
2053: static void
2054: pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2055: {
2056: pool_cache_cpu_t *cc;
2057: pcg_t *pcg;
2058:
2059: if ((cc = pc->pc_cpus[index]) == NULL)
2060: return;
2061:
2062: if ((pcg = cc->cc_current) != &pcg_dummy) {
2063: pcg->pcg_next = NULL;
2064: pool_cache_invalidate_groups(pc, pcg);
2065: }
2066: if ((pcg = cc->cc_previous) != &pcg_dummy) {
2067: pcg->pcg_next = NULL;
2068: pool_cache_invalidate_groups(pc, pcg);
2069: }
2070: if (cc != &pc->pc_cpu0)
2071: pool_put(&cache_cpu_pool, cc);
2072:
2073: }
2074:
1.134 ad 2075: void
2076: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2077: {
2078:
2079: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2080: }
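
/*
 * The drain hook lets the pool ask its owner to give memory back under
 * pressure; pool_allocator_alloc() below runs it when a non-sleeping
 * page allocation fails.  A minimal sketch; frob_drain(),
 * frob_free_reserve() and the softc are hypothetical.
 */
static void
frob_drain(void *arg, int flags)
{
	struct frob_softc *sc = arg;

	/* Return privately held objects so the pool can shrink. */
	frob_free_reserve(sc);
}

static void
frob_attach_drain_hook(struct frob_softc *sc)
{

	pool_cache_set_drain_hook(frob_cache, frob_drain, sc);
}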
2081:
2082: void
2083: pool_cache_setlowat(pool_cache_t pc, int n)
2084: {
2085:
2086: pool_setlowat(&pc->pc_pool, n);
2087: }
2088:
2089: void
2090: pool_cache_sethiwat(pool_cache_t pc, int n)
2091: {
2092:
2093: pool_sethiwat(&pc->pc_pool, n);
2094: }
2095:
2096: void
2097: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2098: {
2099:
2100: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2101: }
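
/*
 * These wrappers simply forward to the underlying pool.  A hedged
 * tuning sketch for the hypothetical frob_cache; the ratecap argument
 * is the minimum number of seconds between hard-limit warnings.
 */
	pool_cache_setlowat(frob_cache, 64);	/* try to keep 64 items ready */
	pool_cache_sethiwat(frob_cache, 1024);	/* free idle pages above this */
	pool_cache_sethardlimit(frob_cache, 4096,
	    "WARNING: frob_cache limit reached", 60);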
2102:
1.162 ad 2103: static bool __noinline
2104: pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
1.134 ad 2105: paddr_t *pap, int flags)
1.43 thorpej 2106: {
1.134 ad 2107: pcg_t *pcg, *cur;
2108: uint64_t ncsw;
2109: pool_cache_t pc;
1.43 thorpej 2110: void *object;
1.58 thorpej 2111:
1.168 yamt 2112: KASSERT(cc->cc_current->pcg_avail == 0);
2113: KASSERT(cc->cc_previous->pcg_avail == 0);
2114:
1.134 ad 2115: pc = cc->cc_cache;
2116: cc->cc_misses++;
1.43 thorpej 2117:
1.134 ad 2118: /*
2119: * Nothing was available locally. Try and grab a group
2120: * from the cache.
2121: */
1.162 ad 2122: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2123: ncsw = curlwp->l_ncsw;
2124: mutex_enter(&pc->pc_lock);
2125: pc->pc_contended++;
1.43 thorpej 2126:
1.134 ad 2127: /*
2128: * If we context switched while locking, then
2129: * our view of the per-CPU data is invalid:
2130: * retry.
2131: */
2132: if (curlwp->l_ncsw != ncsw) {
2133: mutex_exit(&pc->pc_lock);
1.162 ad 2134: return true;
1.43 thorpej 2135: }
1.102 chs 2136: }
1.43 thorpej 2137:
1.162 ad 2138: if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
1.43 thorpej 2139: /*
1.134 ad 2140: * If there's a full group, release our empty
2141: * group back to the cache. Install the full
2142: * group as cc_current and return.
1.43 thorpej 2143: */
1.162 ad 2144: if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
1.134 ad 2145: KASSERT(cur->pcg_avail == 0);
2146: cur->pcg_next = pc->pc_emptygroups;
2147: pc->pc_emptygroups = cur;
2148: pc->pc_nempty++;
1.87 thorpej 2149: }
1.142 ad 2150: KASSERT(pcg->pcg_avail == pcg->pcg_size);
1.134 ad 2151: cc->cc_current = pcg;
2152: pc->pc_fullgroups = pcg->pcg_next;
2153: pc->pc_hits++;
2154: pc->pc_nfull--;
2155: mutex_exit(&pc->pc_lock);
1.162 ad 2156: return true;
1.134 ad 2157: }
2158:
2159: /*
2160: * Nothing available locally or in cache. Take the slow
2161: * path: fetch a new object from the pool and construct
2162: * it.
2163: */
2164: pc->pc_misses++;
2165: mutex_exit(&pc->pc_lock);
1.162 ad 2166: splx(s);
1.134 ad 2167:
2168: object = pool_get(&pc->pc_pool, flags);
2169: *objectp = object;
1.162 ad 2170: if (__predict_false(object == NULL))
2171: return false;
1.125 ad 2172:
1.162 ad 2173: if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
1.134 ad 2174: pool_put(&pc->pc_pool, object);
2175: *objectp = NULL;
1.162 ad 2176: return false;
1.43 thorpej 2177: }
2178:
1.134 ad 2179: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2180: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2181:
1.134 ad 2182: if (pap != NULL) {
2183: #ifdef POOL_VTOPHYS
2184: *pap = POOL_VTOPHYS(object);
2185: #else
2186: *pap = POOL_PADDR_INVALID;
2187: #endif
1.102 chs 2188: }
1.43 thorpej 2189:
1.125 ad 2190: FREECHECK_OUT(&pc->pc_freecheck, object);
1.162 ad 2191: return false;
1.43 thorpej 2192: }
2193:
2194: /*
1.134 ad 2195: * pool_cache_get{,_paddr}:
1.43 thorpej 2196: *
1.134 ad 2197: * Get an object from a pool cache (optionally returning
2198: * the physical address of the object).
1.43 thorpej 2199: */
1.134 ad 2200: void *
2201: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2202: {
1.134 ad 2203: pool_cache_cpu_t *cc;
2204: pcg_t *pcg;
2205: void *object;
1.60 thorpej 2206: int s;
1.43 thorpej 2207:
1.184 rmind 2208: KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
1.185 rmind 2209: (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
1.190 jym 2210: "pool '%s' is IPL_NONE, but called from interrupt context\n",
2211: pc->pc_pool.pr_wchan);
1.184 rmind 2212:
1.155 ad 2213: if (flags & PR_WAITOK) {
1.154 yamt 2214: ASSERT_SLEEPABLE();
1.155 ad 2215: }
1.125 ad 2216:
1.162 ad 2217: /* Lock out interrupts and disable preemption. */
2218: s = splvm();
1.165 yamt 2219: while (/* CONSTCOND */ true) {
1.134 ad 2220: /* Try and allocate an object from the current group. */
1.162 ad 2221: cc = pc->pc_cpus[curcpu()->ci_index];
2222: KASSERT(cc->cc_cache == pc);
1.134 ad 2223: pcg = cc->cc_current;
1.162 ad 2224: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2225: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
1.162 ad 2226: if (__predict_false(pap != NULL))
1.134 ad 2227: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
1.148 yamt 2228: #if defined(DIAGNOSTIC)
1.134 ad 2229: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
1.163 ad 2230: KASSERT(pcg->pcg_avail < pcg->pcg_size);
1.134 ad 2231: KASSERT(object != NULL);
1.163 ad 2232: #endif
1.134 ad 2233: cc->cc_hits++;
1.162 ad 2234: splx(s);
1.134 ad 2235: FREECHECK_OUT(&pc->pc_freecheck, object);
2236: return object;
1.43 thorpej 2237: }
2238:
2239: /*
1.134 ad 2240: * That failed. If the previous group isn't empty, swap
2241: * it with the current group and allocate from there.
1.43 thorpej 2242: */
1.134 ad 2243: pcg = cc->cc_previous;
1.162 ad 2244: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2245: cc->cc_previous = cc->cc_current;
2246: cc->cc_current = pcg;
2247: continue;
1.43 thorpej 2248: }
2249:
1.134 ad 2250: /*
2251: * Can't allocate from either group: try the slow path.
2252: * If get_slow() allocated an object for us, or if
1.162 ad 2253: * no more objects are available, it will return false.
1.134 ad 2254: * Otherwise, we need to retry.
2255: */
1.165 yamt 2256: if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2257: break;
2258: }
1.43 thorpej 2259:
1.134 ad 2260: return object;
1.51 thorpej 2261: }
2262:
1.162 ad 2263: static bool __noinline
2264: pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
1.51 thorpej 2265: {
1.200 pooka 2266: struct lwp *l = curlwp;
1.163 ad 2267: pcg_t *pcg, *cur;
1.134 ad 2268: uint64_t ncsw;
2269: pool_cache_t pc;
1.51 thorpej 2270:
1.168 yamt 2271: KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2272: KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2273:
1.134 ad 2274: pc = cc->cc_cache;
1.171 ad 2275: pcg = NULL;
1.134 ad 2276: cc->cc_misses++;
1.200 pooka 2277: ncsw = l->l_ncsw;
1.43 thorpej 2278:
1.171 ad 2279: /*
2280: * If there are no empty groups in the cache then allocate one
2281: * while still unlocked.
2282: */
2283: if (__predict_false(pc->pc_emptygroups == NULL)) {
2284: if (__predict_true(!pool_cache_disable)) {
2285: pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2286: }
1.200 pooka 2287: /*
2288: * If pool_get() blocked, then our view of
2289: * the per-CPU data is invalid: retry.
2290: */
2291: if (__predict_false(l->l_ncsw != ncsw)) {
2292: if (pcg != NULL) {
2293: pool_put(pc->pc_pcgpool, pcg);
2294: }
2295: return true;
2296: }
1.171 ad 2297: if (__predict_true(pcg != NULL)) {
2298: pcg->pcg_avail = 0;
2299: pcg->pcg_size = pc->pc_pcgsize;
2300: }
2301: }
2302:
1.162 ad 2303: /* Lock the cache. */
2304: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2305: mutex_enter(&pc->pc_lock);
2306: pc->pc_contended++;
1.162 ad 2307:
1.163 ad 2308: /*
2309: * If we context switched while locking, then our view of
2310: * the per-CPU data is invalid: retry.
2311: */
1.200 pooka 2312: if (__predict_false(l->l_ncsw != ncsw)) {
1.163 ad 2313: mutex_exit(&pc->pc_lock);
1.171 ad 2314: if (pcg != NULL) {
2315: pool_put(pc->pc_pcgpool, pcg);
2316: }
1.163 ad 2317: return true;
2318: }
1.162 ad 2319: }
1.102 chs 2320:
1.163 ad 2321: 	/* If we didn't allocate an empty group above, try the cache's list. */
1.171 ad 2322: if (pcg == NULL && pc->pc_emptygroups != NULL) {
2323: pcg = pc->pc_emptygroups;
1.163 ad 2324: pc->pc_emptygroups = pcg->pcg_next;
2325: pc->pc_nempty--;
1.134 ad 2326: }
1.130 ad 2327:
1.162 ad 2328: /*
2329: 	 * If there's an empty group, release our full group back
2330: 	 * to the cache. Install the empty group on the local CPU
2331: * and return.
2332: */
1.163 ad 2333: if (pcg != NULL) {
1.134 ad 2334: KASSERT(pcg->pcg_avail == 0);
1.162 ad 2335: if (__predict_false(cc->cc_previous == &pcg_dummy)) {
1.146 ad 2336: cc->cc_previous = pcg;
2337: } else {
1.162 ad 2338: cur = cc->cc_current;
2339: if (__predict_true(cur != &pcg_dummy)) {
1.163 ad 2340: KASSERT(cur->pcg_avail == cur->pcg_size);
1.146 ad 2341: cur->pcg_next = pc->pc_fullgroups;
2342: pc->pc_fullgroups = cur;
2343: pc->pc_nfull++;
2344: }
2345: cc->cc_current = pcg;
2346: }
1.163 ad 2347: pc->pc_hits++;
1.134 ad 2348: mutex_exit(&pc->pc_lock);
1.162 ad 2349: return true;
1.102 chs 2350: }
1.105 christos 2351:
1.134 ad 2352: /*
1.162 ad 2353: * Nothing available locally or in cache, and we didn't
2354: * allocate an empty group. Take the slow path and destroy
2355: * the object here and now.
1.134 ad 2356: */
2357: pc->pc_misses++;
2358: mutex_exit(&pc->pc_lock);
1.162 ad 2359: splx(s);
2360: pool_cache_destruct_object(pc, object);
1.105 christos 2361:
1.162 ad 2362: return false;
1.134 ad 2363: }
1.102 chs 2364:
1.43 thorpej 2365: /*
1.134 ad 2366: * pool_cache_put{,_paddr}:
1.43 thorpej 2367: *
1.134 ad 2368: * Put an object back to the pool cache (optionally caching the
2369: * physical address of the object).
1.43 thorpej 2370: */
1.101 thorpej 2371: void
1.134 ad 2372: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2373: {
1.134 ad 2374: pool_cache_cpu_t *cc;
2375: pcg_t *pcg;
2376: int s;
1.101 thorpej 2377:
1.172 yamt 2378: KASSERT(object != NULL);
1.134 ad 2379: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2380:
1.162 ad 2381: /* Lock out interrupts and disable preemption. */
2382: s = splvm();
1.165 yamt 2383: while (/* CONSTCOND */ true) {
1.134 ad 2384: /* If the current group isn't full, release it there. */
1.162 ad 2385: cc = pc->pc_cpus[curcpu()->ci_index];
2386: KASSERT(cc->cc_cache == pc);
1.134 ad 2387: pcg = cc->cc_current;
1.162 ad 2388: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2389: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2390: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2391: pcg->pcg_avail++;
2392: cc->cc_hits++;
1.162 ad 2393: splx(s);
1.134 ad 2394: return;
2395: }
1.43 thorpej 2396:
1.134 ad 2397: /*
1.162 ad 2398: * That failed. If the previous group isn't full, swap
1.134 ad 2399: * it with the current group and try again.
2400: */
2401: pcg = cc->cc_previous;
1.162 ad 2402: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2403: cc->cc_previous = cc->cc_current;
2404: cc->cc_current = pcg;
2405: continue;
2406: }
1.43 thorpej 2407:
1.134 ad 2408: /*
2409: * Can't free to either group: try the slow path.
2410: * If put_slow() releases the object for us, it
1.162 ad 2411: * will return false. Otherwise we need to retry.
1.134 ad 2412: */
1.165 yamt 2413: if (!pool_cache_put_slow(cc, s, object))
2414: break;
2415: }
1.43 thorpej 2416: }
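
/*
 * A round-trip sketch for the get/put pair above, using the
 * pool_cache_get()/pool_cache_put() wrappers that omit the physical
 * address.  struct frob, frob_cache and the work done on the object
 * are hypothetical.
 */
static int
frob_do_request(void)
{
	struct frob *f;

	/* PR_NOWAIT: fail rather than sleep if nothing is available. */
	f = pool_cache_get(frob_cache, PR_NOWAIT);
	if (f == NULL)
		return ENOMEM;

	/* ... operate on the constructed object ... */

	pool_cache_put(frob_cache, f);
	return 0;
}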
2417:
2418: /*
1.196 jym 2419: * pool_cache_transfer:
1.43 thorpej 2420: *
1.134 ad 2421: * Transfer objects from the per-CPU cache to the global cache.
2422: * Run within a cross-call thread.
1.43 thorpej 2423: */
2424: static void
1.196 jym 2425: pool_cache_transfer(pool_cache_t pc)
1.43 thorpej 2426: {
1.134 ad 2427: pool_cache_cpu_t *cc;
2428: pcg_t *prev, *cur, **list;
1.162 ad 2429: int s;
1.134 ad 2430:
1.162 ad 2431: s = splvm();
2432: mutex_enter(&pc->pc_lock);
2433: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2434: cur = cc->cc_current;
1.169 yamt 2435: cc->cc_current = __UNCONST(&pcg_dummy);
1.134 ad 2436: prev = cc->cc_previous;
1.169 yamt 2437: cc->cc_previous = __UNCONST(&pcg_dummy);
1.162 ad 2438: if (cur != &pcg_dummy) {
1.142 ad 2439: if (cur->pcg_avail == cur->pcg_size) {
1.134 ad 2440: list = &pc->pc_fullgroups;
2441: pc->pc_nfull++;
2442: } else if (cur->pcg_avail == 0) {
2443: list = &pc->pc_emptygroups;
2444: pc->pc_nempty++;
2445: } else {
2446: list = &pc->pc_partgroups;
2447: pc->pc_npart++;
2448: }
2449: cur->pcg_next = *list;
2450: *list = cur;
2451: }
1.162 ad 2452: if (prev != &pcg_dummy) {
1.142 ad 2453: if (prev->pcg_avail == prev->pcg_size) {
1.134 ad 2454: list = &pc->pc_fullgroups;
2455: pc->pc_nfull++;
2456: } else if (prev->pcg_avail == 0) {
2457: list = &pc->pc_emptygroups;
2458: pc->pc_nempty++;
2459: } else {
2460: list = &pc->pc_partgroups;
2461: pc->pc_npart++;
2462: }
2463: prev->pcg_next = *list;
2464: *list = prev;
2465: }
2466: mutex_exit(&pc->pc_lock);
2467: splx(s);
1.3 pk 2468: }
1.66 thorpej 2469:
2470: /*
2471: * Pool backend allocators.
2472: *
2473: * Each pool has a backend allocator that handles allocation, deallocation,
2474: * and any additional draining that might be needed.
2475: *
2476: * We provide two standard allocators:
2477: *
2478: * pool_allocator_kmem - the default when no allocator is specified
2479: *
2480: * pool_allocator_nointr - used for pools that will not be accessed
2481: * in interrupt context.
2482: */
2483: void *pool_page_alloc(struct pool *, int);
2484: void pool_page_free(struct pool *, void *);
2485:
1.112 bjh21 2486: #ifdef POOL_SUBPAGE
2487: struct pool_allocator pool_allocator_kmem_fullpage = {
1.192 rmind 2488: .pa_alloc = pool_page_alloc,
2489: .pa_free = pool_page_free,
2490: .pa_pagesz = 0
1.112 bjh21 2491: };
2492: #else
1.66 thorpej 2493: struct pool_allocator pool_allocator_kmem = {
1.191 para 2494: .pa_alloc = pool_page_alloc,
2495: .pa_free = pool_page_free,
2496: .pa_pagesz = 0
1.66 thorpej 2497: };
1.112 bjh21 2498: #endif
1.66 thorpej 2499:
1.112 bjh21 2500: #ifdef POOL_SUBPAGE
2501: struct pool_allocator pool_allocator_nointr_fullpage = {
1.194 para 2502: .pa_alloc = pool_page_alloc,
2503: .pa_free = pool_page_free,
1.192 rmind 2504: .pa_pagesz = 0
1.112 bjh21 2505: };
2506: #else
1.66 thorpej 2507: struct pool_allocator pool_allocator_nointr = {
1.191 para 2508: .pa_alloc = pool_page_alloc,
2509: .pa_free = pool_page_free,
2510: .pa_pagesz = 0
1.66 thorpej 2511: };
1.112 bjh21 2512: #endif
1.66 thorpej 2513:
2514: #ifdef POOL_SUBPAGE
2515: void *pool_subpage_alloc(struct pool *, int);
2516: void pool_subpage_free(struct pool *, void *);
2517:
1.112 bjh21 2518: struct pool_allocator pool_allocator_kmem = {
1.193 he 2519: .pa_alloc = pool_subpage_alloc,
2520: .pa_free = pool_subpage_free,
2521: .pa_pagesz = POOL_SUBPAGE
1.112 bjh21 2522: };
2523:
2524: struct pool_allocator pool_allocator_nointr = {
1.192 rmind 2525: .pa_alloc = pool_subpage_alloc,
2526: .pa_free = pool_subpage_free,
2527: .pa_pagesz = POOL_SUBPAGE
1.66 thorpej 2528: };
2529: #endif /* POOL_SUBPAGE */
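
/*
 * Beyond the standard allocators above, a pool may supply its own
 * backend by filling in a struct pool_allocator the same way.  A
 * hedged sketch assuming hypothetical mydrv_dma_alloc()/
 * mydrv_dma_free() helpers that hand out page-sized DMA-safe chunks;
 * a pa_pagesz of 0 selects the default page size, as in the standard
 * allocators.
 */
static void *
mydrv_pool_alloc(struct pool *pp, int flags)
{
	const bool waitok = (flags & PR_WAITOK) != 0;

	return mydrv_dma_alloc(pp->pr_alloc->pa_pagesz, waitok);
}

static void
mydrv_pool_free(struct pool *pp, void *v)
{

	mydrv_dma_free(v, pp->pr_alloc->pa_pagesz);
}

static struct pool_allocator mydrv_pool_allocator = {
	.pa_alloc = mydrv_pool_alloc,
	.pa_free = mydrv_pool_free,
	.pa_pagesz = 0
};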
2530:
1.117 yamt 2531: static void *
2532: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2533: {
1.117 yamt 2534: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2535: void *res;
2536:
1.117 yamt 2537: res = (*pa->pa_alloc)(pp, flags);
2538: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2539: /*
1.117 yamt 2540: * We only run the drain hook here if PR_NOWAIT.
2541: * In other cases, the hook will be run in
2542: * pool_reclaim().
1.66 thorpej 2543: */
1.117 yamt 2544: if (pp->pr_drain_hook != NULL) {
2545: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2546: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2547: }
1.117 yamt 2548: }
2549: return res;
1.66 thorpej 2550: }
2551:
1.117 yamt 2552: static void
1.66 thorpej 2553: pool_allocator_free(struct pool *pp, void *v)
2554: {
2555: struct pool_allocator *pa = pp->pr_alloc;
2556:
2557: (*pa->pa_free)(pp, v);
2558: }
2559:
2560: void *
1.124 yamt 2561: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2562: {
1.192 rmind 2563: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
1.191 para 2564: vmem_addr_t va;
1.192 rmind 2565: int ret;
1.191 para 2566:
1.192 rmind 2567: ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2568: vflags | VM_INSTANTFIT, &va);
1.66 thorpej 2569:
1.192 rmind 2570: return ret ? NULL : (void *)va;
1.66 thorpej 2571: }
2572:
2573: void
1.124 yamt 2574: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2575: {
2576:
1.191 para 2577: uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
1.98 yamt 2578: }
2579:
2580: static void *
1.124 yamt 2581: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2582: {
1.192 rmind 2583: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2584: vmem_addr_t va;
2585: int ret;
1.191 para 2586:
1.192 rmind 2587: ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2588: vflags | VM_INSTANTFIT, &va);
1.98 yamt 2589:
1.192 rmind 2590: return ret ? NULL : (void *)va;
1.98 yamt 2591: }
2592:
2593: static void
1.124 yamt 2594: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2595: {
2596:
1.192 rmind 2597: vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
1.66 thorpej 2598: }
2599:
2600: #ifdef POOL_SUBPAGE
2601: /* Sub-page allocator, for machines with large hardware pages. */
2602: void *
2603: pool_subpage_alloc(struct pool *pp, int flags)
2604: {
1.134 ad 2605: return pool_get(&psppool, flags);
1.66 thorpej 2606: }
2607:
2608: void
2609: pool_subpage_free(struct pool *pp, void *v)
2610: {
2611: pool_put(&psppool, v);
2612: }
2613:
1.112 bjh21 2614: #endif /* POOL_SUBPAGE */
1.141 yamt 2615:
2616: #if defined(DDB)
2617: static bool
2618: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2619: {
2620:
2621: return (uintptr_t)ph->ph_page <= addr &&
2622: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2623: }
2624:
1.143 yamt 2625: static bool
2626: pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2627: {
2628:
2629: return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2630: }
2631:
2632: static bool
2633: pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2634: {
2635: int i;
2636:
2637: if (pcg == NULL) {
2638: return false;
2639: }
1.144 yamt 2640: for (i = 0; i < pcg->pcg_avail; i++) {
1.143 yamt 2641: if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2642: return true;
2643: }
2644: }
2645: return false;
2646: }
2647:
2648: static bool
2649: pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2650: {
2651:
2652: if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2653: unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2654: pool_item_bitmap_t *bitmap =
2655: ph->ph_bitmap + (idx / BITMAP_SIZE);
2656: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2657:
2658: return (*bitmap & mask) == 0;
2659: } else {
2660: struct pool_item *pi;
2661:
2662: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2663: if (pool_in_item(pp, pi, addr)) {
2664: return false;
2665: }
2666: }
2667: return true;
2668: }
2669: }
2670:
1.141 yamt 2671: void
2672: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2673: {
2674: struct pool *pp;
2675:
1.145 ad 2676: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.141 yamt 2677: struct pool_item_header *ph;
2678: uintptr_t item;
1.143 yamt 2679: bool allocated = true;
2680: bool incache = false;
2681: bool incpucache = false;
2682: char cpucachestr[32];
1.141 yamt 2683:
2684: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2685: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2686: if (pool_in_page(pp, ph, addr)) {
2687: goto found;
2688: }
2689: }
2690: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2691: if (pool_in_page(pp, ph, addr)) {
1.143 yamt 2692: allocated =
2693: pool_allocated(pp, ph, addr);
2694: goto found;
2695: }
2696: }
2697: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2698: if (pool_in_page(pp, ph, addr)) {
2699: allocated = false;
1.141 yamt 2700: goto found;
2701: }
2702: }
2703: continue;
2704: } else {
2705: ph = pr_find_pagehead_noalign(pp, (void *)addr);
2706: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2707: continue;
2708: }
1.143 yamt 2709: allocated = pool_allocated(pp, ph, addr);
1.141 yamt 2710: }
2711: found:
1.143 yamt 2712: if (allocated && pp->pr_cache) {
2713: pool_cache_t pc = pp->pr_cache;
2714: struct pool_cache_group *pcg;
2715: int i;
2716:
2717: for (pcg = pc->pc_fullgroups; pcg != NULL;
2718: pcg = pcg->pcg_next) {
2719: if (pool_in_cg(pp, pcg, addr)) {
2720: incache = true;
2721: goto print;
2722: }
2723: }
1.183 ad 2724: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.143 yamt 2725: pool_cache_cpu_t *cc;
2726:
2727: if ((cc = pc->pc_cpus[i]) == NULL) {
2728: continue;
2729: }
2730: if (pool_in_cg(pp, cc->cc_current, addr) ||
2731: pool_in_cg(pp, cc->cc_previous, addr)) {
2732: struct cpu_info *ci =
1.170 ad 2733: cpu_lookup(i);
1.143 yamt 2734:
2735: incpucache = true;
2736: snprintf(cpucachestr,
2737: sizeof(cpucachestr),
2738: "cached by CPU %u",
1.153 martin 2739: ci->ci_index);
1.143 yamt 2740: goto print;
2741: }
2742: }
2743: }
2744: print:
1.141 yamt 2745: item = (uintptr_t)ph->ph_page + ph->ph_off;
2746: item = item + rounddown(addr - item, pp->pr_size);
1.143 yamt 2747: (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
1.141 yamt 2748: (void *)addr, item, (size_t)(addr - item),
1.143 yamt 2749: pp->pr_wchan,
2750: incpucache ? cpucachestr :
2751: incache ? "cached" : allocated ? "allocated" : "free");
1.141 yamt 2752: }
2753: }
2754: #endif /* defined(DDB) */
1.201.2.1! tls 2755:
! 2756: static int
! 2757: pool_sysctl(SYSCTLFN_ARGS)
! 2758: {
! 2759: struct pool_sysctl data;
! 2760: struct pool *pp;
! 2761: struct pool_cache *pc;
! 2762: pool_cache_cpu_t *cc;
! 2763: int error;
! 2764: size_t i, written;
! 2765:
! 2766: if (oldp == NULL) {
! 2767: *oldlenp = 0;
! 2768: TAILQ_FOREACH(pp, &pool_head, pr_poollist)
! 2769: *oldlenp += sizeof(data);
! 2770: return 0;
! 2771: }
! 2772:
! 2773: memset(&data, 0, sizeof(data));
! 2774: error = 0;
! 2775: written = 0;
! 2776: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
! 2777: if (written + sizeof(data) > *oldlenp)
! 2778: break;
! 2779: strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
! 2780: data.pr_pagesize = pp->pr_alloc->pa_pagesz;
! 2781: data.pr_flags = pp->pr_roflags | pp->pr_flags;
! 2782: #define COPY(field) data.field = pp->field
! 2783: COPY(pr_size);
! 2784:
! 2785: COPY(pr_itemsperpage);
! 2786: COPY(pr_nitems);
! 2787: COPY(pr_nout);
! 2788: COPY(pr_hardlimit);
! 2789: COPY(pr_npages);
! 2790: COPY(pr_minpages);
! 2791: COPY(pr_maxpages);
! 2792:
! 2793: COPY(pr_nget);
! 2794: COPY(pr_nfail);
! 2795: COPY(pr_nput);
! 2796: COPY(pr_npagealloc);
! 2797: COPY(pr_npagefree);
! 2798: COPY(pr_hiwat);
! 2799: COPY(pr_nidle);
! 2800: #undef COPY
! 2801:
! 2802: data.pr_cache_nmiss_pcpu = 0;
! 2803: data.pr_cache_nhit_pcpu = 0;
! 2804: if (pp->pr_cache) {
! 2805: pc = pp->pr_cache;
! 2806: data.pr_cache_meta_size = pc->pc_pcgsize;
! 2807: data.pr_cache_nfull = pc->pc_nfull;
! 2808: data.pr_cache_npartial = pc->pc_npart;
! 2809: data.pr_cache_nempty = pc->pc_nempty;
! 2810: data.pr_cache_ncontended = pc->pc_contended;
! 2811: data.pr_cache_nmiss_global = pc->pc_misses;
! 2812: data.pr_cache_nhit_global = pc->pc_hits;
! 2813: for (i = 0; i < pc->pc_ncpu; ++i) {
! 2814: cc = pc->pc_cpus[i];
! 2815: if (cc == NULL)
! 2816: continue;
! 2817: 				data.pr_cache_nmiss_pcpu += cc->cc_misses;
! 2818: 				data.pr_cache_nhit_pcpu += cc->cc_hits;
! 2819: }
! 2820: } else {
! 2821: data.pr_cache_meta_size = 0;
! 2822: data.pr_cache_nfull = 0;
! 2823: data.pr_cache_npartial = 0;
! 2824: data.pr_cache_nempty = 0;
! 2825: data.pr_cache_ncontended = 0;
! 2826: data.pr_cache_nmiss_global = 0;
! 2827: data.pr_cache_nhit_global = 0;
! 2828: }
! 2829:
! 2830: error = sysctl_copyout(l, &data, oldp, sizeof(data));
! 2831: if (error)
! 2832: break;
! 2833: written += sizeof(data);
! 2834: oldp = (char *)oldp + sizeof(data);
! 2835: }
! 2836:
! 2837: *oldlenp = written;
! 2838: return error;
! 2839: }
! 2840:
! 2841: SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
! 2842: {
! 2843: const struct sysctlnode *rnode = NULL;
! 2844:
! 2845: sysctl_createv(clog, 0, NULL, &rnode,
! 2846: CTLFLAG_PERMANENT,
! 2847: CTLTYPE_STRUCT, "pool",
! 2848: SYSCTL_DESCR("Get pool statistics"),
! 2849: pool_sysctl, 0, NULL, 0,
! 2850: CTL_KERN, CTL_CREATE, CTL_EOL);
! 2851: }
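
/*
 * The node created above appears as kern.pool and returns an array of
 * struct pool_sysctl records, one per pool.  A minimal userland sketch,
 * assuming struct pool_sysctl is visible through <sys/pool.h>; only
 * fields copied out by pool_sysctl() above are printed.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/pool.h>

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct pool_sysctl *data;
	size_t len = 0, i;

	/* First call sizes the buffer, second call fills it. */
	if (sysctlbyname("kern.pool", NULL, &len, NULL, 0) == -1)
		return 1;
	if ((data = malloc(len)) == NULL)
		return 1;
	if (sysctlbyname("kern.pool", data, &len, NULL, 0) == -1)
		return 1;

	for (i = 0; i < len / sizeof(*data); i++)
		printf("%-16s size %llu nget %llu nput %llu\n",
		    data[i].pr_wchan,
		    (unsigned long long)data[i].pr_size,
		    (unsigned long long)data[i].pr_nget,
		    (unsigned long long)data[i].pr_nput);

	free(data);
	return 0;
}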