Annotation of src/sys/kern/subr_pool.c, Revision 1.203.4.2
1.203.4.2! skrll 1: /* $NetBSD: subr_pool.c,v 1.203.4.1 2015/09/22 12:06:07 skrll Exp $ */
1.1 pk 2:
3: /*-
1.203.4.1 skrll 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
1.183 ad 5: * The NetBSD Foundation, Inc.
1.1 pk 6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 9: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.203.4.1 skrll 10: * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
11: * Maxime Villard.
1.1 pk 12: *
13: * Redistribution and use in source and binary forms, with or without
14: * modification, are permitted provided that the following conditions
15: * are met:
16: * 1. Redistributions of source code must retain the above copyright
17: * notice, this list of conditions and the following disclaimer.
18: * 2. Redistributions in binary form must reproduce the above copyright
19: * notice, this list of conditions and the following disclaimer in the
20: * documentation and/or other materials provided with the distribution.
21: *
22: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32: * POSSIBILITY OF SUCH DAMAGE.
33: */
1.64 lukem 34:
35: #include <sys/cdefs.h>
1.203.4.2! skrll 36: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.203.4.1 2015/09/22 12:06:07 skrll Exp $");
1.24 scottr 37:
1.203.4.1 skrll 38: #ifdef _KERNEL_OPT
1.141 yamt 39: #include "opt_ddb.h"
1.28 thorpej 40: #include "opt_lockdebug.h"
1.203.4.1 skrll 41: #endif
1.1 pk 42:
43: #include <sys/param.h>
44: #include <sys/systm.h>
1.203 joerg 45: #include <sys/sysctl.h>
1.135 yamt 46: #include <sys/bitops.h>
1.1 pk 47: #include <sys/proc.h>
48: #include <sys/errno.h>
49: #include <sys/kernel.h>
1.191 para 50: #include <sys/vmem.h>
1.1 pk 51: #include <sys/pool.h>
1.20 thorpej 52: #include <sys/syslog.h>
1.125 ad 53: #include <sys/debug.h>
1.134 ad 54: #include <sys/lockdebug.h>
55: #include <sys/xcall.h>
56: #include <sys/cpu.h>
1.145 ad 57: #include <sys/atomic.h>
1.3 pk 58:
1.187 uebayasi 59: #include <uvm/uvm_extern.h>
1.3 pk 60:
1.1 pk 61: /*
62: * Pool resource management utility.
1.3 pk 63: *
1.88 chs 64: * Memory is allocated in pages which are split into pieces according to
65: * the pool item size. Each page is kept on one of three lists in the
66: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
67: * for empty, full and partially-full pages respectively. The individual
68: * pool items are on a linked list headed by `ph_itemlist' in each page
69: * header. The memory for building the page list is either taken from
70: * the allocated pages themselves (for small pool items) or taken from
71: * an internal pool of page headers (`phpool').
1.1 pk 72: */
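/*
 * A minimal usage sketch (illustrative only; "struct foo" and "foo_pool"
 * are hypothetical names):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE);
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 *
 * Passing a NULL allocator selects pool_allocator_kmem; see pool_init().
 */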
73:
1.202 abs 74: /* List of all pools. Not static, as it is needed by 'vmstat -i'. */
75: TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.134 ad 76:
1.3 pk 77: /* Private pool for page header structures */
1.97 yamt 78: #define PHPOOL_MAX 8
79: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 80: #define PHPOOL_FREELIST_NELEM(idx) \
81: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
1.3 pk 82:
1.62 bjh21 83: #ifdef POOL_SUBPAGE
84: /* Pool of subpages for use by normal pools. */
85: static struct pool psppool;
86: #endif
87:
1.203.4.1 skrll 88: #ifdef POOL_REDZONE
89: # define POOL_REDZONE_SIZE 2
90: static void pool_redzone_init(struct pool *, size_t);
91: static void pool_redzone_fill(struct pool *, void *);
92: static void pool_redzone_check(struct pool *, void *);
93: #else
94: # define pool_redzone_init(pp, sz) /* NOTHING */
95: # define pool_redzone_fill(pp, ptr) /* NOTHING */
96: # define pool_redzone_check(pp, ptr) /* NOTHING */
97: #endif
98:
1.98 yamt 99: static void *pool_page_alloc_meta(struct pool *, int);
100: static void pool_page_free_meta(struct pool *, void *);
101:
102: /* allocator for pool metadata */
1.134 ad 103: struct pool_allocator pool_allocator_meta = {
1.191 para 104: .pa_alloc = pool_page_alloc_meta,
105: .pa_free = pool_page_free_meta,
106: .pa_pagesz = 0
1.98 yamt 107: };
108:
1.3 pk 109: /* # of seconds to retain page after last use */
110: int pool_inactive_time = 10;
111:
112: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 113: static struct pool *drainpp;
114:
1.134 ad 115: /* This lock protects both pool_head and drainpp. */
116: static kmutex_t pool_head_lock;
117: static kcondvar_t pool_busy;
1.3 pk 118:
1.178 elad 119: /* This lock protects initialization of a potentially shared pool allocator */
120: static kmutex_t pool_allocator_lock;
121:
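/*
 * For PR_NOTOUCH pools, each page header carries a bitmap with one bit
 * per item on the page; a set bit means the item is free.
 */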
1.135 yamt 122: typedef uint32_t pool_item_bitmap_t;
123: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
124: #define BITMAP_MASK (BITMAP_SIZE - 1)
1.99 yamt 125:
1.3 pk 126: struct pool_item_header {
127: /* Page headers */
1.88 chs 128: LIST_ENTRY(pool_item_header)
1.3 pk 129: ph_pagelist; /* pool page list */
1.88 chs 130: SPLAY_ENTRY(pool_item_header)
131: ph_node; /* Off-page page headers */
1.128 christos 132: void * ph_page; /* this page's address */
1.151 yamt 133: uint32_t ph_time; /* last referenced */
1.135 yamt 134: uint16_t ph_nmissing; /* # of chunks in use */
1.141 yamt 135: uint16_t ph_off; /* start offset in page */
1.97 yamt 136: union {
137: /* !PR_NOTOUCH */
138: struct {
1.102 chs 139: LIST_HEAD(, pool_item)
1.97 yamt 140: phu_itemlist; /* chunk list for this page */
141: } phu_normal;
142: /* PR_NOTOUCH */
143: struct {
1.141 yamt 144: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 145: } phu_notouch;
146: } ph_u;
1.3 pk 147: };
1.97 yamt 148: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.135 yamt 149: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 150:
1.1 pk 151: struct pool_item {
1.3 pk 152: #ifdef DIAGNOSTIC
1.82 thorpej 153: u_int pi_magic;
1.33 chs 154: #endif
1.134 ad 155: #define PI_MAGIC 0xdeaddeadU
1.3 pk 156: /* Other entries use only this list entry */
1.102 chs 157: LIST_ENTRY(pool_item) pi_list;
1.3 pk 158: };
159:
1.53 thorpej 160: #define POOL_NEEDS_CATCHUP(pp) \
161: ((pp)->pr_nitems < (pp)->pr_minitems)
162:
1.43 thorpej 163: /*
164: * Pool cache management.
165: *
166: * Pool caches provide a way for constructed objects to be cached by the
167: * pool subsystem. This can lead to performance improvements by avoiding
168: * needless object construction/destruction; it is deferred until absolutely
169: * necessary.
170: *
1.134 ad 171: * Caches are grouped into cache groups. Each cache group references up
 172: * to PCG_NOBJECTS_{NORMAL,LARGE} constructed objects. When a cache allocates an
173: * object from the pool, it calls the object's constructor and places it
174: * into a cache group. When a cache group frees an object back to the
175: * pool, it first calls the object's destructor. This allows the object
176: * to persist in constructed form while freed to the cache.
177: *
178: * The pool references each cache, so that when a pool is drained by the
179: * pagedaemon, it can drain each individual cache as well. Each time a
180: * cache is drained, the most idle cache group is freed to the pool in
181: * its entirety.
1.43 thorpej 182: *
 183: * Pool caches are laid on top of pools. By layering them, we can avoid
184: * the complexity of cache management for pools which would not benefit
185: * from it.
186: */
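/*
 * A minimal usage sketch (illustrative only; the "foo" names are
 * hypothetical, and pool_cache_get()/pool_cache_put() are the ordinary
 * consumer entry points declared in <sys/pool.h>):
 *
 *	pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
 *	    0, 0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(foo_cache, f);
 */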
187:
1.142 ad 188: static struct pool pcg_normal_pool;
189: static struct pool pcg_large_pool;
1.134 ad 190: static struct pool cache_pool;
191: static struct pool cache_cpu_pool;
1.3 pk 192:
1.189 pooka 193: pool_cache_t pnbuf_cache; /* pathname buffer cache */
194:
1.145 ad 195: /* List of all caches. */
196: TAILQ_HEAD(,pool_cache) pool_cache_head =
197: TAILQ_HEAD_INITIALIZER(pool_cache_head);
198:
1.162 ad 199: int pool_cache_disable; /* global disable for caching */
1.169 yamt 200: static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */
1.145 ad 201:
1.162 ad 202: static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
203: void *);
204: static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
205: void **, paddr_t *, int);
1.134 ad 206: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
207: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
1.175 jym 208: static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
1.196 jym 209: static void pool_cache_transfer(pool_cache_t);
1.3 pk 210:
1.42 thorpej 211: static int pool_catchup(struct pool *);
1.128 christos 212: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 213: struct pool_item_header *);
1.88 chs 214: static void pool_update_curpage(struct pool *);
1.66 thorpej 215:
1.113 yamt 216: static int pool_grow(struct pool *, int);
1.117 yamt 217: static void *pool_allocator_alloc(struct pool *, int);
218: static void pool_allocator_free(struct pool *, void *);
1.3 pk 219:
1.97 yamt 220: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.199 christos 221: void (*)(const char *, ...) __printflike(1, 2));
1.42 thorpej 222: static void pool_print1(struct pool *, const char *,
1.199 christos 223: void (*)(const char *, ...) __printflike(1, 2));
1.3 pk 224:
1.88 chs 225: static int pool_chk_page(struct pool *, const char *,
226: struct pool_item_header *);
227:
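/*
 * pr_item_notouch_index:
 *
 *	Compute an item's index within its page from its address.
 */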
1.135 yamt 228: static inline unsigned int
1.97 yamt 229: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
230: const void *v)
231: {
232: const char *cp = v;
1.135 yamt 233: unsigned int idx;
1.97 yamt 234:
235: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 236: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 237: KASSERT(idx < pp->pr_itemsperpage);
238: return idx;
239: }
240:
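/*
 * pr_item_notouch_put:
 *
 *	Mark an item free by setting its bit in the page header's bitmap.
 */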
1.110 perry 241: static inline void
1.97 yamt 242: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
243: void *obj)
244: {
1.135 yamt 245: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
246: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
247: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 248:
1.135 yamt 249: KASSERT((*bitmap & mask) == 0);
250: *bitmap |= mask;
1.97 yamt 251: }
252:
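/*
 * pr_item_notouch_get:
 *
 *	Allocate an item: find the first set (free) bit with ffs32(),
 *	clear it, and convert the index back into an item address.
 */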
1.110 perry 253: static inline void *
1.97 yamt 254: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
255: {
1.135 yamt 256: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
257: unsigned int idx;
258: int i;
1.97 yamt 259:
1.135 yamt 260: for (i = 0; ; i++) {
261: int bit;
1.97 yamt 262:
1.135 yamt 263: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
264: bit = ffs32(bitmap[i]);
265: if (bit) {
266: pool_item_bitmap_t mask;
267:
268: bit--;
269: idx = (i * BITMAP_SIZE) + bit;
270: mask = 1 << bit;
271: KASSERT((bitmap[i] & mask) != 0);
272: bitmap[i] &= ~mask;
273: break;
274: }
275: }
276: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 277: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 278: }
279:
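/*
 * pr_item_notouch_init:
 *
 *	Mark every item on a freshly primed page free by setting all
 *	of the bitmap bits.
 */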
1.135 yamt 280: static inline void
1.141 yamt 281: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
1.135 yamt 282: {
283: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
284: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
285: int i;
286:
287: for (i = 0; i < n; i++) {
288: bitmap[i] = (pool_item_bitmap_t)-1;
289: }
290: }
291:
1.110 perry 292: static inline int
1.88 chs 293: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
294: {
1.121 yamt 295:
296: /*
 297: * We consider a pool_item_header with a smaller ph_page to be bigger.
 298: * (This unnatural ordering is for the benefit of pr_find_pagehead.)
299: */
300:
1.88 chs 301: if (a->ph_page < b->ph_page)
1.121 yamt 302: return (1);
303: else if (a->ph_page > b->ph_page)
1.88 chs 304: return (-1);
305: else
306: return (0);
307: }
308:
309: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
310: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
311:
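/*
 * pr_find_pagehead_noalign:
 *
 *	Find the page header for an item of a PR_NOALIGN pool, where the
 *	item address cannot simply be masked down to the page boundary.
 *	Relies on the reversed phtree ordering to locate the header with
 *	the largest ph_page not greater than v.
 */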
1.141 yamt 312: static inline struct pool_item_header *
313: pr_find_pagehead_noalign(struct pool *pp, void *v)
314: {
315: struct pool_item_header *ph, tmp;
316:
317: tmp.ph_page = (void *)(uintptr_t)v;
318: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
319: if (ph == NULL) {
320: ph = SPLAY_ROOT(&pp->pr_phtree);
321: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
322: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
323: }
324: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
325: }
326:
327: return ph;
328: }
329:
1.3 pk 330: /*
1.121 yamt 331: * Return the pool page header based on item address.
1.3 pk 332: */
1.110 perry 333: static inline struct pool_item_header *
1.121 yamt 334: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 335: {
1.88 chs 336: struct pool_item_header *ph, tmp;
1.3 pk 337:
1.121 yamt 338: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.141 yamt 339: ph = pr_find_pagehead_noalign(pp, v);
1.121 yamt 340: } else {
1.128 christos 341: void *page =
342: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 343:
344: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 345: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 346: } else {
347: tmp.ph_page = page;
348: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
349: }
350: }
1.3 pk 351:
1.121 yamt 352: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 353: ((char *)ph->ph_page <= (char *)v &&
354: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 355: return ph;
1.3 pk 356: }
357:
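/*
 * pr_pagelist_free:
 *
 *	Return each page on the given list to the back-end allocator,
 *	releasing any off-page headers to their header pool.
 */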
1.101 thorpej 358: static void
359: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
360: {
361: struct pool_item_header *ph;
362:
363: while ((ph = LIST_FIRST(pq)) != NULL) {
364: LIST_REMOVE(ph, ph_pagelist);
365: pool_allocator_free(pp, ph->ph_page);
1.134 ad 366: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 367: pool_put(pp->pr_phpool, ph);
368: }
369: }
370:
1.3 pk 371: /*
372: * Remove a page from the pool.
373: */
1.110 perry 374: static inline void
1.61 chs 375: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
376: struct pool_pagelist *pq)
1.3 pk 377: {
378:
1.134 ad 379: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 380:
1.3 pk 381: /*
1.7 thorpej 382: * If the page was idle, decrement the idle page count.
1.3 pk 383: */
1.6 thorpej 384: if (ph->ph_nmissing == 0) {
385: #ifdef DIAGNOSTIC
386: if (pp->pr_nidle == 0)
387: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 388: if (pp->pr_nitems < pp->pr_itemsperpage)
389: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 390: #endif
391: pp->pr_nidle--;
392: }
1.7 thorpej 393:
1.20 thorpej 394: pp->pr_nitems -= pp->pr_itemsperpage;
395:
1.7 thorpej 396: /*
1.101 thorpej 397: * Unlink the page from the pool and queue it for release.
1.7 thorpej 398: */
1.88 chs 399: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 400: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
401: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 402: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
403:
1.7 thorpej 404: pp->pr_npages--;
405: pp->pr_npagefree++;
1.6 thorpej 406:
1.88 chs 407: pool_update_curpage(pp);
1.3 pk 408: }
409:
410: /*
1.94 simonb 411: * Initialize all the pools listed in the "pools" link set.
412: */
413: void
1.117 yamt 414: pool_subsystem_init(void)
1.94 simonb 415: {
1.192 rmind 416: size_t size;
1.191 para 417: int idx;
1.94 simonb 418:
1.134 ad 419: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
1.179 mlelstv 420: mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
1.134 ad 421: cv_init(&pool_busy, "poolbusy");
422:
1.191 para 423: /*
 424: * Initialize the private page header pools and the cache magazine
 425: * pools.
426: */
427: for (idx = 0; idx < PHPOOL_MAX; idx++) {
428: static char phpool_names[PHPOOL_MAX][6+1+6+1];
429: int nelem;
430: size_t sz;
431:
432: nelem = PHPOOL_FREELIST_NELEM(idx);
433: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
434: "phpool-%d", nelem);
435: sz = sizeof(struct pool_item_header);
436: if (nelem) {
437: sz = offsetof(struct pool_item_header,
438: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
439: }
440: pool_init(&phpool[idx], sz, 0, 0, 0,
441: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.117 yamt 442: }
1.191 para 443: #ifdef POOL_SUBPAGE
444: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
445: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
446: #endif
447:
448: size = sizeof(pcg_t) +
449: (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
450: pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
451: "pcgnormal", &pool_allocator_meta, IPL_VM);
452:
453: size = sizeof(pcg_t) +
454: (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
455: pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
456: "pcglarge", &pool_allocator_meta, IPL_VM);
1.134 ad 457:
1.156 ad 458: pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
1.191 para 459: 0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
1.134 ad 460:
1.156 ad 461: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
1.191 para 462: 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
1.94 simonb 463: }
464:
465: /*
1.3 pk 466: * Initialize the given pool resource structure.
467: *
468: * We export this routine to allow other kernel parts to declare
1.195 rmind 469: * static pools that must be initialized before kmem(9) is available.
1.3 pk 470: */
471: void
1.42 thorpej 472: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 473: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 474: {
1.116 simonb 475: struct pool *pp1;
1.203.4.1 skrll 476: size_t trysize, phsize, prsize;
1.134 ad 477: int off, slack;
1.3 pk 478:
1.116 simonb 479: #ifdef DEBUG
1.198 christos 480: if (__predict_true(!cold))
481: mutex_enter(&pool_head_lock);
1.116 simonb 482: /*
483: * Check that the pool hasn't already been initialised and
484: * added to the list of all pools.
485: */
1.145 ad 486: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
1.116 simonb 487: if (pp == pp1)
488: panic("pool_init: pool %s already initialised",
489: wchan);
490: }
1.198 christos 491: if (__predict_true(!cold))
492: mutex_exit(&pool_head_lock);
1.116 simonb 493: #endif
494:
1.66 thorpej 495: if (palloc == NULL)
496: palloc = &pool_allocator_kmem;
1.112 bjh21 497: #ifdef POOL_SUBPAGE
498: if (size > palloc->pa_pagesz) {
499: if (palloc == &pool_allocator_kmem)
500: palloc = &pool_allocator_kmem_fullpage;
501: else if (palloc == &pool_allocator_nointr)
502: palloc = &pool_allocator_nointr_fullpage;
503: }
1.66 thorpej 504: #endif /* POOL_SUBPAGE */
1.180 mlelstv 505: if (!cold)
506: mutex_enter(&pool_allocator_lock);
1.178 elad 507: if (palloc->pa_refcnt++ == 0) {
1.112 bjh21 508: if (palloc->pa_pagesz == 0)
1.66 thorpej 509: palloc->pa_pagesz = PAGE_SIZE;
510:
511: TAILQ_INIT(&palloc->pa_list);
512:
1.134 ad 513: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 514: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
515: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.4 thorpej 516: }
1.180 mlelstv 517: if (!cold)
518: mutex_exit(&pool_allocator_lock);
1.3 pk 519:
520: if (align == 0)
521: align = ALIGN(1);
1.14 thorpej 522:
1.203.4.1 skrll 523: prsize = size;
524: if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
525: prsize = sizeof(struct pool_item);
1.3 pk 526:
1.203.4.1 skrll 527: prsize = roundup(prsize, align);
1.66 thorpej 528: #ifdef DIAGNOSTIC
1.203.4.1 skrll 529: if (prsize > palloc->pa_pagesz)
530: panic("pool_init: pool item size (%zu) too large", prsize);
1.66 thorpej 531: #endif
1.35 pk 532:
1.3 pk 533: /*
534: * Initialize the pool structure.
535: */
1.88 chs 536: LIST_INIT(&pp->pr_emptypages);
537: LIST_INIT(&pp->pr_fullpages);
538: LIST_INIT(&pp->pr_partpages);
1.134 ad 539: pp->pr_cache = NULL;
1.3 pk 540: pp->pr_curpage = NULL;
541: pp->pr_npages = 0;
542: pp->pr_minitems = 0;
543: pp->pr_minpages = 0;
544: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 545: pp->pr_roflags = flags;
546: pp->pr_flags = 0;
1.203.4.1 skrll 547: pp->pr_size = prsize;
1.3 pk 548: pp->pr_align = align;
549: pp->pr_wchan = wchan;
1.66 thorpej 550: pp->pr_alloc = palloc;
1.20 thorpej 551: pp->pr_nitems = 0;
552: pp->pr_nout = 0;
553: pp->pr_hardlimit = UINT_MAX;
554: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 555: pp->pr_hardlimit_ratecap.tv_sec = 0;
556: pp->pr_hardlimit_ratecap.tv_usec = 0;
557: pp->pr_hardlimit_warning_last.tv_sec = 0;
558: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 559: pp->pr_drain_hook = NULL;
560: pp->pr_drain_hook_arg = NULL;
1.125 ad 561: pp->pr_freecheck = NULL;
1.203.4.1 skrll 562: pool_redzone_init(pp, size);
1.3 pk 563:
564: /*
 565: * Decide whether to put the page header off-page, to avoid
1.92 enami 566: * wasting too large a part of the page or too big an item on it.
 567: * Off-page page headers go on a splay tree, so we can match
 568: * a returned item with its header based on the page address.
 569: * We use 1/16 of the page size and about 8 times the item
 570: * size as the thresholds (XXX: tune).
571: *
572: * However, we'll put the header into the page if we can put
573: * it without wasting any items.
574: *
575: * Silently enforce `0 <= ioff < align'.
1.3 pk 576: */
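/*
 * A worked example (illustrative, assuming the common pa_pagesz of
 * 4096): the first threshold is 4096 / 16 = 256 bytes, and since
 * phsize is at least 32 bytes, phsize << 3 is at least 256, so items
 * smaller than 256 bytes keep their header in-page, as does any item
 * size for which the header costs no items.
 */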
1.92 enami 577: pp->pr_itemoffset = ioff %= align;
578: /* See the comment below about reserved bytes. */
579: trysize = palloc->pa_pagesz - ((align - ioff) % align);
580: phsize = ALIGN(sizeof(struct pool_item_header));
1.201 para 581: if (pp->pr_roflags & PR_PHINPAGE ||
582: ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 583: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
1.201 para 584: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
1.3 pk 585: /* Use the end of the page for the page header */
1.20 thorpej 586: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 587: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 588: } else {
1.3 pk 589: /* The page header will be taken from our page header pool */
590: pp->pr_phoffset = 0;
1.66 thorpej 591: off = palloc->pa_pagesz;
1.88 chs 592: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 593: }
1.1 pk 594:
1.3 pk 595: /*
596: * Alignment is to take place at `ioff' within the item. This means
597: * we must reserve up to `align - 1' bytes on the page to allow
598: * appropriate positioning of each item.
599: */
600: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 601: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 602: if ((pp->pr_roflags & PR_NOTOUCH)) {
603: int idx;
604:
605: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
606: idx++) {
607: /* nothing */
608: }
609: if (idx >= PHPOOL_MAX) {
610: /*
 611: * If you see this panic, consider tweaking
612: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
613: */
614: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
615: pp->pr_wchan, pp->pr_itemsperpage);
616: }
617: pp->pr_phpool = &phpool[idx];
618: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
619: pp->pr_phpool = &phpool[0];
620: }
621: #if defined(DIAGNOSTIC)
622: else {
623: pp->pr_phpool = NULL;
624: }
625: #endif
1.3 pk 626:
627: /*
 628: * Use the slack between the chunks and the page header for "cache
 629: * coloring": offsetting items per page spreads them across cache lines.
630: */
631: slack = off - pp->pr_itemsperpage * pp->pr_size;
632: pp->pr_maxcolor = (slack / align) * align;
633: pp->pr_curcolor = 0;
634:
635: pp->pr_nget = 0;
636: pp->pr_nfail = 0;
637: pp->pr_nput = 0;
638: pp->pr_npagealloc = 0;
639: pp->pr_npagefree = 0;
1.1 pk 640: pp->pr_hiwat = 0;
1.8 thorpej 641: pp->pr_nidle = 0;
1.134 ad 642: pp->pr_refcnt = 0;
1.3 pk 643:
1.157 ad 644: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
1.134 ad 645: cv_init(&pp->pr_cv, wchan);
646: pp->pr_ipl = ipl;
1.1 pk 647:
1.145 ad 648: /* Insert into the list of all pools. */
1.181 mlelstv 649: if (!cold)
1.134 ad 650: mutex_enter(&pool_head_lock);
1.145 ad 651: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
652: if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
653: break;
654: }
655: if (pp1 == NULL)
656: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
657: else
658: TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
1.181 mlelstv 659: if (!cold)
1.134 ad 660: mutex_exit(&pool_head_lock);
661:
1.167 skrll 662: /* Insert this into the list of pools using this allocator. */
1.181 mlelstv 663: if (!cold)
1.134 ad 664: mutex_enter(&palloc->pa_lock);
1.145 ad 665: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
1.181 mlelstv 666: if (!cold)
1.134 ad 667: mutex_exit(&palloc->pa_lock);
1.1 pk 668: }
669:
670: /*
 671: * De-commission a pool resource.
672: */
673: void
1.42 thorpej 674: pool_destroy(struct pool *pp)
1.1 pk 675: {
1.101 thorpej 676: struct pool_pagelist pq;
1.3 pk 677: struct pool_item_header *ph;
1.43 thorpej 678:
1.101 thorpej 679: /* Remove from global pool list */
1.134 ad 680: mutex_enter(&pool_head_lock);
681: while (pp->pr_refcnt != 0)
682: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 683: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.101 thorpej 684: if (drainpp == pp)
685: drainpp = NULL;
1.134 ad 686: mutex_exit(&pool_head_lock);
1.101 thorpej 687:
688: /* Remove this pool from its allocator's list of pools. */
1.134 ad 689: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 690: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 691: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 692:
1.178 elad 693: mutex_enter(&pool_allocator_lock);
694: if (--pp->pr_alloc->pa_refcnt == 0)
695: mutex_destroy(&pp->pr_alloc->pa_lock);
696: mutex_exit(&pool_allocator_lock);
697:
1.134 ad 698: mutex_enter(&pp->pr_lock);
1.101 thorpej 699:
1.134 ad 700: KASSERT(pp->pr_cache == NULL);
1.3 pk 701:
702: #ifdef DIAGNOSTIC
1.20 thorpej 703: if (pp->pr_nout != 0) {
1.80 provos 704: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 705: pp->pr_nout);
1.3 pk 706: }
707: #endif
1.1 pk 708:
1.101 thorpej 709: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
710: KASSERT(LIST_EMPTY(&pp->pr_partpages));
711:
1.3 pk 712: /* Remove all pages */
1.101 thorpej 713: LIST_INIT(&pq);
1.88 chs 714: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 715: pr_rmpage(pp, ph, &pq);
716:
1.134 ad 717: mutex_exit(&pp->pr_lock);
1.3 pk 718:
1.101 thorpej 719: pr_pagelist_free(pp, &pq);
1.134 ad 720: cv_destroy(&pp->pr_cv);
721: mutex_destroy(&pp->pr_lock);
1.1 pk 722: }
723:
1.68 thorpej 724: void
725: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
726: {
727:
728: /* XXX no locking -- must be used just after pool_init() */
729: #ifdef DIAGNOSTIC
730: if (pp->pr_drain_hook != NULL)
731: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
732: #endif
733: pp->pr_drain_hook = fn;
734: pp->pr_drain_hook_arg = arg;
735: }
736:
1.88 chs 737: static struct pool_item_header *
1.128 christos 738: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 739: {
740: struct pool_item_header *ph;
741:
742: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 743: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.134 ad 744: else
1.97 yamt 745: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 746:
747: return (ph);
748: }
1.1 pk 749:
750: /*
1.134 ad 751: * Grab an item from the pool.
1.1 pk 752: */
1.3 pk 753: void *
1.56 sommerfe 754: pool_get(struct pool *pp, int flags)
1.1 pk 755: {
756: struct pool_item *pi;
1.3 pk 757: struct pool_item_header *ph;
1.55 thorpej 758: void *v;
1.1 pk 759:
1.2 pk 760: #ifdef DIAGNOSTIC
1.184 rmind 761: if (pp->pr_itemsperpage == 0)
762: panic("pool_get: pool '%s': pr_itemsperpage is zero, "
763: "pool not initialized?", pp->pr_wchan);
1.185 rmind 764: if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
765: !cold && panicstr == NULL)
1.184 rmind 766: panic("pool '%s' is IPL_NONE, but called from "
767: "interrupt context\n", pp->pr_wchan);
768: #endif
1.155 ad 769: if (flags & PR_WAITOK) {
1.154 yamt 770: ASSERT_SLEEPABLE();
1.155 ad 771: }
1.1 pk 772:
1.134 ad 773: mutex_enter(&pp->pr_lock);
1.20 thorpej 774: startover:
775: /*
776: * Check to see if we've reached the hard limit. If we have,
777: * and we can wait, then wait until an item has been returned to
778: * the pool.
779: */
780: #ifdef DIAGNOSTIC
1.34 thorpej 781: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.134 ad 782: mutex_exit(&pp->pr_lock);
1.20 thorpej 783: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
784: }
785: #endif
1.34 thorpej 786: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 787: if (pp->pr_drain_hook != NULL) {
788: /*
789: * Since the drain hook is going to free things
790: * back to the pool, unlock, call the hook, re-lock,
791: * and check the hardlimit condition again.
792: */
1.134 ad 793: mutex_exit(&pp->pr_lock);
1.68 thorpej 794: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 795: mutex_enter(&pp->pr_lock);
1.68 thorpej 796: if (pp->pr_nout < pp->pr_hardlimit)
797: goto startover;
798: }
799:
1.29 sommerfe 800: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 801: /*
802: * XXX: A warning isn't logged in this case. Should
803: * it be?
804: */
805: pp->pr_flags |= PR_WANTED;
1.134 ad 806: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.20 thorpej 807: goto startover;
808: }
1.31 thorpej 809:
810: /*
811: * Log a message that the hard limit has been hit.
812: */
813: if (pp->pr_hardlimit_warning != NULL &&
814: ratecheck(&pp->pr_hardlimit_warning_last,
815: &pp->pr_hardlimit_ratecap))
816: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 817:
818: pp->pr_nfail++;
819:
1.134 ad 820: mutex_exit(&pp->pr_lock);
1.20 thorpej 821: return (NULL);
822: }
823:
1.3 pk 824: /*
825: * The convention we use is that if `curpage' is not NULL, then
826: * it points at a non-empty bucket. In particular, `curpage'
827: * never points at a page header which has PR_PHINPAGE set and
828: * has no items in its bucket.
829: */
1.20 thorpej 830: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 831: int error;
832:
1.20 thorpej 833: #ifdef DIAGNOSTIC
834: if (pp->pr_nitems != 0) {
1.134 ad 835: mutex_exit(&pp->pr_lock);
1.20 thorpej 836: printf("pool_get: %s: curpage NULL, nitems %u\n",
837: pp->pr_wchan, pp->pr_nitems);
1.80 provos 838: panic("pool_get: nitems inconsistent");
1.20 thorpej 839: }
840: #endif
841:
1.21 thorpej 842: /*
843: * Call the back-end page allocator for more memory.
844: * Release the pool lock, as the back-end page allocator
845: * may block.
846: */
1.113 yamt 847: error = pool_grow(pp, flags);
848: if (error != 0) {
1.21 thorpej 849: /*
1.55 thorpej 850: * We were unable to allocate a page or item
851: * header, but we released the lock during
852: * allocation, so perhaps items were freed
853: * back to the pool. Check for this case.
1.21 thorpej 854: */
855: if (pp->pr_curpage != NULL)
856: goto startover;
1.15 pk 857:
1.117 yamt 858: pp->pr_nfail++;
1.134 ad 859: mutex_exit(&pp->pr_lock);
1.117 yamt 860: return (NULL);
1.1 pk 861: }
1.3 pk 862:
1.20 thorpej 863: /* Start the allocation process over. */
864: goto startover;
1.3 pk 865: }
1.97 yamt 866: if (pp->pr_roflags & PR_NOTOUCH) {
867: #ifdef DIAGNOSTIC
868: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1.134 ad 869: mutex_exit(&pp->pr_lock);
1.97 yamt 870: panic("pool_get: %s: page empty", pp->pr_wchan);
871: }
872: #endif
873: v = pr_item_notouch_get(pp, ph);
874: } else {
1.102 chs 875: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 876: if (__predict_false(v == NULL)) {
1.134 ad 877: mutex_exit(&pp->pr_lock);
1.97 yamt 878: panic("pool_get: %s: page empty", pp->pr_wchan);
879: }
1.20 thorpej 880: #ifdef DIAGNOSTIC
1.97 yamt 881: if (__predict_false(pp->pr_nitems == 0)) {
1.134 ad 882: mutex_exit(&pp->pr_lock);
1.97 yamt 883: printf("pool_get: %s: items on itemlist, nitems %u\n",
884: pp->pr_wchan, pp->pr_nitems);
885: panic("pool_get: nitems inconsistent");
886: }
1.65 enami 887: #endif
1.56 sommerfe 888:
1.65 enami 889: #ifdef DIAGNOSTIC
1.97 yamt 890: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
891: panic("pool_get(%s): free list modified: "
892: "magic=%x; page %p; item addr %p\n",
893: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
894: }
1.3 pk 895: #endif
896:
1.97 yamt 897: /*
898: * Remove from item list.
899: */
1.102 chs 900: LIST_REMOVE(pi, pi_list);
1.97 yamt 901: }
1.20 thorpej 902: pp->pr_nitems--;
903: pp->pr_nout++;
1.6 thorpej 904: if (ph->ph_nmissing == 0) {
905: #ifdef DIAGNOSTIC
1.34 thorpej 906: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 907: panic("pool_get: nidle inconsistent");
908: #endif
909: pp->pr_nidle--;
1.88 chs 910:
911: /*
912: * This page was previously empty. Move it to the list of
913: * partially-full pages. This page is already curpage.
914: */
915: LIST_REMOVE(ph, ph_pagelist);
916: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 917: }
1.3 pk 918: ph->ph_nmissing++;
1.97 yamt 919: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 920: #ifdef DIAGNOSTIC
1.97 yamt 921: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 922: !LIST_EMPTY(&ph->ph_itemlist))) {
1.134 ad 923: mutex_exit(&pp->pr_lock);
1.21 thorpej 924: panic("pool_get: %s: nmissing inconsistent",
925: pp->pr_wchan);
926: }
927: #endif
1.3 pk 928: /*
1.88 chs 929: * This page is now full. Move it to the full list
930: * and select a new current page.
1.3 pk 931: */
1.88 chs 932: LIST_REMOVE(ph, ph_pagelist);
933: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
934: pool_update_curpage(pp);
1.1 pk 935: }
1.3 pk 936:
937: pp->pr_nget++;
1.20 thorpej 938:
939: /*
940: * If we have a low water mark and we are now below that low
941: * water mark, add more items to the pool.
942: */
1.53 thorpej 943: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 944: /*
945: * XXX: Should we log a warning? Should we set up a timeout
946: * to try again in a second or so? The latter could break
947: * a caller's assumptions about interrupt protection, etc.
948: */
949: }
950:
1.134 ad 951: mutex_exit(&pp->pr_lock);
1.125 ad 952: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
953: FREECHECK_OUT(&pp->pr_freecheck, v);
1.203.4.1 skrll 954: pool_redzone_fill(pp, v);
1.1 pk 955: return (v);
956: }
957:
958: /*
1.43 thorpej 959: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 960: */
1.43 thorpej 961: static void
1.101 thorpej 962: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 963: {
964: struct pool_item *pi = v;
1.3 pk 965: struct pool_item_header *ph;
966:
1.134 ad 967: KASSERT(mutex_owned(&pp->pr_lock));
1.203.4.1 skrll 968: pool_redzone_check(pp, v);
1.125 ad 969: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 970: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 971:
1.30 thorpej 972: #ifdef DIAGNOSTIC
1.34 thorpej 973: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 974: printf("pool %s: putting with none out\n",
975: pp->pr_wchan);
976: panic("pool_put");
977: }
978: #endif
1.3 pk 979:
1.121 yamt 980: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.3 pk 981: panic("pool_put: %s: page header missing", pp->pr_wchan);
982: }
1.28 thorpej 983:
1.3 pk 984: /*
985: * Return to item list.
986: */
1.97 yamt 987: if (pp->pr_roflags & PR_NOTOUCH) {
988: pr_item_notouch_put(pp, ph, v);
989: } else {
1.2 pk 990: #ifdef DIAGNOSTIC
1.97 yamt 991: pi->pi_magic = PI_MAGIC;
1.3 pk 992: #endif
1.32 chs 993: #ifdef DEBUG
1.97 yamt 994: {
995: int i, *ip = v;
1.32 chs 996:
1.97 yamt 997: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
998: *ip++ = PI_MAGIC;
999: }
1.32 chs 1000: }
1001: #endif
1002:
1.102 chs 1003: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1004: }
1.79 thorpej 1005: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1006: ph->ph_nmissing--;
1007: pp->pr_nput++;
1.20 thorpej 1008: pp->pr_nitems++;
1009: pp->pr_nout--;
1.3 pk 1010:
1011: /* Cancel "pool empty" condition if it exists */
1012: if (pp->pr_curpage == NULL)
1013: pp->pr_curpage = ph;
1014:
1015: if (pp->pr_flags & PR_WANTED) {
1016: pp->pr_flags &= ~PR_WANTED;
1.134 ad 1017: cv_broadcast(&pp->pr_cv);
1.3 pk 1018: }
1019:
1020: /*
1.88 chs 1021: * If this page is now empty, do one of two things:
1.21 thorpej 1022: *
1.88 chs 1023: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1024: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1025: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1026: * CLAIM.
1.21 thorpej 1027: *
1.88 chs 1028: * (2) Otherwise, move the page to the empty page list.
1029: *
1030: * Either way, select a new current page (so we use a partially-full
1031: * page if one is available).
1.3 pk 1032: */
1033: if (ph->ph_nmissing == 0) {
1.6 thorpej 1034: pp->pr_nidle++;
1.90 thorpej 1035: if (pp->pr_npages > pp->pr_minpages &&
1.152 yamt 1036: pp->pr_npages > pp->pr_maxpages) {
1.101 thorpej 1037: pr_rmpage(pp, ph, pq);
1.3 pk 1038: } else {
1.88 chs 1039: LIST_REMOVE(ph, ph_pagelist);
1040: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1041:
1.21 thorpej 1042: /*
1043: * Update the timestamp on the page. A page must
1044: * be idle for some period of time before it can
1045: * be reclaimed by the pagedaemon. This minimizes
1046: * ping-pong'ing for memory.
1.151 yamt 1047: *
1048: * note for 64-bit time_t: truncating to 32-bit is not
1049: * a problem for our usage.
1.21 thorpej 1050: */
1.151 yamt 1051: ph->ph_time = time_uptime;
1.1 pk 1052: }
1.88 chs 1053: pool_update_curpage(pp);
1.1 pk 1054: }
1.88 chs 1055:
1.21 thorpej 1056: /*
1.88 chs 1057: * If the page was previously completely full, move it to the
1058: * partially-full list and make it the current page. The next
1059: * allocation will get the item from this page, instead of
1060: * further fragmenting the pool.
1.21 thorpej 1061: */
1062: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1063: LIST_REMOVE(ph, ph_pagelist);
1064: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1065: pp->pr_curpage = ph;
1066: }
1.43 thorpej 1067: }
1068:
1.56 sommerfe 1069: void
1070: pool_put(struct pool *pp, void *v)
1071: {
1.101 thorpej 1072: struct pool_pagelist pq;
1073:
1074: LIST_INIT(&pq);
1.56 sommerfe 1075:
1.134 ad 1076: mutex_enter(&pp->pr_lock);
1.101 thorpej 1077: pool_do_put(pp, v, &pq);
1.134 ad 1078: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1079:
1.102 chs 1080: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1081: }
1.57 sommerfe 1082:
1.74 thorpej 1083: /*
1.113 yamt 1084: * pool_grow: grow a pool by a page.
1085: *
1086: * => called with pool locked.
1087: * => unlock and relock the pool.
1088: * => return with pool locked.
1089: */
1090:
1091: static int
1092: pool_grow(struct pool *pp, int flags)
1093: {
1094: struct pool_item_header *ph = NULL;
1095: char *cp;
1096:
1.134 ad 1097: mutex_exit(&pp->pr_lock);
1.113 yamt 1098: cp = pool_allocator_alloc(pp, flags);
1099: if (__predict_true(cp != NULL)) {
1100: ph = pool_alloc_item_header(pp, cp, flags);
1101: }
1102: if (__predict_false(cp == NULL || ph == NULL)) {
1103: if (cp != NULL) {
1104: pool_allocator_free(pp, cp);
1105: }
1.134 ad 1106: mutex_enter(&pp->pr_lock);
1.113 yamt 1107: return ENOMEM;
1108: }
1109:
1.134 ad 1110: mutex_enter(&pp->pr_lock);
1.113 yamt 1111: pool_prime_page(pp, cp, ph);
1112: pp->pr_npagealloc++;
1113: return 0;
1114: }
1115:
1116: /*
1.74 thorpej 1117: * Add N items to the pool.
1118: */
1119: int
1120: pool_prime(struct pool *pp, int n)
1121: {
1.75 simonb 1122: int newpages;
1.113 yamt 1123: int error = 0;
1.74 thorpej 1124:
1.134 ad 1125: mutex_enter(&pp->pr_lock);
1.74 thorpej 1126:
1127: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1128:
1129: while (newpages-- > 0) {
1.113 yamt 1130: error = pool_grow(pp, PR_NOWAIT);
1131: if (error) {
1.74 thorpej 1132: break;
1133: }
1134: pp->pr_minpages++;
1135: }
1136:
1137: if (pp->pr_minpages >= pp->pr_maxpages)
1138: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1139:
1.134 ad 1140: mutex_exit(&pp->pr_lock);
1.113 yamt 1141: return error;
1.74 thorpej 1142: }
1.55 thorpej 1143:
1144: /*
1.3 pk 1145: * Add a page's worth of items to the pool.
1.21 thorpej 1146: *
1147: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1148: */
1.55 thorpej 1149: static void
1.128 christos 1150: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1151: {
1152: struct pool_item *pi;
1.128 christos 1153: void *cp = storage;
1.125 ad 1154: const unsigned int align = pp->pr_align;
1155: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1156: int n;
1.36 pk 1157:
1.134 ad 1158: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1159:
1.66 thorpej 1160: #ifdef DIAGNOSTIC
1.121 yamt 1161: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1.150 skrll 1162: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1163: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1164: #endif
1.3 pk 1165:
1166: /*
1167: * Insert page header.
1168: */
1.88 chs 1169: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1170: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1171: ph->ph_page = storage;
1172: ph->ph_nmissing = 0;
1.151 yamt 1173: ph->ph_time = time_uptime;
1.88 chs 1174: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1175: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1176:
1.6 thorpej 1177: pp->pr_nidle++;
1178:
1.3 pk 1179: /*
1180: * Color this page.
1181: */
1.141 yamt 1182: ph->ph_off = pp->pr_curcolor;
1183: cp = (char *)cp + ph->ph_off;
1.3 pk 1184: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1185: pp->pr_curcolor = 0;
1186:
1187: /*
 1188: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1189: */
1190: if (ioff != 0)
1.128 christos 1191: cp = (char *)cp + align - ioff;
1.3 pk 1192:
1.125 ad 1193: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1194:
1.3 pk 1195: /*
1196: * Insert remaining chunks on the bucket list.
1197: */
1198: n = pp->pr_itemsperpage;
1.20 thorpej 1199: pp->pr_nitems += n;
1.3 pk 1200:
1.97 yamt 1201: if (pp->pr_roflags & PR_NOTOUCH) {
1.141 yamt 1202: pr_item_notouch_init(pp, ph);
1.97 yamt 1203: } else {
1204: while (n--) {
1205: pi = (struct pool_item *)cp;
1.78 thorpej 1206:
1.97 yamt 1207: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1208:
1.97 yamt 1209: /* Insert on page list */
1.102 chs 1210: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1211: #ifdef DIAGNOSTIC
1.97 yamt 1212: pi->pi_magic = PI_MAGIC;
1.3 pk 1213: #endif
1.128 christos 1214: cp = (char *)cp + pp->pr_size;
1.125 ad 1215:
1216: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1217: }
1.3 pk 1218: }
1219:
1220: /*
1221: * If the pool was depleted, point at the new page.
1222: */
1223: if (pp->pr_curpage == NULL)
1224: pp->pr_curpage = ph;
1225:
1226: if (++pp->pr_npages > pp->pr_hiwat)
1227: pp->pr_hiwat = pp->pr_npages;
1228: }
1229:
1.20 thorpej 1230: /*
1.52 thorpej 1231: * Used by pool_get() when nitems drops below the low water mark; it
1.88 chs 1232: * brings pr_nitems back up to the low water mark.
1.20 thorpej 1233: *
1.21 thorpej 1234: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1235: *
1.73 thorpej 1236: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1237: * with it locked.
1238: */
1239: static int
1.42 thorpej 1240: pool_catchup(struct pool *pp)
1.20 thorpej 1241: {
1242: int error = 0;
1243:
1.54 thorpej 1244: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1245: error = pool_grow(pp, PR_NOWAIT);
1246: if (error) {
1.20 thorpej 1247: break;
1248: }
1249: }
1.113 yamt 1250: return error;
1.20 thorpej 1251: }
1252:
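/*
 * pool_update_curpage:
 *
 *	Recompute pr_curpage: prefer a partially-full page, else fall
 *	back to an idle page; NULL only when the pool has no free items.
 */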
1.88 chs 1253: static void
1254: pool_update_curpage(struct pool *pp)
1255: {
1256:
1257: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1258: if (pp->pr_curpage == NULL) {
1259: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1260: }
1.168 yamt 1261: KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1262: (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1.88 chs 1263: }
1264:
1.3 pk 1265: void
1.42 thorpej 1266: pool_setlowat(struct pool *pp, int n)
1.3 pk 1267: {
1.15 pk 1268:
1.134 ad 1269: mutex_enter(&pp->pr_lock);
1.21 thorpej 1270:
1.3 pk 1271: pp->pr_minitems = n;
1.15 pk 1272: pp->pr_minpages = (n == 0)
1273: ? 0
1.18 thorpej 1274: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1275:
1276: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1277: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1278: /*
1279: * XXX: Should we log a warning? Should we set up a timeout
1280: * to try again in a second or so? The latter could break
1281: * a caller's assumptions about interrupt protection, etc.
1282: */
1283: }
1.21 thorpej 1284:
1.134 ad 1285: mutex_exit(&pp->pr_lock);
1.3 pk 1286: }
1287:
1288: void
1.42 thorpej 1289: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1290: {
1.15 pk 1291:
1.134 ad 1292: mutex_enter(&pp->pr_lock);
1.21 thorpej 1293:
1.15 pk 1294: pp->pr_maxpages = (n == 0)
1295: ? 0
1.18 thorpej 1296: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1297:
1.134 ad 1298: mutex_exit(&pp->pr_lock);
1.3 pk 1299: }
1300:
1.20 thorpej 1301: void
1.42 thorpej 1302: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1303: {
1304:
1.134 ad 1305: mutex_enter(&pp->pr_lock);
1.20 thorpej 1306:
1307: pp->pr_hardlimit = n;
1308: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1309: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1310: pp->pr_hardlimit_warning_last.tv_sec = 0;
1311: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1312:
1313: /*
1.21 thorpej 1314: * In-line version of pool_sethiwat(), because we don't want to
1315: * release the lock.
1.20 thorpej 1316: */
1317: pp->pr_maxpages = (n == 0)
1318: ? 0
1319: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1320:
1.134 ad 1321: mutex_exit(&pp->pr_lock);
1.20 thorpej 1322: }
1.3 pk 1323:
1324: /*
1325: * Release all complete pages that have not been used recently.
1.184 rmind 1326: *
1.197 jym 1327: * Must not be called from interrupt context.
1.3 pk 1328: */
1.66 thorpej 1329: int
1.56 sommerfe 1330: pool_reclaim(struct pool *pp)
1.3 pk 1331: {
1332: struct pool_item_header *ph, *phnext;
1.61 chs 1333: struct pool_pagelist pq;
1.151 yamt 1334: uint32_t curtime;
1.134 ad 1335: bool klock;
1336: int rv;
1.3 pk 1337:
1.197 jym 1338: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.184 rmind 1339:
1.68 thorpej 1340: if (pp->pr_drain_hook != NULL) {
1341: /*
1342: * The drain hook must be called with the pool unlocked.
1343: */
1344: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1345: }
1346:
1.134 ad 1347: /*
1.157 ad 1348: * XXXSMP Because we do not want to cause non-MPSAFE code
1349: * to block.
1.134 ad 1350: */
1351: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1352: pp->pr_ipl == IPL_SOFTSERIAL) {
1353: KERNEL_LOCK(1, NULL);
1354: klock = true;
1355: } else
1356: klock = false;
1357:
1358: /* Reclaim items from the pool's cache (if any). */
1359: if (pp->pr_cache != NULL)
1360: pool_cache_invalidate(pp->pr_cache);
1361:
1362: if (mutex_tryenter(&pp->pr_lock) == 0) {
1363: if (klock) {
1364: KERNEL_UNLOCK_ONE(NULL);
1365: }
1.66 thorpej 1366: return (0);
1.134 ad 1367: }
1.68 thorpej 1368:
1.88 chs 1369: LIST_INIT(&pq);
1.43 thorpej 1370:
1.151 yamt 1371: curtime = time_uptime;
1.21 thorpej 1372:
1.88 chs 1373: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1374: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1375:
1376: /* Check our minimum page claim */
1377: if (pp->pr_npages <= pp->pr_minpages)
1378: break;
1379:
1.88 chs 1380: KASSERT(ph->ph_nmissing == 0);
1.191 para 1381: if (curtime - ph->ph_time < pool_inactive_time)
1.88 chs 1382: continue;
1.21 thorpej 1383:
1.88 chs 1384: /*
1385: * If freeing this page would put us below
1386: * the low water mark, stop now.
1387: */
1388: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1389: pp->pr_minitems)
1390: break;
1.21 thorpej 1391:
1.88 chs 1392: pr_rmpage(pp, ph, &pq);
1.3 pk 1393: }
1394:
1.134 ad 1395: mutex_exit(&pp->pr_lock);
1396:
1397: if (LIST_EMPTY(&pq))
1398: rv = 0;
1399: else {
1400: pr_pagelist_free(pp, &pq);
1401: rv = 1;
1402: }
1403:
1404: if (klock) {
1405: KERNEL_UNLOCK_ONE(NULL);
1406: }
1.66 thorpej 1407:
1.134 ad 1408: return (rv);
1.3 pk 1409: }
1410:
1411: /*
1.197 jym 1412: * Drain pools, one at a time. The drained pool is returned within ppp.
1.131 ad 1413: *
1.134 ad 1414: * Note, must never be called from interrupt context.
1.3 pk 1415: */
1.197 jym 1416: bool
1417: pool_drain(struct pool **ppp)
1.3 pk 1418: {
1.197 jym 1419: bool reclaimed;
1.3 pk 1420: struct pool *pp;
1.134 ad 1421:
1.145 ad 1422: KASSERT(!TAILQ_EMPTY(&pool_head));
1.3 pk 1423:
1.61 chs 1424: pp = NULL;
1.134 ad 1425:
1426: /* Find next pool to drain, and add a reference. */
1427: mutex_enter(&pool_head_lock);
1428: do {
1429: if (drainpp == NULL) {
1.145 ad 1430: drainpp = TAILQ_FIRST(&pool_head);
1.134 ad 1431: }
1432: if (drainpp != NULL) {
1433: pp = drainpp;
1.145 ad 1434: drainpp = TAILQ_NEXT(pp, pr_poollist);
1.134 ad 1435: }
1436: /*
1437: * Skip completely idle pools. We depend on at least
1438: * one pool in the system being active.
1439: */
1440: } while (pp == NULL || pp->pr_npages == 0);
1441: pp->pr_refcnt++;
1442: mutex_exit(&pool_head_lock);
1443:
 1444: /* Drain the cache (if any) and the pool. */
1.186 pooka 1445: reclaimed = pool_reclaim(pp);
1.134 ad 1446:
 1447: /* Finally, release our reference on the pool. */
1448: mutex_enter(&pool_head_lock);
1449: pp->pr_refcnt--;
1450: cv_broadcast(&pool_busy);
1451: mutex_exit(&pool_head_lock);
1.186 pooka 1452:
1.197 jym 1453: if (ppp != NULL)
1454: *ppp = pp;
1455:
1.186 pooka 1456: return reclaimed;
1.3 pk 1457: }
1458:
1459: /*
1460: * Diagnostic helpers.
1461: */
1.21 thorpej 1462:
1.25 thorpej 1463: void
1.108 yamt 1464: pool_printall(const char *modif, void (*pr)(const char *, ...))
1465: {
1466: struct pool *pp;
1467:
1.145 ad 1468: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.108 yamt 1469: pool_printit(pp, modif, pr);
1470: }
1471: }
1472:
1473: void
1.42 thorpej 1474: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1475: {
1476:
1477: if (pp == NULL) {
1478: (*pr)("Must specify a pool to print.\n");
1479: return;
1480: }
1481:
1482: pool_print1(pp, modif, pr);
1483: }
1484:
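/*
 * pool_print_pagelist:
 *
 *	Print one of a pool's page lists, flagging any item whose
 *	free-list magic has been corrupted.
 */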
1.21 thorpej 1485: static void
1.124 yamt 1486: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1487: void (*pr)(const char *, ...))
1.88 chs 1488: {
1489: struct pool_item_header *ph;
1490: #ifdef DIAGNOSTIC
1491: struct pool_item *pi;
1492: #endif
1493:
1494: LIST_FOREACH(ph, pl, ph_pagelist) {
1.151 yamt 1495: (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1496: ph->ph_page, ph->ph_nmissing, ph->ph_time);
1.88 chs 1497: #ifdef DIAGNOSTIC
1.97 yamt 1498: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1499: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1500: if (pi->pi_magic != PI_MAGIC) {
1501: (*pr)("\t\t\titem %p, magic 0x%x\n",
1502: pi, pi->pi_magic);
1503: }
1.88 chs 1504: }
1505: }
1506: #endif
1507: }
1508: }
1509:
1510: static void
1.42 thorpej 1511: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1512: {
1.25 thorpej 1513: struct pool_item_header *ph;
1.134 ad 1514: pool_cache_t pc;
1515: pcg_t *pcg;
1516: pool_cache_cpu_t *cc;
1517: uint64_t cpuhit, cpumiss;
1.44 thorpej 1518: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1519: char c;
1520:
1521: while ((c = *modif++) != '\0') {
1522: if (c == 'l')
1523: print_log = 1;
1524: if (c == 'p')
1525: print_pagelist = 1;
1.44 thorpej 1526: if (c == 'c')
1527: print_cache = 1;
1.25 thorpej 1528: }
1529:
1.134 ad 1530: if ((pc = pp->pr_cache) != NULL) {
1531: (*pr)("POOL CACHE");
1532: } else {
1533: (*pr)("POOL");
1534: }
1535:
1536: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1537: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1538: pp->pr_roflags);
1.66 thorpej 1539: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1540: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1541: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1542: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1543: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1544:
1.134 ad 1545: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1546: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1547: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1548: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1549:
1550: if (print_pagelist == 0)
1551: goto skip_pagelist;
1552:
1.88 chs 1553: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1554: (*pr)("\n\tempty page list:\n");
1.97 yamt 1555: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1556: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1557: (*pr)("\n\tfull page list:\n");
1.97 yamt 1558: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1559: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1560: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1561: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1562:
1.25 thorpej 1563: if (pp->pr_curpage == NULL)
1564: (*pr)("\tno current page\n");
1565: else
1566: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1567:
1568: skip_pagelist:
1569: if (print_log == 0)
1570: goto skip_log;
1571:
1572: (*pr)("\n");
1.3 pk 1573:
1.25 thorpej 1574: skip_log:
1.44 thorpej 1575:
1.102 chs 1576: #define PR_GROUPLIST(pcg) \
1577: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1.142 ad 1578: for (i = 0; i < pcg->pcg_size; i++) { \
1.102 chs 1579: if (pcg->pcg_objects[i].pcgo_pa != \
1580: POOL_PADDR_INVALID) { \
1581: (*pr)("\t\t\t%p, 0x%llx\n", \
1582: pcg->pcg_objects[i].pcgo_va, \
1583: (unsigned long long) \
1584: pcg->pcg_objects[i].pcgo_pa); \
1585: } else { \
1586: (*pr)("\t\t\t%p\n", \
1587: pcg->pcg_objects[i].pcgo_va); \
1588: } \
1589: }
1590:
1.134 ad 1591: if (pc != NULL) {
1592: cpuhit = 0;
1593: cpumiss = 0;
1.183 ad 1594: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.134 ad 1595: if ((cc = pc->pc_cpus[i]) == NULL)
1596: continue;
1597: cpuhit += cc->cc_hits;
1598: cpumiss += cc->cc_misses;
1599: }
1600: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1601: (*pr)("\tcache layer hits %llu misses %llu\n",
1602: pc->pc_hits, pc->pc_misses);
1603: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1604: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1605: pc->pc_contended);
1606: (*pr)("\tcache layer empty groups %u full groups %u\n",
1607: pc->pc_nempty, pc->pc_nfull);
1608: if (print_cache) {
1609: (*pr)("\tfull cache groups:\n");
1610: for (pcg = pc->pc_fullgroups; pcg != NULL;
1611: pcg = pcg->pcg_next) {
1612: PR_GROUPLIST(pcg);
1613: }
1614: (*pr)("\tempty cache groups:\n");
1615: for (pcg = pc->pc_emptygroups; pcg != NULL;
1616: pcg = pcg->pcg_next) {
1617: PR_GROUPLIST(pcg);
1618: }
1.103 chs 1619: }
1.44 thorpej 1620: }
1.102 chs 1621: #undef PR_GROUPLIST
1.88 chs 1622: }
1623:
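/*
 * pool_chk_page:
 *
 *	Verify one page: the header/page address relationship and, for
 *	pools with on-page free lists, each free item's magic number and
 *	page membership. Returns nonzero on inconsistency.
 */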
1624: static int
1625: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1626: {
1627: struct pool_item *pi;
1.128 christos 1628: void *page;
1.88 chs 1629: int n;
1630:
1.121 yamt 1631: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1632: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1633: if (page != ph->ph_page &&
1634: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1635: if (label != NULL)
1636: printf("%s: ", label);
1637: printf("pool(%p:%s): page inconsistency: page %p;"
1638: " at page head addr %p (p %p)\n", pp,
1639: pp->pr_wchan, ph->ph_page,
1640: ph, page);
1641: return 1;
1642: }
1.88 chs 1643: }
1.3 pk 1644:
1.97 yamt 1645: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1646: return 0;
1647:
1.102 chs 1648: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1649: pi != NULL;
1.102 chs 1650: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1651:
1652: #ifdef DIAGNOSTIC
1653: if (pi->pi_magic != PI_MAGIC) {
1654: if (label != NULL)
1655: printf("%s: ", label);
1656: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1657: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1658: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1659: n, pi);
1.88 chs 1660: panic("pool");
1661: }
1662: #endif
1.121 yamt 1663: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1664: continue;
1665: }
1.128 christos 1666: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1667: if (page == ph->ph_page)
1668: continue;
1669:
1670: if (label != NULL)
1671: printf("%s: ", label);
1672: printf("pool(%p:%s): page inconsistency: page %p;"
1673: " item ordinal %d; addr %p (p %p)\n", pp,
1674: pp->pr_wchan, ph->ph_page,
1675: n, pi, page);
1676: return 1;
1677: }
1678: return 0;
1.3 pk 1679: }
1680:
1.88 chs 1681:
1.3 pk 1682: int
1.42 thorpej 1683: pool_chk(struct pool *pp, const char *label)
1.3 pk 1684: {
1685: struct pool_item_header *ph;
1686: int r = 0;
1687:
1.134 ad 1688: mutex_enter(&pp->pr_lock);
1.88 chs 1689: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1690: r = pool_chk_page(pp, label, ph);
1691: if (r) {
1692: goto out;
1693: }
1694: }
1695: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1696: r = pool_chk_page(pp, label, ph);
1697: if (r) {
1.3 pk 1698: goto out;
1699: }
1.88 chs 1700: }
1701: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1702: r = pool_chk_page(pp, label, ph);
1703: if (r) {
1.3 pk 1704: goto out;
1705: }
1706: }
1.88 chs 1707:
1.3 pk 1708: out:
1.134 ad 1709: mutex_exit(&pp->pr_lock);
1.3 pk 1710: return (r);
1.43 thorpej 1711: }
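/*
 * [Editor's sketch, not part of the original source.]  pool_chk() is a
 * debugging aid; a caller can verify a pool's page lists and free-list
 * magic numbers like this (`foo_pool' is hypothetical):
 *
 *	if (pool_chk(&foo_pool, __func__) != 0)
 *		panic("%s: foo_pool corrupt", __func__);
 */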
1712:
1713: /*
1714: * pool_cache_init:
1715: *
1716: * Initialize a pool cache.
1.134 ad 1717: */
1718: pool_cache_t
1719: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1720: const char *wchan, struct pool_allocator *palloc, int ipl,
1721: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1722: {
1723: pool_cache_t pc;
1724:
1725: pc = pool_get(&cache_pool, PR_WAITOK);
1726: if (pc == NULL)
1727: return NULL;
1728:
1729: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1730: palloc, ipl, ctor, dtor, arg);
1731:
1732: return pc;
1733: }
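/*
 * [Editor's sketch, not part of the original source.]  A minimal use of
 * pool_cache_init(), following the signature above; the item type,
 * cache name and constructor are hypothetical:
 *
 *	static pool_cache_t foo_cache;
 *
 *	static int
 *	foo_ctor(void *arg, void *obj, int flags)
 *	{
 *		memset(obj, 0, sizeof(struct foo));
 *		return 0;
 *	}
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE, foo_ctor, NULL, NULL);
 */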
1734:
1735: /*
1736: * pool_cache_bootstrap:
1.43 thorpej 1737: *
1.134 ad 1738: * Kernel-private version of pool_cache_init(). The caller
1739: * provides initial storage.
1.43 thorpej 1740: */
1741: void
1.134 ad 1742: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1743: u_int align_offset, u_int flags, const char *wchan,
1744: struct pool_allocator *palloc, int ipl,
1745: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 1746: void *arg)
1747: {
1.134 ad 1748: CPU_INFO_ITERATOR cii;
1.145 ad 1749: pool_cache_t pc1;
1.134 ad 1750: struct cpu_info *ci;
1751: struct pool *pp;
1752:
1753: pp = &pc->pc_pool;
1754: if (palloc == NULL && ipl == IPL_NONE)
1755: palloc = &pool_allocator_nointr;
1756: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.157 ad 1757: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1.43 thorpej 1758:
1.134 ad 1759: if (ctor == NULL) {
1760: ctor = (int (*)(void *, void *, int))nullop;
1761: }
1762: if (dtor == NULL) {
1763: dtor = (void (*)(void *, void *))nullop;
1764: }
1.43 thorpej 1765:
1.134 ad 1766: pc->pc_emptygroups = NULL;
1767: pc->pc_fullgroups = NULL;
1768: pc->pc_partgroups = NULL;
1.43 thorpej 1769: pc->pc_ctor = ctor;
1770: pc->pc_dtor = dtor;
1771: pc->pc_arg = arg;
1.134 ad 1772: pc->pc_hits = 0;
1.48 thorpej 1773: pc->pc_misses = 0;
1.134 ad 1774: pc->pc_nempty = 0;
1775: pc->pc_npart = 0;
1776: pc->pc_nfull = 0;
1777: pc->pc_contended = 0;
1778: pc->pc_refcnt = 0;
1.136 yamt 1779: pc->pc_freecheck = NULL;
1.134 ad 1780:
1.142 ad 1781: if ((flags & PR_LARGECACHE) != 0) {
1782: pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1.163 ad 1783: pc->pc_pcgpool = &pcg_large_pool;
1.142 ad 1784: } else {
1785: pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1.163 ad 1786: pc->pc_pcgpool = &pcg_normal_pool;
1.142 ad 1787: }
1788:
1.134 ad 1789: /* Allocate per-CPU caches. */
1790: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1791: pc->pc_ncpu = 0;
1.139 ad 1792: if (ncpu < 2) {
1.137 ad 1793: /* XXX For sparc: boot CPU is not attached yet. */
1794: pool_cache_cpu_init1(curcpu(), pc);
1795: } else {
1796: for (CPU_INFO_FOREACH(cii, ci)) {
1797: pool_cache_cpu_init1(ci, pc);
1798: }
1.134 ad 1799: }
1.145 ad 1800:
1801: /* Add to list of all pools. */
1802: if (__predict_true(!cold))
1.134 ad 1803: mutex_enter(&pool_head_lock);
1.145 ad 1804: TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1805: if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1806: break;
1807: }
1808: if (pc1 == NULL)
1809: TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1810: else
1811: TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1812: if (__predict_true(!cold))
1.134 ad 1813: mutex_exit(&pool_head_lock);
1.145 ad 1814:
1815: membar_sync();
1816: pp->pr_cache = pc;
1.43 thorpej 1817: }
1818:
1819: /*
1820: * pool_cache_destroy:
1821: *
1822: * Destroy a pool cache.
1823: */
1824: void
1.134 ad 1825: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 1826: {
1.191 para 1827:
1828: pool_cache_bootstrap_destroy(pc);
1829: pool_put(&cache_pool, pc);
1830: }
1831:
1832: /*
1833: * pool_cache_bootstrap_destroy:
1834: *
1835: * Destroy a pool cache.
1836: */
1837: void
1838: pool_cache_bootstrap_destroy(pool_cache_t pc)
1839: {
1.134 ad 1840: struct pool *pp = &pc->pc_pool;
1.175 jym 1841: u_int i;
1.134 ad 1842:
1843: /* Remove it from the global list. */
1844: mutex_enter(&pool_head_lock);
1845: while (pc->pc_refcnt != 0)
1846: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 1847: TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1.134 ad 1848: mutex_exit(&pool_head_lock);
1.43 thorpej 1849:
1850: /* First, invalidate the entire cache. */
1851: pool_cache_invalidate(pc);
1852:
1.134 ad 1853: /* Disassociate it from the pool. */
1854: mutex_enter(&pp->pr_lock);
1855: pp->pr_cache = NULL;
1856: mutex_exit(&pp->pr_lock);
1857:
1858: /* Destroy per-CPU data */
1.183 ad 1859: for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1.175 jym 1860: pool_cache_invalidate_cpu(pc, i);
1.134 ad 1861:
1862: /* Finally, destroy it. */
1863: mutex_destroy(&pc->pc_lock);
1864: pool_destroy(pp);
1865: }
1866:
1867: /*
1868: * pool_cache_cpu_init1:
1869: *
1870: * Called for each pool_cache whenever a new CPU is attached.
1871: */
1872: static void
1873: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1874: {
1875: pool_cache_cpu_t *cc;
1.137 ad 1876: int index;
1.134 ad 1877:
1.137 ad 1878: index = ci->ci_index;
1879:
1.183 ad 1880: KASSERT(index < __arraycount(pc->pc_cpus));
1.134 ad 1881:
1.137 ad 1882: if ((cc = pc->pc_cpus[index]) != NULL) {
1883: KASSERT(cc->cc_cpuindex == index);
1.134 ad 1884: return;
1885: }
1886:
1887: /*
1888: 	 * The first CPU is 'free' (its cache is embedded in the
1889: 	 * pool_cache itself).  This needs to be the case for
1890: 	 * bootstrap - we may not be able to allocate yet.
1890: */
1891: if (pc->pc_ncpu == 0) {
1892: cc = &pc->pc_cpu0;
1893: pc->pc_ncpu = 1;
1894: } else {
1895: mutex_enter(&pc->pc_lock);
1896: pc->pc_ncpu++;
1897: mutex_exit(&pc->pc_lock);
1898: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1899: }
1900:
1901: cc->cc_ipl = pc->pc_pool.pr_ipl;
1902: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1903: cc->cc_cache = pc;
1.137 ad 1904: cc->cc_cpuindex = index;
1.134 ad 1905: cc->cc_hits = 0;
1906: cc->cc_misses = 0;
1.169 yamt 1907: cc->cc_current = __UNCONST(&pcg_dummy);
1908: cc->cc_previous = __UNCONST(&pcg_dummy);
1.134 ad 1909:
1.137 ad 1910: pc->pc_cpus[index] = cc;
1.43 thorpej 1911: }
1912:
1.134 ad 1913: /*
1914: * pool_cache_cpu_init:
1915: *
1916: * Called whenever a new CPU is attached.
1917: */
1918: void
1919: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 1920: {
1.134 ad 1921: pool_cache_t pc;
1922:
1923: mutex_enter(&pool_head_lock);
1.145 ad 1924: TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1.134 ad 1925: pc->pc_refcnt++;
1926: mutex_exit(&pool_head_lock);
1.43 thorpej 1927:
1.134 ad 1928: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 1929:
1.134 ad 1930: mutex_enter(&pool_head_lock);
1931: pc->pc_refcnt--;
1932: cv_broadcast(&pool_busy);
1933: }
1934: mutex_exit(&pool_head_lock);
1.43 thorpej 1935: }
1936:
1.134 ad 1937: /*
1938: * pool_cache_reclaim:
1939: *
1940: * Reclaim memory from a pool cache.
1941: */
1942: bool
1943: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 1944: {
1945:
1.134 ad 1946: return pool_reclaim(&pc->pc_pool);
1947: }
1.43 thorpej 1948:
1.136 yamt 1949: static void
1950: pool_cache_destruct_object1(pool_cache_t pc, void *object)
1951: {
1952:
1953: (*pc->pc_dtor)(pc->pc_arg, object);
1954: pool_put(&pc->pc_pool, object);
1955: }
1956:
1.134 ad 1957: /*
1958: * pool_cache_destruct_object:
1959: *
1960: * Force destruction of an object and its release back into
1961: * the pool.
1962: */
1963: void
1964: pool_cache_destruct_object(pool_cache_t pc, void *object)
1965: {
1966:
1.136 yamt 1967: FREECHECK_IN(&pc->pc_freecheck, object);
1968:
1969: pool_cache_destruct_object1(pc, object);
1.43 thorpej 1970: }
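/*
 * [Editor's sketch, not part of the original source.]  A hypothetical
 * caller might use this when a constructed object must not be recycled
 * through the cache, e.g. because its backing state is gone:
 *
 *	if (foo_is_stale(obj))
 *		pool_cache_destruct_object(foo_cache, obj);
 *	else
 *		pool_cache_put(foo_cache, obj);
 */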
1971:
1.134 ad 1972: /*
1973: * pool_cache_invalidate_groups:
1974: *
1975: * Invalidate a chain of groups and destruct all objects.
1976: */
1.102 chs 1977: static void
1.134 ad 1978: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 1979: {
1.134 ad 1980: void *object;
1981: pcg_t *next;
1982: int i;
1983:
1984: for (; pcg != NULL; pcg = next) {
1985: next = pcg->pcg_next;
1986:
1987: for (i = 0; i < pcg->pcg_avail; i++) {
1988: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 1989: pool_cache_destruct_object1(pc, object);
1.134 ad 1990: }
1.102 chs 1991:
1.142 ad 1992: if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
1993: pool_put(&pcg_large_pool, pcg);
1994: } else {
1995: KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
1996: pool_put(&pcg_normal_pool, pcg);
1997: }
1.102 chs 1998: }
1999: }
2000:
1.43 thorpej 2001: /*
1.134 ad 2002: * pool_cache_invalidate:
1.43 thorpej 2003: *
1.134 ad 2004: * Invalidate a pool cache (destruct and release all of the
2005: * cached objects). Does not reclaim objects from the pool.
1.176 thorpej 2006: *
2007: * Note: for pool caches that provide constructed objects, it is
2008: * assumed that another level of synchronization exists between
2009: * the input to the constructor and the cache invalidation.
1.196 jym 2010: *
2011: * Invalidation is a costly process and must not be run from
2012: * interrupt context.
1.43 thorpej 2013: */
1.134 ad 2014: void
2015: pool_cache_invalidate(pool_cache_t pc)
2016: {
1.196 jym 2017: uint64_t where;
1.134 ad 2018: pcg_t *full, *empty, *part;
1.196 jym 2019:
2020: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.176 thorpej 2021:
1.177 jym 2022: if (ncpu < 2 || !mp_online) {
1.176 thorpej 2023: /*
2024: * We might be called early enough in the boot process
2025: * for the CPU data structures to not be fully initialized.
1.196 jym 2026: * In this case, transfer the content of the local CPU's
2027: * cache back into the global cache, as only this CPU is
2028: * currently running.
1.176 thorpej 2029: */
1.196 jym 2030: pool_cache_transfer(pc);
1.176 thorpej 2031: } else {
2032: /*
1.196 jym 2033: * Signal all CPUs that they must transfer their local
2034: * cache back to the global pool, then wait for the xcall to
2035: * complete.
1.176 thorpej 2036: */
1.196 jym 2037: where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
2038: pc, NULL);
1.176 thorpej 2039: xc_wait(where);
2040: }
1.196 jym 2041:
2042: /* Empty pool caches, then invalidate objects */
1.134 ad 2043: mutex_enter(&pc->pc_lock);
2044: full = pc->pc_fullgroups;
2045: empty = pc->pc_emptygroups;
2046: part = pc->pc_partgroups;
2047: pc->pc_fullgroups = NULL;
2048: pc->pc_emptygroups = NULL;
2049: pc->pc_partgroups = NULL;
2050: pc->pc_nfull = 0;
2051: pc->pc_nempty = 0;
2052: pc->pc_npart = 0;
2053: mutex_exit(&pc->pc_lock);
2054:
2055: pool_cache_invalidate_groups(pc, full);
2056: pool_cache_invalidate_groups(pc, empty);
2057: pool_cache_invalidate_groups(pc, part);
2058: }
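/*
 * [Editor's sketch, not part of the original source.]  Hypothetical
 * example: if constructed objects capture state from a template that
 * has just changed, the stale cached copies can be destructed in one
 * sweep (from thread context only, per the KASSERT above):
 *
 *	foo_update_template();
 *	pool_cache_invalidate(foo_cache);
 */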
2059:
1.175 jym 2060: /*
2061: * pool_cache_invalidate_cpu:
2062: *
2063: * Invalidate all cached objects bound to the CPU identified by
2064: * the given index.
2065: * It is the caller's responsibility to ensure that no operation
2066: * is taking place on this pool cache while the invalidation runs.
2067: * WARNING: as no inter-CPU locking is enforced, invalidating the
2068: * cached objects of a CPU other than the one currently running
2069: * may result in undefined behaviour.
2070: */
2071: static void
2072: pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2073: {
2074: pool_cache_cpu_t *cc;
2075: pcg_t *pcg;
2076:
2077: if ((cc = pc->pc_cpus[index]) == NULL)
2078: return;
2079:
2080: if ((pcg = cc->cc_current) != &pcg_dummy) {
2081: pcg->pcg_next = NULL;
2082: pool_cache_invalidate_groups(pc, pcg);
2083: }
2084: if ((pcg = cc->cc_previous) != &pcg_dummy) {
2085: pcg->pcg_next = NULL;
2086: pool_cache_invalidate_groups(pc, pcg);
2087: }
2088: if (cc != &pc->pc_cpu0)
2089: pool_put(&cache_cpu_pool, cc);
2090:
2091: }
2092:
1.134 ad 2093: void
2094: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2095: {
2096:
2097: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2098: }
2099:
2100: void
2101: pool_cache_setlowat(pool_cache_t pc, int n)
2102: {
2103:
2104: pool_setlowat(&pc->pc_pool, n);
2105: }
2106:
2107: void
2108: pool_cache_sethiwat(pool_cache_t pc, int n)
2109: {
2110:
2111: pool_sethiwat(&pc->pc_pool, n);
2112: }
2113:
2114: void
2115: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2116: {
2117:
2118: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2119: }
2120:
1.162 ad 2121: static bool __noinline
2122: pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
1.134 ad 2123: paddr_t *pap, int flags)
1.43 thorpej 2124: {
1.134 ad 2125: pcg_t *pcg, *cur;
2126: uint64_t ncsw;
2127: pool_cache_t pc;
1.43 thorpej 2128: void *object;
1.58 thorpej 2129:
1.168 yamt 2130: KASSERT(cc->cc_current->pcg_avail == 0);
2131: KASSERT(cc->cc_previous->pcg_avail == 0);
2132:
1.134 ad 2133: pc = cc->cc_cache;
2134: cc->cc_misses++;
1.43 thorpej 2135:
1.134 ad 2136: /*
2137: * Nothing was available locally. Try and grab a group
2138: * from the cache.
2139: */
1.162 ad 2140: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2141: ncsw = curlwp->l_ncsw;
2142: mutex_enter(&pc->pc_lock);
2143: pc->pc_contended++;
1.43 thorpej 2144:
1.134 ad 2145: /*
2146: * If we context switched while locking, then
2147: * our view of the per-CPU data is invalid:
2148: * retry.
2149: */
2150: if (curlwp->l_ncsw != ncsw) {
2151: mutex_exit(&pc->pc_lock);
1.162 ad 2152: return true;
1.43 thorpej 2153: }
1.102 chs 2154: }
1.43 thorpej 2155:
1.162 ad 2156: if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
1.43 thorpej 2157: /*
1.134 ad 2158: * If there's a full group, release our empty
2159: * group back to the cache. Install the full
2160: * group as cc_current and return.
1.43 thorpej 2161: */
1.162 ad 2162: if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
1.134 ad 2163: KASSERT(cur->pcg_avail == 0);
2164: cur->pcg_next = pc->pc_emptygroups;
2165: pc->pc_emptygroups = cur;
2166: pc->pc_nempty++;
1.87 thorpej 2167: }
1.142 ad 2168: KASSERT(pcg->pcg_avail == pcg->pcg_size);
1.134 ad 2169: cc->cc_current = pcg;
2170: pc->pc_fullgroups = pcg->pcg_next;
2171: pc->pc_hits++;
2172: pc->pc_nfull--;
2173: mutex_exit(&pc->pc_lock);
1.162 ad 2174: return true;
1.134 ad 2175: }
2176:
2177: /*
2178: * Nothing available locally or in cache. Take the slow
2179: * path: fetch a new object from the pool and construct
2180: * it.
2181: */
2182: pc->pc_misses++;
2183: mutex_exit(&pc->pc_lock);
1.162 ad 2184: splx(s);
1.134 ad 2185:
2186: object = pool_get(&pc->pc_pool, flags);
2187: *objectp = object;
1.162 ad 2188: if (__predict_false(object == NULL))
2189: return false;
1.125 ad 2190:
1.162 ad 2191: if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
1.134 ad 2192: pool_put(&pc->pc_pool, object);
2193: *objectp = NULL;
1.162 ad 2194: return false;
1.43 thorpej 2195: }
2196:
1.134 ad 2197: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2198: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2199:
1.134 ad 2200: if (pap != NULL) {
2201: #ifdef POOL_VTOPHYS
2202: *pap = POOL_VTOPHYS(object);
2203: #else
2204: *pap = POOL_PADDR_INVALID;
2205: #endif
1.102 chs 2206: }
1.43 thorpej 2207:
1.125 ad 2208: FREECHECK_OUT(&pc->pc_freecheck, object);
1.203.4.1 skrll 2209: pool_redzone_fill(&pc->pc_pool, object);
1.162 ad 2210: return false;
1.43 thorpej 2211: }
2212:
2213: /*
1.134 ad 2214: * pool_cache_get{,_paddr}:
1.43 thorpej 2215: *
1.134 ad 2216: * Get an object from a pool cache (optionally returning
2217: * the physical address of the object).
1.43 thorpej 2218: */
1.134 ad 2219: void *
2220: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2221: {
1.134 ad 2222: pool_cache_cpu_t *cc;
2223: pcg_t *pcg;
2224: void *object;
1.60 thorpej 2225: int s;
1.43 thorpej 2226:
1.184 rmind 2227: KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
1.185 rmind 2228: (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
1.190 jym 2229: "pool '%s' is IPL_NONE, but called from interrupt context\n",
2230: pc->pc_pool.pr_wchan);
1.184 rmind 2231:
1.155 ad 2232: if (flags & PR_WAITOK) {
1.154 yamt 2233: ASSERT_SLEEPABLE();
1.155 ad 2234: }
1.125 ad 2235:
1.162 ad 2236: /* Lock out interrupts and disable preemption. */
2237: s = splvm();
1.165 yamt 2238: while (/* CONSTCOND */ true) {
1.134 ad 2239: /* Try and allocate an object from the current group. */
1.162 ad 2240: cc = pc->pc_cpus[curcpu()->ci_index];
2241: KASSERT(cc->cc_cache == pc);
1.134 ad 2242: pcg = cc->cc_current;
1.162 ad 2243: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2244: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
1.162 ad 2245: if (__predict_false(pap != NULL))
1.134 ad 2246: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
1.148 yamt 2247: #if defined(DIAGNOSTIC)
1.134 ad 2248: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
1.163 ad 2249: KASSERT(pcg->pcg_avail < pcg->pcg_size);
1.134 ad 2250: KASSERT(object != NULL);
1.163 ad 2251: #endif
1.134 ad 2252: cc->cc_hits++;
1.162 ad 2253: splx(s);
1.134 ad 2254: FREECHECK_OUT(&pc->pc_freecheck, object);
1.203.4.1 skrll 2255: pool_redzone_fill(&pc->pc_pool, object);
1.134 ad 2256: return object;
1.43 thorpej 2257: }
2258:
2259: /*
1.134 ad 2260: * That failed. If the previous group isn't empty, swap
2261: * it with the current group and allocate from there.
1.43 thorpej 2262: */
1.134 ad 2263: pcg = cc->cc_previous;
1.162 ad 2264: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2265: cc->cc_previous = cc->cc_current;
2266: cc->cc_current = pcg;
2267: continue;
1.43 thorpej 2268: }
2269:
1.134 ad 2270: /*
2271: * Can't allocate from either group: try the slow path.
2272: * If get_slow() allocated an object for us, or if
1.162 ad 2273: * no more objects are available, it will return false.
1.134 ad 2274: * Otherwise, we need to retry.
2275: */
1.165 yamt 2276: if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2277: break;
2278: }
1.43 thorpej 2279:
1.134 ad 2280: return object;
1.51 thorpej 2281: }
2282:
1.162 ad 2283: static bool __noinline
2284: pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
1.51 thorpej 2285: {
1.200 pooka 2286: struct lwp *l = curlwp;
1.163 ad 2287: pcg_t *pcg, *cur;
1.134 ad 2288: uint64_t ncsw;
2289: pool_cache_t pc;
1.51 thorpej 2290:
1.168 yamt 2291: KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2292: KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2293:
1.134 ad 2294: pc = cc->cc_cache;
1.171 ad 2295: pcg = NULL;
1.134 ad 2296: cc->cc_misses++;
1.200 pooka 2297: ncsw = l->l_ncsw;
1.43 thorpej 2298:
1.171 ad 2299: /*
2300: * If there are no empty groups in the cache then allocate one
2301: * while still unlocked.
2302: */
2303: if (__predict_false(pc->pc_emptygroups == NULL)) {
2304: if (__predict_true(!pool_cache_disable)) {
2305: pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2306: }
1.200 pooka 2307: /*
2308: * If pool_get() blocked, then our view of
2309: * the per-CPU data is invalid: retry.
2310: */
2311: if (__predict_false(l->l_ncsw != ncsw)) {
2312: if (pcg != NULL) {
2313: pool_put(pc->pc_pcgpool, pcg);
2314: }
2315: return true;
2316: }
1.171 ad 2317: if (__predict_true(pcg != NULL)) {
2318: pcg->pcg_avail = 0;
2319: pcg->pcg_size = pc->pc_pcgsize;
2320: }
2321: }
2322:
1.162 ad 2323: /* Lock the cache. */
2324: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2325: mutex_enter(&pc->pc_lock);
2326: pc->pc_contended++;
1.162 ad 2327:
1.163 ad 2328: /*
2329: * If we context switched while locking, then our view of
2330: * the per-CPU data is invalid: retry.
2331: */
1.200 pooka 2332: if (__predict_false(l->l_ncsw != ncsw)) {
1.163 ad 2333: mutex_exit(&pc->pc_lock);
1.171 ad 2334: if (pcg != NULL) {
2335: pool_put(pc->pc_pcgpool, pcg);
2336: }
1.163 ad 2337: return true;
2338: }
1.162 ad 2339: }
1.102 chs 2340:
1.163 ad 2341: 	/* If we didn't allocate an empty group, take one from the cache. */
1.171 ad 2342: if (pcg == NULL && pc->pc_emptygroups != NULL) {
2343: pcg = pc->pc_emptygroups;
1.163 ad 2344: pc->pc_emptygroups = pcg->pcg_next;
2345: pc->pc_nempty--;
1.134 ad 2346: }
1.130 ad 2347:
1.162 ad 2348: /*
2349: 	 * If there's an empty group, release our full group back
2350: 	 * to the cache. Install the empty group on the local CPU
2351: * and return.
2352: */
1.163 ad 2353: if (pcg != NULL) {
1.134 ad 2354: KASSERT(pcg->pcg_avail == 0);
1.162 ad 2355: if (__predict_false(cc->cc_previous == &pcg_dummy)) {
1.146 ad 2356: cc->cc_previous = pcg;
2357: } else {
1.162 ad 2358: cur = cc->cc_current;
2359: if (__predict_true(cur != &pcg_dummy)) {
1.163 ad 2360: KASSERT(cur->pcg_avail == cur->pcg_size);
1.146 ad 2361: cur->pcg_next = pc->pc_fullgroups;
2362: pc->pc_fullgroups = cur;
2363: pc->pc_nfull++;
2364: }
2365: cc->cc_current = pcg;
2366: }
1.163 ad 2367: pc->pc_hits++;
1.134 ad 2368: mutex_exit(&pc->pc_lock);
1.162 ad 2369: return true;
1.102 chs 2370: }
1.105 christos 2371:
1.134 ad 2372: /*
1.162 ad 2373: * Nothing available locally or in cache, and we didn't
2374: * allocate an empty group. Take the slow path and destroy
2375: * the object here and now.
1.134 ad 2376: */
2377: pc->pc_misses++;
2378: mutex_exit(&pc->pc_lock);
1.162 ad 2379: splx(s);
2380: pool_cache_destruct_object(pc, object);
1.105 christos 2381:
1.162 ad 2382: return false;
1.134 ad 2383: }
1.102 chs 2384:
1.43 thorpej 2385: /*
1.134 ad 2386: * pool_cache_put{,_paddr}:
1.43 thorpej 2387: *
1.134 ad 2388: * Put an object back to the pool cache (optionally caching the
2389: * physical address of the object).
1.43 thorpej 2390: */
1.101 thorpej 2391: void
1.134 ad 2392: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2393: {
1.134 ad 2394: pool_cache_cpu_t *cc;
2395: pcg_t *pcg;
2396: int s;
1.101 thorpej 2397:
1.172 yamt 2398: KASSERT(object != NULL);
1.203.4.1 skrll 2399: pool_redzone_check(&pc->pc_pool, object);
1.134 ad 2400: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2401:
1.162 ad 2402: /* Lock out interrupts and disable preemption. */
2403: s = splvm();
1.165 yamt 2404: while (/* CONSTCOND */ true) {
1.134 ad 2405: /* If the current group isn't full, release it there. */
1.162 ad 2406: cc = pc->pc_cpus[curcpu()->ci_index];
2407: KASSERT(cc->cc_cache == pc);
1.134 ad 2408: pcg = cc->cc_current;
1.162 ad 2409: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2410: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2411: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2412: pcg->pcg_avail++;
2413: cc->cc_hits++;
1.162 ad 2414: splx(s);
1.134 ad 2415: return;
2416: }
1.43 thorpej 2417:
1.134 ad 2418: /*
1.162 ad 2419: * That failed. If the previous group isn't full, swap
1.134 ad 2420: * it with the current group and try again.
2421: */
2422: pcg = cc->cc_previous;
1.162 ad 2423: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2424: cc->cc_previous = cc->cc_current;
2425: cc->cc_current = pcg;
2426: continue;
2427: }
1.43 thorpej 2428:
1.134 ad 2429: /*
2430: * Can't free to either group: try the slow path.
2431: * If put_slow() releases the object for us, it
1.162 ad 2432: * will return false. Otherwise we need to retry.
1.134 ad 2433: */
1.165 yamt 2434: if (!pool_cache_put_slow(cc, s, object))
2435: break;
2436: }
1.43 thorpej 2437: }
2438:
2439: /*
1.196 jym 2440: * pool_cache_transfer:
1.43 thorpej 2441: *
1.134 ad 2442: * Transfer objects from the per-CPU cache to the global cache.
2443: * Run within a cross-call thread.
1.43 thorpej 2444: */
2445: static void
1.196 jym 2446: pool_cache_transfer(pool_cache_t pc)
1.43 thorpej 2447: {
1.134 ad 2448: pool_cache_cpu_t *cc;
2449: pcg_t *prev, *cur, **list;
1.162 ad 2450: int s;
1.134 ad 2451:
1.162 ad 2452: s = splvm();
2453: mutex_enter(&pc->pc_lock);
2454: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2455: cur = cc->cc_current;
1.169 yamt 2456: cc->cc_current = __UNCONST(&pcg_dummy);
1.134 ad 2457: prev = cc->cc_previous;
1.169 yamt 2458: cc->cc_previous = __UNCONST(&pcg_dummy);
1.162 ad 2459: if (cur != &pcg_dummy) {
1.142 ad 2460: if (cur->pcg_avail == cur->pcg_size) {
1.134 ad 2461: list = &pc->pc_fullgroups;
2462: pc->pc_nfull++;
2463: } else if (cur->pcg_avail == 0) {
2464: list = &pc->pc_emptygroups;
2465: pc->pc_nempty++;
2466: } else {
2467: list = &pc->pc_partgroups;
2468: pc->pc_npart++;
2469: }
2470: cur->pcg_next = *list;
2471: *list = cur;
2472: }
1.162 ad 2473: if (prev != &pcg_dummy) {
1.142 ad 2474: if (prev->pcg_avail == prev->pcg_size) {
1.134 ad 2475: list = &pc->pc_fullgroups;
2476: pc->pc_nfull++;
2477: } else if (prev->pcg_avail == 0) {
2478: list = &pc->pc_emptygroups;
2479: pc->pc_nempty++;
2480: } else {
2481: list = &pc->pc_partgroups;
2482: pc->pc_npart++;
2483: }
2484: prev->pcg_next = *list;
2485: *list = prev;
2486: }
2487: mutex_exit(&pc->pc_lock);
2488: splx(s);
1.3 pk 2489: }
1.66 thorpej 2490:
2491: /*
2492: * Pool backend allocators.
2493: *
2494: * Each pool has a backend allocator that handles allocation, deallocation,
2495: * and any additional draining that might be needed.
2496: *
2497: * We provide two standard allocators:
2498: *
2499: * pool_allocator_kmem - the default when no allocator is specified
2500: *
2501: * pool_allocator_nointr - used for pools that will not be accessed
2502: * in interrupt context.
2503: */
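/*
 * [Editor's sketch, not part of the original source.]  A pool that
 * needs pages from a private source can supply its own allocator;
 * the backend functions below (my_backend_*) are hypothetical:
 *
 *	static void *
 *	foo_page_alloc(struct pool *pp, int flags)
 *	{
 *		return my_backend_alloc(pp->pr_alloc->pa_pagesz,
 *		    (flags & PR_WAITOK) != 0);
 *	}
 *
 *	static void
 *	foo_page_free(struct pool *pp, void *v)
 *	{
 *		my_backend_free(v, pp->pr_alloc->pa_pagesz);
 *	}
 *
 *	static struct pool_allocator foo_allocator = {
 *		.pa_alloc = foo_page_alloc,
 *		.pa_free = foo_page_free,
 *		.pa_pagesz = 0		(0 selects the default page size)
 *	};
 */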
2504: void *pool_page_alloc(struct pool *, int);
2505: void pool_page_free(struct pool *, void *);
2506:
1.112 bjh21 2507: #ifdef POOL_SUBPAGE
2508: struct pool_allocator pool_allocator_kmem_fullpage = {
1.192 rmind 2509: .pa_alloc = pool_page_alloc,
2510: .pa_free = pool_page_free,
2511: .pa_pagesz = 0
1.112 bjh21 2512: };
2513: #else
1.66 thorpej 2514: struct pool_allocator pool_allocator_kmem = {
1.191 para 2515: .pa_alloc = pool_page_alloc,
2516: .pa_free = pool_page_free,
2517: .pa_pagesz = 0
1.66 thorpej 2518: };
1.112 bjh21 2519: #endif
1.66 thorpej 2520:
1.112 bjh21 2521: #ifdef POOL_SUBPAGE
2522: struct pool_allocator pool_allocator_nointr_fullpage = {
1.194 para 2523: .pa_alloc = pool_page_alloc,
2524: .pa_free = pool_page_free,
1.192 rmind 2525: .pa_pagesz = 0
1.112 bjh21 2526: };
2527: #else
1.66 thorpej 2528: struct pool_allocator pool_allocator_nointr = {
1.191 para 2529: .pa_alloc = pool_page_alloc,
2530: .pa_free = pool_page_free,
2531: .pa_pagesz = 0
1.66 thorpej 2532: };
1.112 bjh21 2533: #endif
1.66 thorpej 2534:
2535: #ifdef POOL_SUBPAGE
2536: void *pool_subpage_alloc(struct pool *, int);
2537: void pool_subpage_free(struct pool *, void *);
2538:
1.112 bjh21 2539: struct pool_allocator pool_allocator_kmem = {
1.193 he 2540: .pa_alloc = pool_subpage_alloc,
2541: .pa_free = pool_subpage_free,
2542: .pa_pagesz = POOL_SUBPAGE
1.112 bjh21 2543: };
2544:
2545: struct pool_allocator pool_allocator_nointr = {
1.192 rmind 2546: .pa_alloc = pool_subpage_alloc,
2547: .pa_free = pool_subpage_free,
2548: .pa_pagesz = POOL_SUBPAGE
1.66 thorpej 2549: };
2550: #endif /* POOL_SUBPAGE */
2551:
1.117 yamt 2552: static void *
2553: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2554: {
1.117 yamt 2555: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2556: void *res;
2557:
1.117 yamt 2558: res = (*pa->pa_alloc)(pp, flags);
2559: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2560: /*
1.117 yamt 2561: * We only run the drain hook here if PR_NOWAIT.
2562: * In other cases, the hook will be run in
2563: * pool_reclaim().
1.66 thorpej 2564: */
1.117 yamt 2565: if (pp->pr_drain_hook != NULL) {
2566: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2567: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2568: }
1.117 yamt 2569: }
2570: return res;
1.66 thorpej 2571: }
2572:
1.117 yamt 2573: static void
1.66 thorpej 2574: pool_allocator_free(struct pool *pp, void *v)
2575: {
2576: struct pool_allocator *pa = pp->pr_alloc;
2577:
2578: (*pa->pa_free)(pp, v);
2579: }
2580:
2581: void *
1.124 yamt 2582: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2583: {
1.192 rmind 2584: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
1.191 para 2585: vmem_addr_t va;
1.192 rmind 2586: int ret;
1.191 para 2587:
1.192 rmind 2588: ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2589: vflags | VM_INSTANTFIT, &va);
1.66 thorpej 2590:
1.192 rmind 2591: return ret ? NULL : (void *)va;
1.66 thorpej 2592: }
2593:
2594: void
1.124 yamt 2595: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2596: {
2597:
1.191 para 2598: uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
1.98 yamt 2599: }
2600:
2601: static void *
1.124 yamt 2602: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2603: {
1.192 rmind 2604: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2605: vmem_addr_t va;
2606: int ret;
1.191 para 2607:
1.192 rmind 2608: ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2609: vflags | VM_INSTANTFIT, &va);
1.98 yamt 2610:
1.192 rmind 2611: return ret ? NULL : (void *)va;
1.98 yamt 2612: }
2613:
2614: static void
1.124 yamt 2615: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2616: {
2617:
1.192 rmind 2618: vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
1.66 thorpej 2619: }
2620:
1.203.4.1 skrll 2621: #ifdef POOL_REDZONE
2622: #if defined(_LP64)
2623: # define PRIME 0x9e37fffffffc0000UL
2624: #else /* defined(_LP64) */
2625: # define PRIME 0x9e3779b1
2626: #endif /* defined(_LP64) */
2627: #define STATIC_BYTE 0xFE
2628: CTASSERT(POOL_REDZONE_SIZE > 1);
2629:
2630: static inline uint8_t
2631: pool_pattern_generate(const void *p)
2632: {
2633: 	return (uint8_t)((((uintptr_t)p) * PRIME)
2634: 	    >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
2635: }
2636:
2637: static void
2638: pool_redzone_init(struct pool *pp, size_t requested_size)
2639: {
2640: size_t nsz;
2641:
2642: if (pp->pr_roflags & PR_NOTOUCH) {
2643: pp->pr_reqsize = 0;
2644: pp->pr_redzone = false;
2645: return;
2646: }
2647:
2648: /*
2649: * We may have extended the requested size earlier; check if
2650: * there's naturally space in the padding for a red zone.
2651: */
2652: if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) {
2653: pp->pr_reqsize = requested_size;
2654: pp->pr_redzone = true;
2655: return;
2656: }
2657:
2658: /*
2659: * No space in the natural padding; check if we can extend a
2660: * bit the size of the pool.
2661: */
2662: nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align);
2663: if (nsz <= pp->pr_alloc->pa_pagesz) {
2664: /* Ok, we can */
2665: pp->pr_size = nsz;
2666: pp->pr_reqsize = requested_size;
2667: pp->pr_redzone = true;
2668: } else {
2669: /* No space for a red zone... snif :'( */
2670: pp->pr_reqsize = 0;
2671: pp->pr_redzone = false;
2672: printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
2673: }
2674: }
2675:
2676: static void
2677: pool_redzone_fill(struct pool *pp, void *p)
2678: {
2679: uint8_t *cp, pat;
2680: const uint8_t *ep;
2681:
2682: if (!pp->pr_redzone)
2683: return;
2684:
2685: cp = (uint8_t *)p + pp->pr_reqsize;
2686: ep = cp + POOL_REDZONE_SIZE;
2687:
2688: /*
2689: * We really don't want the first byte of the red zone to be '\0';
2690: * an off-by-one in a string may not be properly detected.
2691: */
2692: pat = pool_pattern_generate(cp);
2693: *cp = (pat == '\0') ? STATIC_BYTE: pat;
2694: cp++;
2695:
2696: while (cp < ep) {
2697: *cp = pool_pattern_generate(cp);
2698: cp++;
2699: }
2700: }
2701:
2702: static void
2703: pool_redzone_check(struct pool *pp, void *p)
2704: {
2705: uint8_t *cp, pat, expected;
2706: const uint8_t *ep;
2707:
2708: if (!pp->pr_redzone)
2709: return;
2710:
2711: cp = (uint8_t *)p + pp->pr_reqsize;
2712: ep = cp + POOL_REDZONE_SIZE;
2713:
2714: pat = pool_pattern_generate(cp);
2715: expected = (pat == '\0') ? STATIC_BYTE: pat;
2716: if (expected != *cp) {
2717: panic("%s: %p: 0x%02x != 0x%02x\n",
2718: __func__, cp, *cp, expected);
2719: }
2720: cp++;
2721:
2722: while (cp < ep) {
2723: expected = pool_pattern_generate(cp);
2724: if (*cp != expected) {
2725: panic("%s: %p: 0x%02x != 0x%02x\n",
2726: __func__, cp, *cp, expected);
2727: }
2728: cp++;
2729: }
2730: }
2731:
2732: #endif /* POOL_REDZONE */
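/*
 * [Editor's sketch, not part of the original source.]  Illustration of
 * what the red zone catches: with a requested size of 8, a 9th byte
 * written through the returned pointer lands on the first red-zone
 * byte (which is never '\0'), so the next put panics in
 * pool_redzone_check():
 *
 *	char *p = pool_get(&foo_pool, PR_NOWAIT);
 *	memset(p, 0, 9);		<- one byte past the requested size
 *	pool_put(&foo_pool, p);		<- panics in pool_redzone_check()
 */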
2733:
2734:
1.66 thorpej 2735: #ifdef POOL_SUBPAGE
2736: /* Sub-page allocator, for machines with large hardware pages. */
2737: void *
2738: pool_subpage_alloc(struct pool *pp, int flags)
2739: {
1.134 ad 2740: return pool_get(&psppool, flags);
1.66 thorpej 2741: }
2742:
2743: void
2744: pool_subpage_free(struct pool *pp, void *v)
2745: {
2746: pool_put(&psppool, v);
2747: }
2748:
1.112 bjh21 2749: #endif /* POOL_SUBPAGE */
1.141 yamt 2750:
2751: #if defined(DDB)
2752: static bool
2753: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2754: {
2755:
2756: return (uintptr_t)ph->ph_page <= addr &&
2757: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2758: }
2759:
1.143 yamt 2760: static bool
2761: pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2762: {
2763:
2764: return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2765: }
2766:
2767: static bool
2768: pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2769: {
2770: int i;
2771:
2772: if (pcg == NULL) {
2773: return false;
2774: }
1.144 yamt 2775: for (i = 0; i < pcg->pcg_avail; i++) {
1.143 yamt 2776: if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2777: return true;
2778: }
2779: }
2780: return false;
2781: }
2782:
2783: static bool
2784: pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2785: {
2786:
2787: if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2788: unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2789: pool_item_bitmap_t *bitmap =
2790: ph->ph_bitmap + (idx / BITMAP_SIZE);
2791: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2792:
2793: return (*bitmap & mask) == 0;
2794: } else {
2795: struct pool_item *pi;
2796:
2797: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2798: if (pool_in_item(pp, pi, addr)) {
2799: return false;
2800: }
2801: }
2802: return true;
2803: }
2804: }
2805:
1.141 yamt 2806: void
2807: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2808: {
2809: struct pool *pp;
2810:
1.145 ad 2811: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.141 yamt 2812: struct pool_item_header *ph;
2813: uintptr_t item;
1.143 yamt 2814: bool allocated = true;
2815: bool incache = false;
2816: bool incpucache = false;
2817: char cpucachestr[32];
1.141 yamt 2818:
2819: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2820: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2821: if (pool_in_page(pp, ph, addr)) {
2822: goto found;
2823: }
2824: }
2825: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2826: if (pool_in_page(pp, ph, addr)) {
1.143 yamt 2827: allocated =
2828: pool_allocated(pp, ph, addr);
2829: goto found;
2830: }
2831: }
2832: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2833: if (pool_in_page(pp, ph, addr)) {
2834: allocated = false;
1.141 yamt 2835: goto found;
2836: }
2837: }
2838: continue;
2839: } else {
2840: ph = pr_find_pagehead_noalign(pp, (void *)addr);
2841: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2842: continue;
2843: }
1.143 yamt 2844: allocated = pool_allocated(pp, ph, addr);
1.141 yamt 2845: }
2846: found:
1.143 yamt 2847: if (allocated && pp->pr_cache) {
2848: pool_cache_t pc = pp->pr_cache;
2849: struct pool_cache_group *pcg;
2850: int i;
2851:
2852: for (pcg = pc->pc_fullgroups; pcg != NULL;
2853: pcg = pcg->pcg_next) {
2854: if (pool_in_cg(pp, pcg, addr)) {
2855: incache = true;
2856: goto print;
2857: }
2858: }
1.183 ad 2859: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.143 yamt 2860: pool_cache_cpu_t *cc;
2861:
2862: if ((cc = pc->pc_cpus[i]) == NULL) {
2863: continue;
2864: }
2865: if (pool_in_cg(pp, cc->cc_current, addr) ||
2866: pool_in_cg(pp, cc->cc_previous, addr)) {
2867: struct cpu_info *ci =
1.170 ad 2868: cpu_lookup(i);
1.143 yamt 2869:
2870: incpucache = true;
2871: snprintf(cpucachestr,
2872: sizeof(cpucachestr),
2873: "cached by CPU %u",
1.153 martin 2874: ci->ci_index);
1.143 yamt 2875: goto print;
2876: }
2877: }
2878: }
2879: print:
1.141 yamt 2880: item = (uintptr_t)ph->ph_page + ph->ph_off;
2881: item = item + rounddown(addr - item, pp->pr_size);
1.143 yamt 2882: (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
1.141 yamt 2883: (void *)addr, item, (size_t)(addr - item),
1.143 yamt 2884: pp->pr_wchan,
2885: incpucache ? cpucachestr :
2886: incache ? "cached" : allocated ? "allocated" : "free");
1.141 yamt 2887: }
2888: }
2889: #endif /* defined(DDB) */
1.203 joerg 2890:
2891: static int
2892: pool_sysctl(SYSCTLFN_ARGS)
2893: {
2894: struct pool_sysctl data;
2895: struct pool *pp;
2896: struct pool_cache *pc;
2897: pool_cache_cpu_t *cc;
2898: int error;
2899: size_t i, written;
2900:
2901: if (oldp == NULL) {
2902: *oldlenp = 0;
2903: TAILQ_FOREACH(pp, &pool_head, pr_poollist)
2904: *oldlenp += sizeof(data);
2905: return 0;
2906: }
2907:
2908: memset(&data, 0, sizeof(data));
2909: error = 0;
2910: written = 0;
2911: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2912: if (written + sizeof(data) > *oldlenp)
2913: break;
2914: strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
2915: data.pr_pagesize = pp->pr_alloc->pa_pagesz;
2916: data.pr_flags = pp->pr_roflags | pp->pr_flags;
2917: #define COPY(field) data.field = pp->field
2918: COPY(pr_size);
2919:
2920: COPY(pr_itemsperpage);
2921: COPY(pr_nitems);
2922: COPY(pr_nout);
2923: COPY(pr_hardlimit);
2924: COPY(pr_npages);
2925: COPY(pr_minpages);
2926: COPY(pr_maxpages);
2927:
2928: COPY(pr_nget);
2929: COPY(pr_nfail);
2930: COPY(pr_nput);
2931: COPY(pr_npagealloc);
2932: COPY(pr_npagefree);
2933: COPY(pr_hiwat);
2934: COPY(pr_nidle);
2935: #undef COPY
2936:
2937: data.pr_cache_nmiss_pcpu = 0;
2938: data.pr_cache_nhit_pcpu = 0;
2939: if (pp->pr_cache) {
2940: pc = pp->pr_cache;
2941: data.pr_cache_meta_size = pc->pc_pcgsize;
2942: data.pr_cache_nfull = pc->pc_nfull;
2943: data.pr_cache_npartial = pc->pc_npart;
2944: data.pr_cache_nempty = pc->pc_nempty;
2945: data.pr_cache_ncontended = pc->pc_contended;
2946: data.pr_cache_nmiss_global = pc->pc_misses;
2947: data.pr_cache_nhit_global = pc->pc_hits;
2948: for (i = 0; i < pc->pc_ncpu; ++i) {
2949: cc = pc->pc_cpus[i];
2950: if (cc == NULL)
2951: continue;
1.203.4.2! skrll 2952: data.pr_cache_nmiss_pcpu += cc->cc_misses;
! 2953: data.pr_cache_nhit_pcpu += cc->cc_hits;
1.203 joerg 2954: }
2955: } else {
2956: data.pr_cache_meta_size = 0;
2957: data.pr_cache_nfull = 0;
2958: data.pr_cache_npartial = 0;
2959: data.pr_cache_nempty = 0;
2960: data.pr_cache_ncontended = 0;
2961: data.pr_cache_nmiss_global = 0;
2962: data.pr_cache_nhit_global = 0;
2963: }
2964:
2965: error = sysctl_copyout(l, &data, oldp, sizeof(data));
2966: if (error)
2967: break;
2968: written += sizeof(data);
2969: oldp = (char *)oldp + sizeof(data);
2970: }
2971:
2972: *oldlenp = written;
2973: return error;
2974: }
2975:
2976: SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
2977: {
2978: const struct sysctlnode *rnode = NULL;
2979:
2980: sysctl_createv(clog, 0, NULL, &rnode,
2981: CTLFLAG_PERMANENT,
2982: CTLTYPE_STRUCT, "pool",
2983: SYSCTL_DESCR("Get pool statistics"),
2984: pool_sysctl, 0, NULL, 0,
2985: CTL_KERN, CTL_CREATE, CTL_EOL);
2986: }
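/*
 * [Editor's sketch, not part of the original source.]  A userland
 * reader for the kern.pool node created above; the two-call pattern
 * mirrors the oldp == NULL branch of pool_sysctl():
 *
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <inttypes.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	size_t len;
 *	struct pool_sysctl *data;
 *
 *	if (sysctlbyname("kern.pool", NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl kern.pool");
 *	if ((data = malloc(len)) == NULL)
 *		err(1, "malloc");
 *	if (sysctlbyname("kern.pool", data, &len, NULL, 0) == -1)
 *		err(1, "sysctl kern.pool");
 *	for (size_t i = 0; i < len / sizeof(*data); i++)
 *		printf("%s: nget=%" PRIu64 "\n",
 *		    data[i].pr_wchan, data[i].pr_nget);
 */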