Annotation of src/sys/kern/subr_pool.c, Revision 1.222
1.222 ! kamil 1: /* $NetBSD: subr_pool.c,v 1.221 2018/01/12 18:54:37 para Exp $ */
1.1 pk 2:
3: /*-
1.204 maxv 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
1.183 ad 5: * The NetBSD Foundation, Inc.
1.1 pk 6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 9: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.204 maxv 10: * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
11: * Maxime Villard.
1.1 pk 12: *
13: * Redistribution and use in source and binary forms, with or without
14: * modification, are permitted provided that the following conditions
15: * are met:
16: * 1. Redistributions of source code must retain the above copyright
17: * notice, this list of conditions and the following disclaimer.
18: * 2. Redistributions in binary form must reproduce the above copyright
19: * notice, this list of conditions and the following disclaimer in the
20: * documentation and/or other materials provided with the distribution.
21: *
22: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32: * POSSIBILITY OF SUCH DAMAGE.
33: */
1.64 lukem 34:
35: #include <sys/cdefs.h>
1.222 ! kamil 36: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.221 2018/01/12 18:54:37 para Exp $");
1.24 scottr 37:
1.205 pooka 38: #ifdef _KERNEL_OPT
1.141 yamt 39: #include "opt_ddb.h"
1.28 thorpej 40: #include "opt_lockdebug.h"
1.205 pooka 41: #endif
1.1 pk 42:
43: #include <sys/param.h>
44: #include <sys/systm.h>
1.203 joerg 45: #include <sys/sysctl.h>
1.135 yamt 46: #include <sys/bitops.h>
1.1 pk 47: #include <sys/proc.h>
48: #include <sys/errno.h>
49: #include <sys/kernel.h>
1.191 para 50: #include <sys/vmem.h>
1.1 pk 51: #include <sys/pool.h>
1.20 thorpej 52: #include <sys/syslog.h>
1.125 ad 53: #include <sys/debug.h>
1.134 ad 54: #include <sys/lockdebug.h>
55: #include <sys/xcall.h>
56: #include <sys/cpu.h>
1.145 ad 57: #include <sys/atomic.h>
1.3 pk 58:
1.187 uebayasi 59: #include <uvm/uvm_extern.h>
1.3 pk 60:
1.1 pk 61: /*
62: * Pool resource management utility.
1.3 pk 63: *
1.88 chs 64: * Memory is allocated in pages which are split into pieces according to
65: * the pool item size. Each page is kept on one of three lists in the
66: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
67: * for empty, full and partially-full pages respectively. The individual
68: * pool items are on a linked list headed by `ph_itemlist' in each page
69: * header. The memory for building the page list is either taken from
70: * the allocated pages themselves (for small pool items) or taken from
71: * an internal pool of page headers (`phpool').
1.1 pk 72: */
73:
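/*
 * Illustrative sketch (not part of this file): a typical consumer
 * initializes a pool once, then gets and puts fixed-size items from
 * it.  "struct foo", foo_pool and "foopl" are hypothetical names.
 */
#if 0 /* example only */
static struct pool foo_pool;

static void
foo_pool_example(void)
{
	struct foo *f;

	/* A NULL allocator selects the default page allocator. */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
	    "foopl", NULL, IPL_NONE);

	f = pool_get(&foo_pool, PR_WAITOK);	/* may sleep */
	/* ... use f ... */
	pool_put(&foo_pool, f);

	pool_destroy(&foo_pool);
}
#endif
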
1.221 para     74: /* List of all pools. Non-static, as it is needed by 'vmstat -m'. */
1.202 abs 75: TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.134 ad 76:
1.3 pk 77: /* Private pool for page header structures */
1.97 yamt 78: #define PHPOOL_MAX 8
79: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 80: #define PHPOOL_FREELIST_NELEM(idx) \
81: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
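/*
 * With the 32-bit bitmap words used below, the freelist sizes work out
 * to 0 for index 0 (the plain header pool, for pools without
 * PR_NOTOUCH) and 64, 128, 256, ..., 4096 items for indices 1..7.
 */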
1.3 pk 82:
1.62 bjh21 83: #ifdef POOL_SUBPAGE
84: /* Pool of subpages for use by normal pools. */
85: static struct pool psppool;
86: #endif
87:
1.204 maxv 88: #ifdef POOL_REDZONE
89: # define POOL_REDZONE_SIZE 2
90: static void pool_redzone_init(struct pool *, size_t);
91: static void pool_redzone_fill(struct pool *, void *);
92: static void pool_redzone_check(struct pool *, void *);
93: #else
94: # define pool_redzone_init(pp, sz) /* NOTHING */
95: # define pool_redzone_fill(pp, ptr) /* NOTHING */
96: # define pool_redzone_check(pp, ptr) /* NOTHING */
97: #endif
98:
1.98 yamt 99: static void *pool_page_alloc_meta(struct pool *, int);
100: static void pool_page_free_meta(struct pool *, void *);
101:
102: /* allocator for pool metadata */
1.134 ad 103: struct pool_allocator pool_allocator_meta = {
1.191 para 104: .pa_alloc = pool_page_alloc_meta,
105: .pa_free = pool_page_free_meta,
106: .pa_pagesz = 0
1.98 yamt 107: };
108:
1.208 chs 109: #define POOL_ALLOCATOR_BIG_BASE 13
110: extern struct pool_allocator pool_allocator_big[];
111: static int pool_bigidx(size_t);
112:
1.3 pk 113: /* # of seconds to retain page after last use */
114: int pool_inactive_time = 10;
115:
116: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 117: static struct pool *drainpp;
118:
1.134 ad 119: /* This lock protects both pool_head and drainpp. */
120: static kmutex_t pool_head_lock;
121: static kcondvar_t pool_busy;
1.3 pk 122:
1.178 elad 123: /* This lock protects initialization of a potentially shared pool allocator */
124: static kmutex_t pool_allocator_lock;
125:
1.135 yamt 126: typedef uint32_t pool_item_bitmap_t;
127: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
128: #define BITMAP_MASK (BITMAP_SIZE - 1)
1.99 yamt 129:
1.3 pk 130: struct pool_item_header {
131: /* Page headers */
1.88 chs 132: LIST_ENTRY(pool_item_header)
1.3 pk 133: ph_pagelist; /* pool page list */
1.88 chs 134: SPLAY_ENTRY(pool_item_header)
135: ph_node; /* Off-page page headers */
1.128 christos 136: void * ph_page; /* this page's address */
1.151 yamt 137: uint32_t ph_time; /* last referenced */
1.135 yamt 138: uint16_t ph_nmissing; /* # of chunks in use */
1.141 yamt 139: uint16_t ph_off; /* start offset in page */
1.97 yamt 140: union {
141: /* !PR_NOTOUCH */
142: struct {
1.102 chs 143: LIST_HEAD(, pool_item)
1.97 yamt 144: phu_itemlist; /* chunk list for this page */
145: } phu_normal;
146: /* PR_NOTOUCH */
147: struct {
1.141 yamt 148: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 149: } phu_notouch;
150: } ph_u;
1.3 pk 151: };
1.97 yamt 152: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.135 yamt 153: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 154:
1.1 pk 155: struct pool_item {
1.3 pk 156: #ifdef DIAGNOSTIC
1.82 thorpej 157: u_int pi_magic;
1.33 chs 158: #endif
1.134 ad 159: #define PI_MAGIC 0xdeaddeadU
1.3 pk 160: /* Other entries use only this list entry */
1.102 chs 161: LIST_ENTRY(pool_item) pi_list;
1.3 pk 162: };
163:
1.53 thorpej 164: #define POOL_NEEDS_CATCHUP(pp) \
165: ((pp)->pr_nitems < (pp)->pr_minitems)
166:
1.43 thorpej 167: /*
168: * Pool cache management.
169: *
170: * Pool caches provide a way for constructed objects to be cached by the
171: * pool subsystem. This can lead to performance improvements by avoiding
                    172: * needless object construction/destruction; construction and
                    173: * destruction are deferred until absolutely necessary.
174: *
1.134 ad 175: * Caches are grouped into cache groups. Each cache group references up
176: * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
177: * object from the pool, it calls the object's constructor and places it
178: * into a cache group. When a cache group frees an object back to the
179: * pool, it first calls the object's destructor. This allows the object
180: * to persist in constructed form while freed to the cache.
181: *
182: * The pool references each cache, so that when a pool is drained by the
183: * pagedaemon, it can drain each individual cache as well. Each time a
184: * cache is drained, the most idle cache group is freed to the pool in
185: * its entirety.
1.43 thorpej 186: *
                    187: * Pool caches are laid on top of pools. By layering them, we can avoid
188: * the complexity of cache management for pools which would not benefit
189: * from it.
190: */
191:
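/*
 * Illustrative sketch (not part of this file): a pool cache caller
 * supplies an optional constructor/destructor pair, then gets and
 * puts constructed objects.  "struct foo", foo_ctor and foo_dtor are
 * hypothetical.
 */
#if 0 /* example only */
static pool_cache_t foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{
	/* one-time construction; return 0 on success */
	return 0;
}

static void
foo_dtor(void *arg, void *obj)
{
	/* undo what foo_ctor did */
}

static void
foo_cache_example(void)
{
	struct foo *f;

	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
	    0, 0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);

	f = pool_cache_get(foo_cache, PR_WAITOK);
	/* ... use f; it remains constructed while sitting in the cache ... */
	pool_cache_put(foo_cache, f);

	pool_cache_destroy(foo_cache);
}
#endif
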
1.142 ad 192: static struct pool pcg_normal_pool;
193: static struct pool pcg_large_pool;
1.134 ad 194: static struct pool cache_pool;
195: static struct pool cache_cpu_pool;
1.3 pk 196:
1.189 pooka 197: pool_cache_t pnbuf_cache; /* pathname buffer cache */
198:
1.145 ad 199: /* List of all caches. */
200: TAILQ_HEAD(,pool_cache) pool_cache_head =
201: TAILQ_HEAD_INITIALIZER(pool_cache_head);
202:
1.162 ad 203: int pool_cache_disable; /* global disable for caching */
1.169 yamt 204: static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */
1.145 ad 205:
1.162 ad 206: static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
207: void *);
208: static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
209: void **, paddr_t *, int);
1.134 ad 210: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
211: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
1.175 jym 212: static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
1.196 jym 213: static void pool_cache_transfer(pool_cache_t);
1.3 pk 214:
1.42 thorpej 215: static int pool_catchup(struct pool *);
1.128 christos 216: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 217: struct pool_item_header *);
1.88 chs 218: static void pool_update_curpage(struct pool *);
1.66 thorpej 219:
1.113 yamt 220: static int pool_grow(struct pool *, int);
1.117 yamt 221: static void *pool_allocator_alloc(struct pool *, int);
222: static void pool_allocator_free(struct pool *, void *);
1.3 pk 223:
1.97 yamt 224: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.199 christos 225: void (*)(const char *, ...) __printflike(1, 2));
1.42 thorpej 226: static void pool_print1(struct pool *, const char *,
1.199 christos 227: void (*)(const char *, ...) __printflike(1, 2));
1.3 pk 228:
1.88 chs 229: static int pool_chk_page(struct pool *, const char *,
230: struct pool_item_header *);
231:
1.135 yamt 232: static inline unsigned int
1.97 yamt 233: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
234: const void *v)
235: {
236: const char *cp = v;
1.135 yamt 237: unsigned int idx;
1.97 yamt 238:
239: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 240: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 241: KASSERT(idx < pp->pr_itemsperpage);
242: return idx;
243: }
244:
1.110 perry 245: static inline void
1.97 yamt 246: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
247: void *obj)
248: {
1.135 yamt 249: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
250: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
251: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 252:
1.135 yamt 253: KASSERT((*bitmap & mask) == 0);
254: *bitmap |= mask;
1.97 yamt 255: }
256:
1.110 perry 257: static inline void *
1.97 yamt 258: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
259: {
1.135 yamt 260: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
261: unsigned int idx;
262: int i;
1.97 yamt 263:
1.135 yamt 264: for (i = 0; ; i++) {
265: int bit;
1.97 yamt 266:
1.135 yamt 267: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
268: bit = ffs32(bitmap[i]);
269: if (bit) {
270: pool_item_bitmap_t mask;
271:
272: bit--;
273: idx = (i * BITMAP_SIZE) + bit;
1.222 ! kamil 274: mask = 1U << bit;
1.135 yamt 275: KASSERT((bitmap[i] & mask) != 0);
276: bitmap[i] &= ~mask;
277: break;
278: }
279: }
280: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 281: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 282: }
283:
1.135 yamt 284: static inline void
1.141 yamt 285: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
1.135 yamt 286: {
287: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
288: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
289: int i;
290:
291: for (i = 0; i < n; i++) {
292: bitmap[i] = (pool_item_bitmap_t)-1;
293: }
294: }
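/*
 * In the PR_NOTOUCH bitmap above, a set bit marks a free item: init
 * sets every bit, get clears the first set bit it finds (ffs32), and
 * put asserts the bit is clear before setting it again.  Item idx is
 * tracked by bit (idx & BITMAP_MASK) of word idx / BITMAP_SIZE.
 */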
295:
1.110 perry 296: static inline int
1.88 chs 297: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
298: {
1.121 yamt 299:
300: /*
301: * we consider pool_item_header with smaller ph_page bigger.
302: * (this unnatural ordering is for the benefit of pr_find_pagehead.)
303: */
304:
1.88 chs 305: if (a->ph_page < b->ph_page)
1.121 yamt 306: return (1);
307: else if (a->ph_page > b->ph_page)
1.88 chs 308: return (-1);
309: else
310: return (0);
311: }
312:
313: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
314: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
315:
1.141 yamt 316: static inline struct pool_item_header *
317: pr_find_pagehead_noalign(struct pool *pp, void *v)
318: {
319: struct pool_item_header *ph, tmp;
320:
321: tmp.ph_page = (void *)(uintptr_t)v;
322: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
323: if (ph == NULL) {
324: ph = SPLAY_ROOT(&pp->pr_phtree);
325: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
326: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
327: }
328: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
329: }
330:
331: return ph;
332: }
333:
1.3 pk 334: /*
1.121 yamt 335: * Return the pool page header based on item address.
1.3 pk 336: */
1.110 perry 337: static inline struct pool_item_header *
1.121 yamt 338: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 339: {
1.88 chs 340: struct pool_item_header *ph, tmp;
1.3 pk 341:
1.121 yamt 342: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.141 yamt 343: ph = pr_find_pagehead_noalign(pp, v);
1.121 yamt 344: } else {
1.128 christos 345: void *page =
346: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 347:
348: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 349: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 350: } else {
351: tmp.ph_page = page;
352: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
353: }
354: }
1.3 pk 355:
1.121 yamt 356: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 357: ((char *)ph->ph_page <= (char *)v &&
358: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 359: return ph;
1.3 pk 360: }
361:
1.101 thorpej 362: static void
363: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
364: {
365: struct pool_item_header *ph;
366:
367: while ((ph = LIST_FIRST(pq)) != NULL) {
368: LIST_REMOVE(ph, ph_pagelist);
369: pool_allocator_free(pp, ph->ph_page);
1.134 ad 370: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 371: pool_put(pp->pr_phpool, ph);
372: }
373: }
374:
1.3 pk 375: /*
376: * Remove a page from the pool.
377: */
1.110 perry 378: static inline void
1.61 chs 379: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
380: struct pool_pagelist *pq)
1.3 pk 381: {
382:
1.134 ad 383: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 384:
1.3 pk 385: /*
1.7 thorpej 386: * If the page was idle, decrement the idle page count.
1.3 pk 387: */
1.6 thorpej 388: if (ph->ph_nmissing == 0) {
1.207 riastrad 389: KASSERT(pp->pr_nidle != 0);
390: KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
391: "nitems=%u < itemsperpage=%u",
392: pp->pr_nitems, pp->pr_itemsperpage);
1.6 thorpej 393: pp->pr_nidle--;
394: }
1.7 thorpej 395:
1.20 thorpej 396: pp->pr_nitems -= pp->pr_itemsperpage;
397:
1.7 thorpej 398: /*
1.101 thorpej 399: * Unlink the page from the pool and queue it for release.
1.7 thorpej 400: */
1.88 chs 401: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 402: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
403: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 404: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
405:
1.7 thorpej 406: pp->pr_npages--;
407: pp->pr_npagefree++;
1.6 thorpej 408:
1.88 chs 409: pool_update_curpage(pp);
1.3 pk 410: }
411:
412: /*
1.94 simonb   413: * Initialize the pool subsystem: locks, page header pools, cache pools.
414: */
415: void
1.117 yamt 416: pool_subsystem_init(void)
1.94 simonb 417: {
1.192 rmind 418: size_t size;
1.191 para 419: int idx;
1.94 simonb 420:
1.134 ad 421: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
1.179 mlelstv 422: mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
1.134 ad 423: cv_init(&pool_busy, "poolbusy");
424:
1.191 para 425: /*
426: * Initialize private page header pool and cache magazine pool if we
427: * haven't done so yet.
428: */
429: for (idx = 0; idx < PHPOOL_MAX; idx++) {
430: static char phpool_names[PHPOOL_MAX][6+1+6+1];
431: int nelem;
432: size_t sz;
433:
434: nelem = PHPOOL_FREELIST_NELEM(idx);
435: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
436: "phpool-%d", nelem);
437: sz = sizeof(struct pool_item_header);
438: if (nelem) {
439: sz = offsetof(struct pool_item_header,
440: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
441: }
442: pool_init(&phpool[idx], sz, 0, 0, 0,
443: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.117 yamt 444: }
1.191 para 445: #ifdef POOL_SUBPAGE
446: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
447: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
448: #endif
449:
450: size = sizeof(pcg_t) +
451: (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
452: pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
453: "pcgnormal", &pool_allocator_meta, IPL_VM);
454:
455: size = sizeof(pcg_t) +
456: (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
457: pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
458: "pcglarge", &pool_allocator_meta, IPL_VM);
1.134 ad 459:
1.156 ad 460: pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
1.191 para 461: 0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
1.134 ad 462:
1.156 ad 463: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
1.191 para 464: 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
1.94 simonb 465: }
466:
467: /*
1.3 pk 468: * Initialize the given pool resource structure.
469: *
470: * We export this routine to allow other kernel parts to declare
1.195 rmind 471: * static pools that must be initialized before kmem(9) is available.
1.3 pk 472: */
473: void
1.42 thorpej 474: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 475: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 476: {
1.116 simonb 477: struct pool *pp1;
1.204 maxv 478: size_t trysize, phsize, prsize;
1.134 ad 479: int off, slack;
1.3 pk 480:
1.116 simonb 481: #ifdef DEBUG
1.198 christos 482: if (__predict_true(!cold))
483: mutex_enter(&pool_head_lock);
1.116 simonb 484: /*
485: * Check that the pool hasn't already been initialised and
486: * added to the list of all pools.
487: */
1.145 ad 488: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
1.116 simonb 489: if (pp == pp1)
1.213 christos 490: panic("%s: [%s] already initialised", __func__,
1.116 simonb 491: wchan);
492: }
1.198 christos 493: if (__predict_true(!cold))
494: mutex_exit(&pool_head_lock);
1.116 simonb 495: #endif
496:
1.66 thorpej 497: if (palloc == NULL)
498: palloc = &pool_allocator_kmem;
1.112 bjh21 499: #ifdef POOL_SUBPAGE
500: if (size > palloc->pa_pagesz) {
501: if (palloc == &pool_allocator_kmem)
502: palloc = &pool_allocator_kmem_fullpage;
503: else if (palloc == &pool_allocator_nointr)
504: palloc = &pool_allocator_nointr_fullpage;
505: }
1.66 thorpej 506: #endif /* POOL_SUBPAGE */
1.180 mlelstv 507: if (!cold)
508: mutex_enter(&pool_allocator_lock);
1.178 elad 509: if (palloc->pa_refcnt++ == 0) {
1.112 bjh21 510: if (palloc->pa_pagesz == 0)
1.66 thorpej 511: palloc->pa_pagesz = PAGE_SIZE;
512:
513: TAILQ_INIT(&palloc->pa_list);
514:
1.134 ad 515: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 516: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
517: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.4 thorpej 518: }
1.180 mlelstv 519: if (!cold)
520: mutex_exit(&pool_allocator_lock);
1.3 pk 521:
522: if (align == 0)
523: align = ALIGN(1);
1.14 thorpej 524:
1.204 maxv 525: prsize = size;
526: if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
527: prsize = sizeof(struct pool_item);
1.3 pk 528:
1.204 maxv 529: prsize = roundup(prsize, align);
1.207 riastrad 530: KASSERTMSG((prsize <= palloc->pa_pagesz),
1.213 christos 531: "%s: [%s] pool item size (%zu) larger than page size (%u)",
532: __func__, wchan, prsize, palloc->pa_pagesz);
1.35 pk 533:
1.3 pk 534: /*
535: * Initialize the pool structure.
536: */
1.88 chs 537: LIST_INIT(&pp->pr_emptypages);
538: LIST_INIT(&pp->pr_fullpages);
539: LIST_INIT(&pp->pr_partpages);
1.134 ad 540: pp->pr_cache = NULL;
1.3 pk 541: pp->pr_curpage = NULL;
542: pp->pr_npages = 0;
543: pp->pr_minitems = 0;
544: pp->pr_minpages = 0;
545: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 546: pp->pr_roflags = flags;
547: pp->pr_flags = 0;
1.204 maxv 548: pp->pr_size = prsize;
1.3 pk 549: pp->pr_align = align;
550: pp->pr_wchan = wchan;
1.66 thorpej 551: pp->pr_alloc = palloc;
1.20 thorpej 552: pp->pr_nitems = 0;
553: pp->pr_nout = 0;
554: pp->pr_hardlimit = UINT_MAX;
555: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 556: pp->pr_hardlimit_ratecap.tv_sec = 0;
557: pp->pr_hardlimit_ratecap.tv_usec = 0;
558: pp->pr_hardlimit_warning_last.tv_sec = 0;
559: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 560: pp->pr_drain_hook = NULL;
561: pp->pr_drain_hook_arg = NULL;
1.125 ad 562: pp->pr_freecheck = NULL;
1.204 maxv 563: pool_redzone_init(pp, size);
1.3 pk 564:
565: /*
                    566: * Decide whether to put the page header off-page: we do so when
1.92 enami    567: * an in-page header would waste too large a part of the page, or
                    568: * when the item is too big. Off-page page headers go in a splay
                    569: * tree, so we can match a returned item with its header based on
                    570: * the page address. We use 1/16 of the page size and about 8
                    571: * times the item size as the threshold (XXX: tune).
572: *
573: * However, we'll put the header into the page if we can put
574: * it without wasting any items.
575: *
576: * Silently enforce `0 <= ioff < align'.
1.3 pk 577: */
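	/*
	 * Worked example with hypothetical numbers: for a 4096-byte
	 * page and a phsize of 64 bytes, the threshold below is
	 * MIN(4096 / 16, 64 << 3) = MIN(256, 512) = 256, so items
	 * smaller than 256 bytes keep their header in-page.
	 */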
1.92 enami 578: pp->pr_itemoffset = ioff %= align;
579: /* See the comment below about reserved bytes. */
580: trysize = palloc->pa_pagesz - ((align - ioff) % align);
581: phsize = ALIGN(sizeof(struct pool_item_header));
1.201 para 582: if (pp->pr_roflags & PR_PHINPAGE ||
583: ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 584: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
1.201 para 585: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
1.3 pk 586: /* Use the end of the page for the page header */
1.20 thorpej 587: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 588: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 589: } else {
1.3 pk 590: /* The page header will be taken from our page header pool */
591: pp->pr_phoffset = 0;
1.66 thorpej 592: off = palloc->pa_pagesz;
1.88 chs 593: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 594: }
1.1 pk 595:
1.3 pk 596: /*
597: * Alignment is to take place at `ioff' within the item. This means
598: * we must reserve up to `align - 1' bytes on the page to allow
599: * appropriate positioning of each item.
600: */
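	/*
	 * E.g. (hypothetical numbers) align = 16, ioff = 8: we reserve
	 * (align - ioff) % align = 8 bytes, so that byte 8 of every
	 * item falls on a 16-byte boundary.
	 */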
601: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 602: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 603: if ((pp->pr_roflags & PR_NOTOUCH)) {
604: int idx;
605:
606: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
607: idx++) {
608: /* nothing */
609: }
610: if (idx >= PHPOOL_MAX) {
611: /*
                    612: * if you see this panic, consider tweaking
613: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
614: */
1.213 christos 615: panic("%s: [%s] too large itemsperpage(%d) for "
616: "PR_NOTOUCH", __func__,
1.97 yamt 617: pp->pr_wchan, pp->pr_itemsperpage);
618: }
619: pp->pr_phpool = &phpool[idx];
620: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
621: pp->pr_phpool = &phpool[0];
622: }
623: #if defined(DIAGNOSTIC)
624: else {
625: pp->pr_phpool = NULL;
626: }
627: #endif
1.3 pk 628:
629: /*
630: * Use the slack between the chunks and the page header
631: * for "cache coloring".
632: */
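	/*
	 * Each new page starts its items pr_curcolor bytes into the
	 * page (see pool_prime_page), and pr_curcolor cycles through
	 * 0, align, 2*align, ..., pr_maxcolor, so that items on
	 * different pages start on different cache lines.
	 */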
633: slack = off - pp->pr_itemsperpage * pp->pr_size;
634: pp->pr_maxcolor = (slack / align) * align;
635: pp->pr_curcolor = 0;
636:
637: pp->pr_nget = 0;
638: pp->pr_nfail = 0;
639: pp->pr_nput = 0;
640: pp->pr_npagealloc = 0;
641: pp->pr_npagefree = 0;
1.1 pk 642: pp->pr_hiwat = 0;
1.8 thorpej 643: pp->pr_nidle = 0;
1.134 ad 644: pp->pr_refcnt = 0;
1.3 pk 645:
1.157 ad 646: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
1.134 ad 647: cv_init(&pp->pr_cv, wchan);
648: pp->pr_ipl = ipl;
1.1 pk 649:
1.145 ad 650: /* Insert into the list of all pools. */
1.181 mlelstv 651: if (!cold)
1.134 ad 652: mutex_enter(&pool_head_lock);
1.145 ad 653: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
654: if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
655: break;
656: }
657: if (pp1 == NULL)
658: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
659: else
660: TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
1.181 mlelstv 661: if (!cold)
1.134 ad 662: mutex_exit(&pool_head_lock);
663:
1.167 skrll 664: /* Insert this into the list of pools using this allocator. */
1.181 mlelstv 665: if (!cold)
1.134 ad 666: mutex_enter(&palloc->pa_lock);
1.145 ad 667: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
1.181 mlelstv 668: if (!cold)
1.134 ad 669: mutex_exit(&palloc->pa_lock);
1.1 pk 670: }
671:
672: /*
                    673: * De-commission a pool resource.
674: */
675: void
1.42 thorpej 676: pool_destroy(struct pool *pp)
1.1 pk 677: {
1.101 thorpej 678: struct pool_pagelist pq;
1.3 pk 679: struct pool_item_header *ph;
1.43 thorpej 680:
1.101 thorpej 681: /* Remove from global pool list */
1.134 ad 682: mutex_enter(&pool_head_lock);
683: while (pp->pr_refcnt != 0)
684: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 685: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.101 thorpej 686: if (drainpp == pp)
687: drainpp = NULL;
1.134 ad 688: mutex_exit(&pool_head_lock);
1.101 thorpej 689:
690: /* Remove this pool from its allocator's list of pools. */
1.134 ad 691: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 692: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 693: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 694:
1.178 elad 695: mutex_enter(&pool_allocator_lock);
696: if (--pp->pr_alloc->pa_refcnt == 0)
697: mutex_destroy(&pp->pr_alloc->pa_lock);
698: mutex_exit(&pool_allocator_lock);
699:
1.134 ad 700: mutex_enter(&pp->pr_lock);
1.101 thorpej 701:
1.134 ad 702: KASSERT(pp->pr_cache == NULL);
1.207 riastrad 703: KASSERTMSG((pp->pr_nout == 0),
1.213 christos 704: "%s: pool busy: still out: %u", __func__, pp->pr_nout);
1.101 thorpej 705: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
706: KASSERT(LIST_EMPTY(&pp->pr_partpages));
707:
1.3 pk 708: /* Remove all pages */
1.101 thorpej 709: LIST_INIT(&pq);
1.88 chs 710: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 711: pr_rmpage(pp, ph, &pq);
712:
1.134 ad 713: mutex_exit(&pp->pr_lock);
1.3 pk 714:
1.101 thorpej 715: pr_pagelist_free(pp, &pq);
1.134 ad 716: cv_destroy(&pp->pr_cv);
717: mutex_destroy(&pp->pr_lock);
1.1 pk 718: }
719:
1.68 thorpej 720: void
721: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
722: {
723:
724: /* XXX no locking -- must be used just after pool_init() */
1.207 riastrad 725: KASSERTMSG((pp->pr_drain_hook == NULL),
1.213 christos 726: "%s: [%s] already set", __func__, pp->pr_wchan);
1.68 thorpej 727: pp->pr_drain_hook = fn;
728: pp->pr_drain_hook_arg = arg;
729: }
730:
1.88 chs 731: static struct pool_item_header *
1.128 christos 732: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 733: {
734: struct pool_item_header *ph;
735:
736: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.213 christos 737: ph = (void *)((char *)storage + pp->pr_phoffset);
1.134 ad 738: else
1.97 yamt 739: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 740:
741: return (ph);
742: }
1.1 pk 743:
744: /*
1.134 ad 745: * Grab an item from the pool.
1.1 pk 746: */
1.3 pk 747: void *
1.56 sommerfe 748: pool_get(struct pool *pp, int flags)
1.1 pk 749: {
750: struct pool_item *pi;
1.3 pk 751: struct pool_item_header *ph;
1.55 thorpej 752: void *v;
1.1 pk 753:
1.215 christos 754: KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
1.207 riastrad 755: KASSERTMSG((pp->pr_itemsperpage != 0),
1.213 christos 756: "%s: [%s] pr_itemsperpage is zero, "
757: "pool not initialized?", __func__, pp->pr_wchan);
1.207 riastrad 758: KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
759: || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
1.213 christos 760: "%s: [%s] is IPL_NONE, but called from interrupt context",
761: __func__, pp->pr_wchan);
1.155 ad 762: if (flags & PR_WAITOK) {
1.154 yamt 763: ASSERT_SLEEPABLE();
1.155 ad 764: }
1.1 pk 765:
1.134 ad 766: mutex_enter(&pp->pr_lock);
1.20 thorpej 767: startover:
768: /*
769: * Check to see if we've reached the hard limit. If we have,
770: * and we can wait, then wait until an item has been returned to
771: * the pool.
772: */
1.207 riastrad 773: KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
1.213 christos 774: "%s: %s: crossed hard limit", __func__, pp->pr_wchan);
1.34 thorpej 775: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 776: if (pp->pr_drain_hook != NULL) {
777: /*
778: * Since the drain hook is going to free things
779: * back to the pool, unlock, call the hook, re-lock,
780: * and check the hardlimit condition again.
781: */
1.134 ad 782: mutex_exit(&pp->pr_lock);
1.68 thorpej 783: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 784: mutex_enter(&pp->pr_lock);
1.68 thorpej 785: if (pp->pr_nout < pp->pr_hardlimit)
786: goto startover;
787: }
788:
1.29 sommerfe 789: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 790: /*
791: * XXX: A warning isn't logged in this case. Should
792: * it be?
793: */
794: pp->pr_flags |= PR_WANTED;
1.212 christos 795: do {
796: cv_wait(&pp->pr_cv, &pp->pr_lock);
797: } while (pp->pr_flags & PR_WANTED);
1.20 thorpej 798: goto startover;
799: }
1.31 thorpej 800:
801: /*
802: * Log a message that the hard limit has been hit.
803: */
804: if (pp->pr_hardlimit_warning != NULL &&
805: ratecheck(&pp->pr_hardlimit_warning_last,
806: &pp->pr_hardlimit_ratecap))
807: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 808:
809: pp->pr_nfail++;
810:
1.134 ad 811: mutex_exit(&pp->pr_lock);
1.216 christos 812: KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
1.20 thorpej 813: return (NULL);
814: }
815:
1.3 pk 816: /*
817: * The convention we use is that if `curpage' is not NULL, then
818: * it points at a non-empty bucket. In particular, `curpage'
819: * never points at a page header which has PR_PHINPAGE set and
820: * has no items in its bucket.
821: */
1.20 thorpej 822: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 823: int error;
824:
1.207 riastrad 825: KASSERTMSG((pp->pr_nitems == 0),
1.213 christos 826: "%s: [%s] curpage NULL, inconsistent nitems %u",
827: __func__, pp->pr_wchan, pp->pr_nitems);
1.20 thorpej 828:
1.21 thorpej 829: /*
830: * Call the back-end page allocator for more memory.
831: * Release the pool lock, as the back-end page allocator
832: * may block.
833: */
1.113 yamt 834: error = pool_grow(pp, flags);
835: if (error != 0) {
1.21 thorpej 836: /*
1.210 mlelstv 837: * pool_grow aborts when another thread
838: * is allocating a new page. Retry if it
839: * waited for it.
840: */
841: if (error == ERESTART)
842: goto startover;
843:
844: /*
1.55 thorpej 845: * We were unable to allocate a page or item
846: * header, but we released the lock during
847: * allocation, so perhaps items were freed
848: * back to the pool. Check for this case.
1.21 thorpej 849: */
850: if (pp->pr_curpage != NULL)
851: goto startover;
1.15 pk 852:
1.117 yamt 853: pp->pr_nfail++;
1.134 ad 854: mutex_exit(&pp->pr_lock);
1.211 riastrad 855: KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
1.117 yamt 856: return (NULL);
1.1 pk 857: }
1.3 pk 858:
1.20 thorpej 859: /* Start the allocation process over. */
860: goto startover;
1.3 pk 861: }
1.97 yamt 862: if (pp->pr_roflags & PR_NOTOUCH) {
1.207 riastrad 863: KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
1.213 christos 864: "%s: %s: page empty", __func__, pp->pr_wchan);
1.97 yamt 865: v = pr_item_notouch_get(pp, ph);
866: } else {
1.102 chs 867: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 868: if (__predict_false(v == NULL)) {
1.134 ad 869: mutex_exit(&pp->pr_lock);
1.213 christos 870: panic("%s: [%s] page empty", __func__, pp->pr_wchan);
1.97 yamt 871: }
1.207 riastrad 872: KASSERTMSG((pp->pr_nitems > 0),
1.213 christos 873: "%s: [%s] nitems %u inconsistent on itemlist",
874: __func__, pp->pr_wchan, pp->pr_nitems);
1.207 riastrad 875: KASSERTMSG((pi->pi_magic == PI_MAGIC),
1.213 christos 876: "%s: [%s] free list modified: "
877: "magic=%x; page %p; item addr %p", __func__,
1.207 riastrad 878: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1.3 pk 879:
1.97 yamt 880: /*
881: * Remove from item list.
882: */
1.102 chs 883: LIST_REMOVE(pi, pi_list);
1.97 yamt 884: }
1.20 thorpej 885: pp->pr_nitems--;
886: pp->pr_nout++;
1.6 thorpej 887: if (ph->ph_nmissing == 0) {
1.207 riastrad 888: KASSERT(pp->pr_nidle > 0);
1.6 thorpej 889: pp->pr_nidle--;
1.88 chs 890:
891: /*
892: * This page was previously empty. Move it to the list of
893: * partially-full pages. This page is already curpage.
894: */
895: LIST_REMOVE(ph, ph_pagelist);
896: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 897: }
1.3 pk 898: ph->ph_nmissing++;
1.97 yamt 899: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.207 riastrad 900: KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) ||
901: LIST_EMPTY(&ph->ph_itemlist)),
1.213 christos 902: "%s: [%s] nmissing (%u) inconsistent", __func__,
903: pp->pr_wchan, ph->ph_nmissing);
1.3 pk 904: /*
1.88 chs 905: * This page is now full. Move it to the full list
906: * and select a new current page.
1.3 pk 907: */
1.88 chs 908: LIST_REMOVE(ph, ph_pagelist);
909: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
910: pool_update_curpage(pp);
1.1 pk 911: }
1.3 pk 912:
913: pp->pr_nget++;
1.20 thorpej 914:
915: /*
916: * If we have a low water mark and we are now below that low
917: * water mark, add more items to the pool.
918: */
1.53 thorpej 919: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 920: /*
921: * XXX: Should we log a warning? Should we set up a timeout
922: * to try again in a second or so? The latter could break
923: * a caller's assumptions about interrupt protection, etc.
924: */
925: }
926:
1.134 ad 927: mutex_exit(&pp->pr_lock);
1.125 ad 928: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
929: FREECHECK_OUT(&pp->pr_freecheck, v);
1.204 maxv 930: pool_redzone_fill(pp, v);
1.1 pk 931: return (v);
932: }
933:
934: /*
1.43 thorpej 935: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 936: */
1.43 thorpej 937: static void
1.101 thorpej 938: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 939: {
940: struct pool_item *pi = v;
1.3 pk 941: struct pool_item_header *ph;
942:
1.134 ad 943: KASSERT(mutex_owned(&pp->pr_lock));
1.204 maxv 944: pool_redzone_check(pp, v);
1.125 ad 945: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 946: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 947:
1.207 riastrad 948: KASSERTMSG((pp->pr_nout > 0),
1.213 christos 949: "%s: [%s] putting with none out", __func__, pp->pr_wchan);
1.3 pk 950:
1.121 yamt 951: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.213 christos 952: panic("%s: [%s] page header missing", __func__, pp->pr_wchan);
1.3 pk 953: }
1.28 thorpej 954:
1.3 pk 955: /*
956: * Return to item list.
957: */
1.97 yamt 958: if (pp->pr_roflags & PR_NOTOUCH) {
959: pr_item_notouch_put(pp, ph, v);
960: } else {
1.2 pk 961: #ifdef DIAGNOSTIC
1.97 yamt 962: pi->pi_magic = PI_MAGIC;
1.3 pk 963: #endif
1.32 chs 964: #ifdef DEBUG
1.97 yamt 965: {
966: int i, *ip = v;
1.32 chs 967:
1.97 yamt 968: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
969: *ip++ = PI_MAGIC;
970: }
1.32 chs 971: }
972: #endif
973:
1.102 chs 974: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 975: }
1.79 thorpej 976: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 977: ph->ph_nmissing--;
978: pp->pr_nput++;
1.20 thorpej 979: pp->pr_nitems++;
980: pp->pr_nout--;
1.3 pk 981:
982: /* Cancel "pool empty" condition if it exists */
983: if (pp->pr_curpage == NULL)
984: pp->pr_curpage = ph;
985:
986: if (pp->pr_flags & PR_WANTED) {
987: pp->pr_flags &= ~PR_WANTED;
1.134 ad 988: cv_broadcast(&pp->pr_cv);
1.3 pk 989: }
990:
991: /*
1.88 chs 992: * If this page is now empty, do one of two things:
1.21 thorpej 993: *
1.88 chs 994: * (1) If we have more pages than the page high water mark,
1.96 thorpej 995: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 996: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
997: * CLAIM.
1.21 thorpej 998: *
1.88 chs 999: * (2) Otherwise, move the page to the empty page list.
1000: *
1001: * Either way, select a new current page (so we use a partially-full
1002: * page if one is available).
1.3 pk 1003: */
1004: if (ph->ph_nmissing == 0) {
1.6 thorpej 1005: pp->pr_nidle++;
1.90 thorpej 1006: if (pp->pr_npages > pp->pr_minpages &&
1.152 yamt 1007: pp->pr_npages > pp->pr_maxpages) {
1.101 thorpej 1008: pr_rmpage(pp, ph, pq);
1.3 pk 1009: } else {
1.88 chs 1010: LIST_REMOVE(ph, ph_pagelist);
1011: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1012:
1.21 thorpej 1013: /*
1014: * Update the timestamp on the page. A page must
1015: * be idle for some period of time before it can
1016: * be reclaimed by the pagedaemon. This minimizes
1017: * ping-pong'ing for memory.
1.151 yamt 1018: *
1019: * note for 64-bit time_t: truncating to 32-bit is not
1020: * a problem for our usage.
1.21 thorpej 1021: */
1.151 yamt 1022: ph->ph_time = time_uptime;
1.1 pk 1023: }
1.88 chs 1024: pool_update_curpage(pp);
1.1 pk 1025: }
1.88 chs 1026:
1.21 thorpej 1027: /*
1.88 chs 1028: * If the page was previously completely full, move it to the
1029: * partially-full list and make it the current page. The next
1030: * allocation will get the item from this page, instead of
1031: * further fragmenting the pool.
1.21 thorpej 1032: */
1033: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1034: LIST_REMOVE(ph, ph_pagelist);
1035: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1036: pp->pr_curpage = ph;
1037: }
1.43 thorpej 1038: }
1039:
1.56 sommerfe 1040: void
1041: pool_put(struct pool *pp, void *v)
1042: {
1.101 thorpej 1043: struct pool_pagelist pq;
1044:
1045: LIST_INIT(&pq);
1.56 sommerfe 1046:
1.134 ad 1047: mutex_enter(&pp->pr_lock);
1.101 thorpej 1048: pool_do_put(pp, v, &pq);
1.134 ad 1049: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1050:
1.102 chs 1051: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1052: }
1.57 sommerfe 1053:
1.74 thorpej 1054: /*
1.113 yamt 1055: * pool_grow: grow a pool by a page.
1056: *
1057: * => called with pool locked.
1058: * => unlock and relock the pool.
1059: * => return with pool locked.
1060: */
1061:
1062: static int
1063: pool_grow(struct pool *pp, int flags)
1064: {
1.209 riastrad 1065: /*
1066: * If there's a pool_grow in progress, wait for it to complete
1067: * and try again from the top.
1068: */
1069: if (pp->pr_flags & PR_GROWING) {
1070: if (flags & PR_WAITOK) {
1071: do {
1072: cv_wait(&pp->pr_cv, &pp->pr_lock);
1073: } while (pp->pr_flags & PR_GROWING);
1074: return ERESTART;
1075: } else {
1.219 mrg 1076: if (pp->pr_flags & PR_GROWINGNOWAIT) {
1077: /*
1078: * This needs an unlock/relock dance so
1079: * that the other caller has a chance to
1080: * run and actually do the thing. Note
1081: * that this is effectively a busy-wait.
1082: */
1083: mutex_exit(&pp->pr_lock);
1084: mutex_enter(&pp->pr_lock);
1085: return ERESTART;
1086: }
1.209 riastrad 1087: return EWOULDBLOCK;
1088: }
1089: }
1090: pp->pr_flags |= PR_GROWING;
1.220 christos 1091: if (flags & PR_WAITOK)
1092: mutex_exit(&pp->pr_lock);
1093: else
1.219 mrg 1094: pp->pr_flags |= PR_GROWINGNOWAIT;
1.113 yamt 1095:
1.216 christos 1096: char *cp = pool_allocator_alloc(pp, flags);
1097: if (__predict_false(cp == NULL))
1098: goto out;
1099:
1100: struct pool_item_header *ph = pool_alloc_item_header(pp, cp, flags);
1101: if (__predict_false(ph == NULL)) {
1102: pool_allocator_free(pp, cp);
1.209 riastrad 1103: goto out;
1.113 yamt 1104: }
1105:
1.220 christos 1106: if (flags & PR_WAITOK)
1107: mutex_enter(&pp->pr_lock);
1.113 yamt 1108: pool_prime_page(pp, cp, ph);
1109: pp->pr_npagealloc++;
1.216 christos 1110: KASSERT(pp->pr_flags & PR_GROWING);
1.219 mrg 1111: pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
1.209 riastrad 1112: /*
1113: * If anyone was waiting for pool_grow, notify them that we
1114: * may have just done it.
1115: */
1.216 christos 1116: cv_broadcast(&pp->pr_cv);
1117: return 0;
1118: out:
1.220 christos 1119: if (flags & PR_WAITOK)
1120: mutex_enter(&pp->pr_lock);
1.209 riastrad 1121: KASSERT(pp->pr_flags & PR_GROWING);
1.219 mrg 1122: pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
1.216 christos 1123: return ENOMEM;
1.113 yamt 1124: }
1125:
1126: /*
1.74 thorpej 1127: * Add N items to the pool.
1128: */
1129: int
1130: pool_prime(struct pool *pp, int n)
1131: {
1.75 simonb 1132: int newpages;
1.113 yamt 1133: int error = 0;
1.74 thorpej 1134:
1.134 ad 1135: mutex_enter(&pp->pr_lock);
1.74 thorpej 1136:
1137: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1138:
1.216 christos 1139: while (newpages > 0) {
1.113 yamt 1140: error = pool_grow(pp, PR_NOWAIT);
1141: if (error) {
1.214 christos 1142: if (error == ERESTART)
1143: continue;
1.74 thorpej 1144: break;
1145: }
1146: pp->pr_minpages++;
1.216 christos 1147: newpages--;
1.74 thorpej 1148: }
1149:
1150: if (pp->pr_minpages >= pp->pr_maxpages)
1151: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1152:
1.134 ad 1153: mutex_exit(&pp->pr_lock);
1.113 yamt 1154: return error;
1.74 thorpej 1155: }
1.55 thorpej 1156:
1157: /*
1.3 pk 1158: * Add a page worth of items to the pool.
1.21 thorpej 1159: *
1160: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1161: */
1.55 thorpej 1162: static void
1.128 christos 1163: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1164: {
1165: struct pool_item *pi;
1.128 christos 1166: void *cp = storage;
1.125 ad 1167: const unsigned int align = pp->pr_align;
1168: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1169: int n;
1.36 pk 1170:
1.134 ad 1171: KASSERT(mutex_owned(&pp->pr_lock));
1.207 riastrad 1172: KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
1173: (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
1.213 christos 1174: "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp);
1.3 pk 1175:
1176: /*
1177: * Insert page header.
1178: */
1.88 chs 1179: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1180: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1181: ph->ph_page = storage;
1182: ph->ph_nmissing = 0;
1.151 yamt 1183: ph->ph_time = time_uptime;
1.88 chs 1184: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1185: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1186:
1.6 thorpej 1187: pp->pr_nidle++;
1188:
1.3 pk 1189: /*
1190: * Color this page.
1191: */
1.141 yamt 1192: ph->ph_off = pp->pr_curcolor;
1193: cp = (char *)cp + ph->ph_off;
1.3 pk 1194: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1195: pp->pr_curcolor = 0;
1196:
1197: /*
                   1198: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1199: */
1200: if (ioff != 0)
1.128 christos 1201: cp = (char *)cp + align - ioff;
1.3 pk 1202:
1.125 ad 1203: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1204:
1.3 pk 1205: /*
1206: * Insert remaining chunks on the bucket list.
1207: */
1208: n = pp->pr_itemsperpage;
1.20 thorpej 1209: pp->pr_nitems += n;
1.3 pk 1210:
1.97 yamt 1211: if (pp->pr_roflags & PR_NOTOUCH) {
1.141 yamt 1212: pr_item_notouch_init(pp, ph);
1.97 yamt 1213: } else {
1214: while (n--) {
1215: pi = (struct pool_item *)cp;
1.78 thorpej 1216:
1.97 yamt 1217: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1218:
1.97 yamt 1219: /* Insert on page list */
1.102 chs 1220: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1221: #ifdef DIAGNOSTIC
1.97 yamt 1222: pi->pi_magic = PI_MAGIC;
1.3 pk 1223: #endif
1.128 christos 1224: cp = (char *)cp + pp->pr_size;
1.125 ad 1225:
1226: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1227: }
1.3 pk 1228: }
1229:
1230: /*
1231: * If the pool was depleted, point at the new page.
1232: */
1233: if (pp->pr_curpage == NULL)
1234: pp->pr_curpage = ph;
1235:
1236: if (++pp->pr_npages > pp->pr_hiwat)
1237: pp->pr_hiwat = pp->pr_npages;
1238: }
1239:
1.20 thorpej 1240: /*
1.52 thorpej 1241: * Used by pool_get() when nitems drops below the low water mark. This
1.88 chs 1242: * is used to catch up pr_nitems with the low water mark.
1.20 thorpej 1243: *
1.21 thorpej 1244: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1245: *
1.73 thorpej 1246: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1247: * with it locked.
1248: */
1249: static int
1.42 thorpej 1250: pool_catchup(struct pool *pp)
1.20 thorpej 1251: {
1252: int error = 0;
1253:
1.54 thorpej 1254: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1255: error = pool_grow(pp, PR_NOWAIT);
1256: if (error) {
1.214 christos 1257: if (error == ERESTART)
1258: continue;
1.20 thorpej 1259: break;
1260: }
1261: }
1.113 yamt 1262: return error;
1.20 thorpej 1263: }
1264:
1.88 chs 1265: static void
1266: pool_update_curpage(struct pool *pp)
1267: {
1268:
1269: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1270: if (pp->pr_curpage == NULL) {
1271: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1272: }
1.168 yamt 1273: KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1274: (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1.88 chs 1275: }
1276:
1.3 pk 1277: void
1.42 thorpej 1278: pool_setlowat(struct pool *pp, int n)
1.3 pk 1279: {
1.15 pk 1280:
1.134 ad 1281: mutex_enter(&pp->pr_lock);
1.21 thorpej 1282:
1.3 pk 1283: pp->pr_minitems = n;
1.15 pk 1284: pp->pr_minpages = (n == 0)
1285: ? 0
1.18 thorpej 1286: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1287:
1288: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1289: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1290: /*
1291: * XXX: Should we log a warning? Should we set up a timeout
1292: * to try again in a second or so? The latter could break
1293: * a caller's assumptions about interrupt protection, etc.
1294: */
1295: }
1.21 thorpej 1296:
1.134 ad 1297: mutex_exit(&pp->pr_lock);
1.3 pk 1298: }
1299:
1300: void
1.42 thorpej 1301: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1302: {
1.15 pk 1303:
1.134 ad 1304: mutex_enter(&pp->pr_lock);
1.21 thorpej 1305:
1.15 pk 1306: pp->pr_maxpages = (n == 0)
1307: ? 0
1.18 thorpej 1308: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1309:
1.134 ad 1310: mutex_exit(&pp->pr_lock);
1.3 pk 1311: }
1312:
1.20 thorpej 1313: void
1.42 thorpej 1314: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1315: {
1316:
1.134 ad 1317: mutex_enter(&pp->pr_lock);
1.20 thorpej 1318:
1319: pp->pr_hardlimit = n;
1320: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1321: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1322: pp->pr_hardlimit_warning_last.tv_sec = 0;
1323: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1324:
1325: /*
1.21 thorpej 1326: * In-line version of pool_sethiwat(), because we don't want to
1327: * release the lock.
1.20 thorpej 1328: */
1329: pp->pr_maxpages = (n == 0)
1330: ? 0
1331: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1332:
1.134 ad 1333: mutex_exit(&pp->pr_lock);
1.20 thorpej 1334: }
1.3 pk 1335:
1336: /*
1337: * Release all complete pages that have not been used recently.
1.184 rmind 1338: *
1.197 jym 1339: * Must not be called from interrupt context.
1.3 pk 1340: */
1.66 thorpej 1341: int
1.56 sommerfe 1342: pool_reclaim(struct pool *pp)
1.3 pk 1343: {
1344: struct pool_item_header *ph, *phnext;
1.61 chs 1345: struct pool_pagelist pq;
1.151 yamt 1346: uint32_t curtime;
1.134 ad 1347: bool klock;
1348: int rv;
1.3 pk 1349:
1.197 jym 1350: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.184 rmind 1351:
1.68 thorpej 1352: if (pp->pr_drain_hook != NULL) {
1353: /*
1354: * The drain hook must be called with the pool unlocked.
1355: */
1356: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1357: }
1358:
1.134 ad 1359: /*
1.157 ad 1360: * XXXSMP Because we do not want to cause non-MPSAFE code
1361: * to block.
1.134 ad 1362: */
1363: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1364: pp->pr_ipl == IPL_SOFTSERIAL) {
1365: KERNEL_LOCK(1, NULL);
1366: klock = true;
1367: } else
1368: klock = false;
1369:
1370: /* Reclaim items from the pool's cache (if any). */
1371: if (pp->pr_cache != NULL)
1372: pool_cache_invalidate(pp->pr_cache);
1373:
1374: if (mutex_tryenter(&pp->pr_lock) == 0) {
1375: if (klock) {
1376: KERNEL_UNLOCK_ONE(NULL);
1377: }
1.66 thorpej 1378: return (0);
1.134 ad 1379: }
1.68 thorpej 1380:
1.88 chs 1381: LIST_INIT(&pq);
1.43 thorpej 1382:
1.151 yamt 1383: curtime = time_uptime;
1.21 thorpej 1384:
1.88 chs 1385: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1386: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1387:
1388: /* Check our minimum page claim */
1389: if (pp->pr_npages <= pp->pr_minpages)
1390: break;
1391:
1.88 chs 1392: KASSERT(ph->ph_nmissing == 0);
1.191 para 1393: if (curtime - ph->ph_time < pool_inactive_time)
1.88 chs 1394: continue;
1.21 thorpej 1395:
1.88 chs 1396: /*
1397: * If freeing this page would put us below
1398: * the low water mark, stop now.
1399: */
1400: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1401: pp->pr_minitems)
1402: break;
1.21 thorpej 1403:
1.88 chs 1404: pr_rmpage(pp, ph, &pq);
1.3 pk 1405: }
1406:
1.134 ad 1407: mutex_exit(&pp->pr_lock);
1408:
1409: if (LIST_EMPTY(&pq))
1410: rv = 0;
1411: else {
1412: pr_pagelist_free(pp, &pq);
1413: rv = 1;
1414: }
1415:
1416: if (klock) {
1417: KERNEL_UNLOCK_ONE(NULL);
1418: }
1.66 thorpej 1419:
1.134 ad 1420: return (rv);
1.3 pk 1421: }
1422:
1423: /*
1.197 jym 1424: * Drain pools, one at a time. The drained pool is returned within ppp.
1.131 ad 1425: *
1.134 ad 1426: * Note, must never be called from interrupt context.
1.3 pk 1427: */
1.197 jym 1428: bool
1429: pool_drain(struct pool **ppp)
1.3 pk 1430: {
1.197 jym 1431: bool reclaimed;
1.3 pk 1432: struct pool *pp;
1.134 ad 1433:
1.145 ad 1434: KASSERT(!TAILQ_EMPTY(&pool_head));
1.3 pk 1435:
1.61 chs 1436: pp = NULL;
1.134 ad 1437:
1438: /* Find next pool to drain, and add a reference. */
1439: mutex_enter(&pool_head_lock);
1440: do {
1441: if (drainpp == NULL) {
1.145 ad 1442: drainpp = TAILQ_FIRST(&pool_head);
1.134 ad 1443: }
1444: if (drainpp != NULL) {
1445: pp = drainpp;
1.145 ad 1446: drainpp = TAILQ_NEXT(pp, pr_poollist);
1.134 ad 1447: }
1448: /*
1449: * Skip completely idle pools. We depend on at least
1450: * one pool in the system being active.
1451: */
1452: } while (pp == NULL || pp->pr_npages == 0);
1453: pp->pr_refcnt++;
1454: mutex_exit(&pool_head_lock);
1455:
                   1456: /* Drain the cache (if any) and the pool. */
1.186 pooka 1457: reclaimed = pool_reclaim(pp);
1.134 ad 1458:
1459: /* Finally, unlock the pool. */
1460: mutex_enter(&pool_head_lock);
1461: pp->pr_refcnt--;
1462: cv_broadcast(&pool_busy);
1463: mutex_exit(&pool_head_lock);
1.186 pooka 1464:
1.197 jym 1465: if (ppp != NULL)
1466: *ppp = pp;
1467:
1.186 pooka 1468: return reclaimed;
1.3 pk 1469: }
1470:
1471: /*
1.217 mrg 1472: * Calculate the total number of pages consumed by pools.
1473: */
1474: int
1475: pool_totalpages(void)
1476: {
1477: struct pool *pp;
1.218 mrg 1478: uint64_t total = 0;
1.217 mrg 1479:
1480: mutex_enter(&pool_head_lock);
1.218 mrg 1481: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1482: uint64_t bytes = pp->pr_npages * pp->pr_alloc->pa_pagesz;
1483:
1484: if ((pp->pr_roflags & PR_RECURSIVE) != 0)
1485: bytes -= (pp->pr_nout * pp->pr_size);
1486: total += bytes;
1487: }
1.217 mrg 1488: mutex_exit(&pool_head_lock);
1489:
1.218 mrg 1490: return atop(total);
1.217 mrg 1491: }
1492:
1493: /*
1.3 pk 1494: * Diagnostic helpers.
1495: */
1.21 thorpej 1496:
1.25 thorpej 1497: void
1.108 yamt 1498: pool_printall(const char *modif, void (*pr)(const char *, ...))
1499: {
1500: struct pool *pp;
1501:
1.145 ad 1502: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.108 yamt 1503: pool_printit(pp, modif, pr);
1504: }
1505: }
1506:
1507: void
1.42 thorpej 1508: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1509: {
1510:
1511: if (pp == NULL) {
1512: (*pr)("Must specify a pool to print.\n");
1513: return;
1514: }
1515:
1516: pool_print1(pp, modif, pr);
1517: }
1518:
1.21 thorpej 1519: static void
1.124 yamt 1520: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1521: void (*pr)(const char *, ...))
1.88 chs 1522: {
1523: struct pool_item_header *ph;
1.207 riastrad 1524: struct pool_item *pi __diagused;
1.88 chs 1525:
1526: LIST_FOREACH(ph, pl, ph_pagelist) {
1.151 yamt 1527: (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1528: ph->ph_page, ph->ph_nmissing, ph->ph_time);
1.88 chs 1529: #ifdef DIAGNOSTIC
1.97 yamt 1530: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1531: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1532: if (pi->pi_magic != PI_MAGIC) {
1533: (*pr)("\t\t\titem %p, magic 0x%x\n",
1534: pi, pi->pi_magic);
1535: }
1.88 chs 1536: }
1537: }
1538: #endif
1539: }
1540: }
1541:
1542: static void
1.42 thorpej 1543: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1544: {
1.25 thorpej 1545: struct pool_item_header *ph;
1.134 ad 1546: pool_cache_t pc;
1547: pcg_t *pcg;
1548: pool_cache_cpu_t *cc;
1549: uint64_t cpuhit, cpumiss;
1.44 thorpej 1550: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1551: char c;
1552:
1553: while ((c = *modif++) != '\0') {
1554: if (c == 'l')
1555: print_log = 1;
1556: if (c == 'p')
1557: print_pagelist = 1;
1.44 thorpej 1558: if (c == 'c')
1559: print_cache = 1;
1.25 thorpej 1560: }
1561:
1.134 ad 1562: if ((pc = pp->pr_cache) != NULL) {
1563: (*pr)("POOL CACHE");
1564: } else {
1565: (*pr)("POOL");
1566: }
1567:
1568: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1569: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1570: pp->pr_roflags);
1.66 thorpej 1571: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1572: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1573: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1574: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1575: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1576:
1.134 ad 1577: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1578: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1579: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1580: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1581:
1582: if (print_pagelist == 0)
1583: goto skip_pagelist;
1584:
1.88 chs 1585: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1586: (*pr)("\n\tempty page list:\n");
1.97 yamt 1587: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1588: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1589: (*pr)("\n\tfull page list:\n");
1.97 yamt 1590: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1591: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1592: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1593: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1594:
1.25 thorpej 1595: if (pp->pr_curpage == NULL)
1596: (*pr)("\tno current page\n");
1597: else
1598: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1599:
1600: skip_pagelist:
1601: if (print_log == 0)
1602: goto skip_log;
1603:
1604: (*pr)("\n");
1.3 pk 1605:
1.25 thorpej 1606: skip_log:
1.44 thorpej 1607:
1.102 chs 1608: #define PR_GROUPLIST(pcg) \
1609: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1.142 ad 1610: for (i = 0; i < pcg->pcg_size; i++) { \
1.102 chs 1611: if (pcg->pcg_objects[i].pcgo_pa != \
1612: POOL_PADDR_INVALID) { \
1613: (*pr)("\t\t\t%p, 0x%llx\n", \
1614: pcg->pcg_objects[i].pcgo_va, \
1615: (unsigned long long) \
1616: pcg->pcg_objects[i].pcgo_pa); \
1617: } else { \
1618: (*pr)("\t\t\t%p\n", \
1619: pcg->pcg_objects[i].pcgo_va); \
1620: } \
1621: }
1622:
1.134 ad 1623: if (pc != NULL) {
1624: cpuhit = 0;
1625: cpumiss = 0;
1.183 ad 1626: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.134 ad 1627: if ((cc = pc->pc_cpus[i]) == NULL)
1628: continue;
1629: cpuhit += cc->cc_hits;
1630: cpumiss += cc->cc_misses;
1631: }
1632: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1633: (*pr)("\tcache layer hits %llu misses %llu\n",
1634: pc->pc_hits, pc->pc_misses);
1635: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1636: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1637: pc->pc_contended);
1638: (*pr)("\tcache layer empty groups %u full groups %u\n",
1639: pc->pc_nempty, pc->pc_nfull);
1640: if (print_cache) {
1641: (*pr)("\tfull cache groups:\n");
1642: for (pcg = pc->pc_fullgroups; pcg != NULL;
1643: pcg = pcg->pcg_next) {
1644: PR_GROUPLIST(pcg);
1645: }
1646: (*pr)("\tempty cache groups:\n");
1647: for (pcg = pc->pc_emptygroups; pcg != NULL;
1648: pcg = pcg->pcg_next) {
1649: PR_GROUPLIST(pcg);
1650: }
1.103 chs 1651: }
1.44 thorpej 1652: }
1.102 chs 1653: #undef PR_GROUPLIST
1.88 chs 1654: }
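/*
 * Editor's note (illustrative, not part of the original source):
 * pool_print1() is normally reached from the kernel debugger, and
 * the characters in "modif" select optional detail: 'p' prints the
 * per-pool page lists, 'c' the cache groups, and 'l' the request
 * log, whose output has been reduced to a blank line in this
 * revision.  A hypothetical caller wanting page lists and cache
 * contents would pass "pc" as the modifier string.
 */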
1655:
1656: static int
1657: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1658: {
1659: struct pool_item *pi;
1.128 christos 1660: void *page;
1.88 chs 1661: int n;
1662:
1.121 yamt 1663: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1664: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1665: if (page != ph->ph_page &&
1666: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1667: if (label != NULL)
1668: printf("%s: ", label);
1669: printf("pool(%p:%s): page inconsistency: page %p;"
1670: " at page head addr %p (p %p)\n", pp,
1671: pp->pr_wchan, ph->ph_page,
1672: ph, page);
1673: return 1;
1674: }
1.88 chs 1675: }
1.3 pk 1676:
1.97 yamt 1677: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1678: return 0;
1679:
1.102 chs 1680: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1681: pi != NULL;
1.102 chs 1682: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1683:
1684: #ifdef DIAGNOSTIC
1685: if (pi->pi_magic != PI_MAGIC) {
1686: if (label != NULL)
1687: printf("%s: ", label);
1688: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1689: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1690: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1691: n, pi);
1.88 chs 1692: panic("pool");
1693: }
1694: #endif
1.121 yamt 1695: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1696: continue;
1697: }
1.128 christos 1698: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1699: if (page == ph->ph_page)
1700: continue;
1701:
1702: if (label != NULL)
1703: printf("%s: ", label);
1704: printf("pool(%p:%s): page inconsistency: page %p;"
1705: " item ordinal %d; addr %p (p %p)\n", pp,
1706: pp->pr_wchan, ph->ph_page,
1707: n, pi, page);
1708: return 1;
1709: }
1710: return 0;
1.3 pk 1711: }
1712:
1.88 chs 1713:
1.3 pk 1714: int
1.42 thorpej 1715: pool_chk(struct pool *pp, const char *label)
1.3 pk 1716: {
1717: struct pool_item_header *ph;
1718: int r = 0;
1719:
1.134 ad 1720: mutex_enter(&pp->pr_lock);
1.88 chs 1721: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1722: r = pool_chk_page(pp, label, ph);
1723: if (r) {
1724: goto out;
1725: }
1726: }
1727: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1728: r = pool_chk_page(pp, label, ph);
1729: if (r) {
1.3 pk 1730: goto out;
1731: }
1.88 chs 1732: }
1733: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1734: r = pool_chk_page(pp, label, ph);
1735: if (r) {
1.3 pk 1736: goto out;
1737: }
1738: }
1.88 chs 1739:
1.3 pk 1740: out:
1.134 ad 1741: mutex_exit(&pp->pr_lock);
1.3 pk 1742: return (r);
1.43 thorpej 1743: }
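/*
 * Usage sketch (editor's illustration; "foo_pool" and the label are
 * hypothetical): pool_chk() can be called from debugging code to
 * verify a pool's page lists, returning non-zero on the first
 * inconsistency found.
 *
 *	if (pool_chk(&foo_pool, "foo: after rollback") != 0)
 *		panic("foo_pool corrupted");
 */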
1744:
1745: /*
1746: * pool_cache_init:
1747: *
1748: * Initialize a pool cache.
1.134 ad 1749: */
1750: pool_cache_t
1751: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1752: const char *wchan, struct pool_allocator *palloc, int ipl,
1753: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1754: {
1755: pool_cache_t pc;
1756:
1757: pc = pool_get(&cache_pool, PR_WAITOK);
1758: if (pc == NULL)
1759: return NULL;
1760:
1761: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1762: palloc, ipl, ctor, dtor, arg);
1763:
1764: return pc;
1765: }
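/*
 * Usage sketch (editor's illustration; the "foo" names are
 * hypothetical): a subsystem typically creates one cache per object
 * type, e.g.
 *
 *	static pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 * Passing NULL for the allocator selects a default based on object
 * size and IPL (see pool_cache_bootstrap() below); ctor and dtor may
 * be NULL when objects need no construction.
 */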
1766:
1767: /*
1768: * pool_cache_bootstrap:
1.43 thorpej 1769: *
1.134 ad 1770: * Kernel-private version of pool_cache_init(). The caller
1771: * provides initial storage.
1.43 thorpej 1772: */
1773: void
1.134 ad 1774: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1775: u_int align_offset, u_int flags, const char *wchan,
1776: struct pool_allocator *palloc, int ipl,
1777: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 1778: void *arg)
1779: {
1.134 ad 1780: CPU_INFO_ITERATOR cii;
1.145 ad 1781: pool_cache_t pc1;
1.134 ad 1782: struct cpu_info *ci;
1783: struct pool *pp;
1784:
1785: pp = &pc->pc_pool;
1.208 chs 1786: if (palloc == NULL && ipl == IPL_NONE) {
1787: if (size > PAGE_SIZE) {
1788: int bigidx = pool_bigidx(size);
1789:
1790: palloc = &pool_allocator_big[bigidx];
1791: } else
1792: palloc = &pool_allocator_nointr;
1793: }
1.134 ad 1794: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.157 ad 1795: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1.43 thorpej 1796:
1.134 ad 1797: if (ctor == NULL) {
1798: ctor = (int (*)(void *, void *, int))nullop;
1799: }
1800: if (dtor == NULL) {
1801: dtor = (void (*)(void *, void *))nullop;
1802: }
1.43 thorpej 1803:
1.134 ad 1804: pc->pc_emptygroups = NULL;
1805: pc->pc_fullgroups = NULL;
1806: pc->pc_partgroups = NULL;
1.43 thorpej 1807: pc->pc_ctor = ctor;
1808: pc->pc_dtor = dtor;
1809: pc->pc_arg = arg;
1.134 ad 1810: pc->pc_hits = 0;
1.48 thorpej 1811: pc->pc_misses = 0;
1.134 ad 1812: pc->pc_nempty = 0;
1813: pc->pc_npart = 0;
1814: pc->pc_nfull = 0;
1815: pc->pc_contended = 0;
1816: pc->pc_refcnt = 0;
1.136 yamt 1817: pc->pc_freecheck = NULL;
1.134 ad 1818:
1.142 ad 1819: if ((flags & PR_LARGECACHE) != 0) {
1820: pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1.163 ad 1821: pc->pc_pcgpool = &pcg_large_pool;
1.142 ad 1822: } else {
1823: pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1.163 ad 1824: pc->pc_pcgpool = &pcg_normal_pool;
1.142 ad 1825: }
1826:
1.134 ad 1827: /* Allocate per-CPU caches. */
1828: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1829: pc->pc_ncpu = 0;
1.139 ad 1830: if (ncpu < 2) {
1.137 ad 1831: /* XXX For sparc: boot CPU is not attached yet. */
1832: pool_cache_cpu_init1(curcpu(), pc);
1833: } else {
1834: for (CPU_INFO_FOREACH(cii, ci)) {
1835: pool_cache_cpu_init1(ci, pc);
1836: }
1.134 ad 1837: }
1.145 ad 1838:
1839: /* Add to list of all pools. */
1840: if (__predict_true(!cold))
1.134 ad 1841: mutex_enter(&pool_head_lock);
1.145 ad 1842: TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1843: if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1844: break;
1845: }
1846: if (pc1 == NULL)
1847: TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1848: else
1849: TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1850: if (__predict_true(!cold))
1.134 ad 1851: mutex_exit(&pool_head_lock);
1.145 ad 1852:
1853: membar_sync();
1854: pp->pr_cache = pc;
1.43 thorpej 1855: }
1856:
1857: /*
1858: * pool_cache_destroy:
1859: *
1860: * Destroy a pool cache.
1861: */
1862: void
1.134 ad 1863: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 1864: {
1.191 para 1865:
1866: pool_cache_bootstrap_destroy(pc);
1867: pool_put(&cache_pool, pc);
1868: }
1869:
1870: /*
1871: * pool_cache_bootstrap_destroy:
1872: *
1873:  *	Destroy a pool cache without freeing its storage.
1874: */
1875: void
1876: pool_cache_bootstrap_destroy(pool_cache_t pc)
1877: {
1.134 ad 1878: struct pool *pp = &pc->pc_pool;
1.175 jym 1879: u_int i;
1.134 ad 1880:
1881: /* Remove it from the global list. */
1882: mutex_enter(&pool_head_lock);
1883: while (pc->pc_refcnt != 0)
1884: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 1885: TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1.134 ad 1886: mutex_exit(&pool_head_lock);
1.43 thorpej 1887:
1888: /* First, invalidate the entire cache. */
1889: pool_cache_invalidate(pc);
1890:
1.134 ad 1891: /* Disassociate it from the pool. */
1892: mutex_enter(&pp->pr_lock);
1893: pp->pr_cache = NULL;
1894: mutex_exit(&pp->pr_lock);
1895:
1896: /* Destroy per-CPU data */
1.183 ad 1897: for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1.175 jym 1898: pool_cache_invalidate_cpu(pc, i);
1.134 ad 1899:
1900: /* Finally, destroy it. */
1901: mutex_destroy(&pc->pc_lock);
1902: pool_destroy(pp);
1903: }
1904:
1905: /*
1906: * pool_cache_cpu_init1:
1907: *
1908: * Called for each pool_cache whenever a new CPU is attached.
1909: */
1910: static void
1911: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1912: {
1913: pool_cache_cpu_t *cc;
1.137 ad 1914: int index;
1.134 ad 1915:
1.137 ad 1916: index = ci->ci_index;
1917:
1.183 ad 1918: KASSERT(index < __arraycount(pc->pc_cpus));
1.134 ad 1919:
1.137 ad 1920: if ((cc = pc->pc_cpus[index]) != NULL) {
1921: KASSERT(cc->cc_cpuindex == index);
1.134 ad 1922: return;
1923: }
1924:
1925: /*
1926: * The first CPU is 'free'. This needs to be the case for
1927: * bootstrap - we may not be able to allocate yet.
1928: */
1929: if (pc->pc_ncpu == 0) {
1930: cc = &pc->pc_cpu0;
1931: pc->pc_ncpu = 1;
1932: } else {
1933: mutex_enter(&pc->pc_lock);
1934: pc->pc_ncpu++;
1935: mutex_exit(&pc->pc_lock);
1936: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1937: }
1938:
1939: cc->cc_ipl = pc->pc_pool.pr_ipl;
1940: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1941: cc->cc_cache = pc;
1.137 ad 1942: cc->cc_cpuindex = index;
1.134 ad 1943: cc->cc_hits = 0;
1944: cc->cc_misses = 0;
1.169 yamt 1945: cc->cc_current = __UNCONST(&pcg_dummy);
1946: cc->cc_previous = __UNCONST(&pcg_dummy);
1.134 ad 1947:
1.137 ad 1948: pc->pc_cpus[index] = cc;
1.43 thorpej 1949: }
1950:
1.134 ad 1951: /*
1952: * pool_cache_cpu_init:
1953: *
1954: * Called whenever a new CPU is attached.
1955: */
1956: void
1957: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 1958: {
1.134 ad 1959: pool_cache_t pc;
1960:
1961: mutex_enter(&pool_head_lock);
1.145 ad 1962: TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1.134 ad 1963: pc->pc_refcnt++;
1964: mutex_exit(&pool_head_lock);
1.43 thorpej 1965:
1.134 ad 1966: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 1967:
1.134 ad 1968: mutex_enter(&pool_head_lock);
1969: pc->pc_refcnt--;
1970: cv_broadcast(&pool_busy);
1971: }
1972: mutex_exit(&pool_head_lock);
1.43 thorpej 1973: }
1974:
1.134 ad 1975: /*
1976: * pool_cache_reclaim:
1977: *
1978: * Reclaim memory from a pool cache.
1979: */
1980: bool
1981: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 1982: {
1983:
1.134 ad 1984: return pool_reclaim(&pc->pc_pool);
1985: }
1.43 thorpej 1986:
1.136 yamt 1987: static void
1988: pool_cache_destruct_object1(pool_cache_t pc, void *object)
1989: {
1990:
1991: (*pc->pc_dtor)(pc->pc_arg, object);
1992: pool_put(&pc->pc_pool, object);
1993: }
1994:
1.134 ad 1995: /*
1996: * pool_cache_destruct_object:
1997: *
1998: * Force destruction of an object and its release back into
1999: * the pool.
2000: */
2001: void
2002: pool_cache_destruct_object(pool_cache_t pc, void *object)
2003: {
2004:
1.136 yamt 2005: FREECHECK_IN(&pc->pc_freecheck, object);
2006:
2007: pool_cache_destruct_object1(pc, object);
1.43 thorpej 2008: }
2009:
1.134 ad 2010: /*
2011: * pool_cache_invalidate_groups:
2012: *
2013: * Invalidate a chain of groups and destruct all objects.
2014: */
1.102 chs 2015: static void
1.134 ad 2016: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 2017: {
1.134 ad 2018: void *object;
2019: pcg_t *next;
2020: int i;
2021:
2022: for (; pcg != NULL; pcg = next) {
2023: next = pcg->pcg_next;
2024:
2025: for (i = 0; i < pcg->pcg_avail; i++) {
2026: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 2027: pool_cache_destruct_object1(pc, object);
1.134 ad 2028: }
1.102 chs 2029:
1.142 ad 2030: if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
2031: pool_put(&pcg_large_pool, pcg);
2032: } else {
2033: KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
2034: pool_put(&pcg_normal_pool, pcg);
2035: }
1.102 chs 2036: }
2037: }
2038:
1.43 thorpej 2039: /*
1.134 ad 2040: * pool_cache_invalidate:
1.43 thorpej 2041: *
1.134 ad 2042: * Invalidate a pool cache (destruct and release all of the
2043: * cached objects). Does not reclaim objects from the pool.
1.176 thorpej 2044: *
2045:  *	Note: for pool caches that provide constructed objects, it is
2046:  *	assumed that a higher level of synchronization orders updates
2047:  *	to the constructor's input data with the cache invalidation.
1.196 jym 2048: *
2049: * Invalidation is a costly process and should not be called from
2050: * interrupt context.
1.43 thorpej 2051: */
1.134 ad 2052: void
2053: pool_cache_invalidate(pool_cache_t pc)
2054: {
1.196 jym 2055: uint64_t where;
1.134 ad 2056: pcg_t *full, *empty, *part;
1.196 jym 2057:
2058: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.176 thorpej 2059:
1.177 jym 2060: if (ncpu < 2 || !mp_online) {
1.176 thorpej 2061: /*
2062: * We might be called early enough in the boot process
2063: * for the CPU data structures to not be fully initialized.
1.196 jym 2064: * In this case, transfer the content of the local CPU's
2065: 		 * cache back into the global cache, as only this CPU is currently
2066: * running.
1.176 thorpej 2067: */
1.196 jym 2068: pool_cache_transfer(pc);
1.176 thorpej 2069: } else {
2070: /*
1.196 jym 2071: * Signal all CPUs that they must transfer their local
2072: * cache back to the global pool then wait for the xcall to
2073: * complete.
1.176 thorpej 2074: */
1.196 jym 2075: where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
2076: pc, NULL);
1.176 thorpej 2077: xc_wait(where);
2078: }
1.196 jym 2079:
2080: /* Empty pool caches, then invalidate objects */
1.134 ad 2081: mutex_enter(&pc->pc_lock);
2082: full = pc->pc_fullgroups;
2083: empty = pc->pc_emptygroups;
2084: part = pc->pc_partgroups;
2085: pc->pc_fullgroups = NULL;
2086: pc->pc_emptygroups = NULL;
2087: pc->pc_partgroups = NULL;
2088: pc->pc_nfull = 0;
2089: pc->pc_nempty = 0;
2090: pc->pc_npart = 0;
2091: mutex_exit(&pc->pc_lock);
2092:
2093: pool_cache_invalidate_groups(pc, full);
2094: pool_cache_invalidate_groups(pc, empty);
2095: pool_cache_invalidate_groups(pc, part);
2096: }
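/*
 * Editor's note (illustrative): explicit invalidation matters when a
 * cache is kept alive but its preconstructed objects have gone
 * stale, e.g. a hypothetical
 *
 *	pool_cache_invalidate(foo_cache);
 *
 * after changing the data the constructor depends on.  When a cache
 * is being destroyed, pool_cache_destroy() performs the invalidation
 * itself.
 */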
2097:
1.175 jym 2098: /*
2099: * pool_cache_invalidate_cpu:
2100: *
2101: * Invalidate all CPU-bound cached objects in pool cache, the CPU being
2102: * identified by its associated index.
2103:  * It is the caller's responsibility to ensure that no operation is
2104: * taking place on this pool cache while doing this invalidation.
2105: * WARNING: as no inter-CPU locking is enforced, trying to invalidate
2106: * pool cached objects from a CPU different from the one currently running
2107:  * may result in undefined behaviour.
2108: */
2109: static void
2110: pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2111: {
2112: pool_cache_cpu_t *cc;
2113: pcg_t *pcg;
2114:
2115: if ((cc = pc->pc_cpus[index]) == NULL)
2116: return;
2117:
2118: if ((pcg = cc->cc_current) != &pcg_dummy) {
2119: pcg->pcg_next = NULL;
2120: pool_cache_invalidate_groups(pc, pcg);
2121: }
2122: if ((pcg = cc->cc_previous) != &pcg_dummy) {
2123: pcg->pcg_next = NULL;
2124: pool_cache_invalidate_groups(pc, pcg);
2125: }
2126: if (cc != &pc->pc_cpu0)
2127: pool_put(&cache_cpu_pool, cc);
2128:
2129: }
2130:
1.134 ad 2131: void
2132: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2133: {
2134:
2135: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2136: }
2137:
2138: void
2139: pool_cache_setlowat(pool_cache_t pc, int n)
2140: {
2141:
2142: pool_setlowat(&pc->pc_pool, n);
2143: }
2144:
2145: void
2146: pool_cache_sethiwat(pool_cache_t pc, int n)
2147: {
2148:
2149: pool_sethiwat(&pc->pc_pool, n);
2150: }
2151:
2152: void
2153: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2154: {
2155:
2156: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2157: }
2158:
1.162 ad 2159: static bool __noinline
2160: pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
1.134 ad 2161: paddr_t *pap, int flags)
1.43 thorpej 2162: {
1.134 ad 2163: pcg_t *pcg, *cur;
2164: uint64_t ncsw;
2165: pool_cache_t pc;
1.43 thorpej 2166: void *object;
1.58 thorpej 2167:
1.168 yamt 2168: KASSERT(cc->cc_current->pcg_avail == 0);
2169: KASSERT(cc->cc_previous->pcg_avail == 0);
2170:
1.134 ad 2171: pc = cc->cc_cache;
2172: cc->cc_misses++;
1.43 thorpej 2173:
1.134 ad 2174: /*
2175: * Nothing was available locally. Try and grab a group
2176: * from the cache.
2177: */
1.162 ad 2178: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2179: ncsw = curlwp->l_ncsw;
2180: mutex_enter(&pc->pc_lock);
2181: pc->pc_contended++;
1.43 thorpej 2182:
1.134 ad 2183: /*
2184: * If we context switched while locking, then
2185: * our view of the per-CPU data is invalid:
2186: * retry.
2187: */
2188: if (curlwp->l_ncsw != ncsw) {
2189: mutex_exit(&pc->pc_lock);
1.162 ad 2190: return true;
1.43 thorpej 2191: }
1.102 chs 2192: }
1.43 thorpej 2193:
1.162 ad 2194: if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
1.43 thorpej 2195: /*
1.134 ad 2196: * If there's a full group, release our empty
2197: * group back to the cache. Install the full
2198: * group as cc_current and return.
1.43 thorpej 2199: */
1.162 ad 2200: if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
1.134 ad 2201: KASSERT(cur->pcg_avail == 0);
2202: cur->pcg_next = pc->pc_emptygroups;
2203: pc->pc_emptygroups = cur;
2204: pc->pc_nempty++;
1.87 thorpej 2205: }
1.142 ad 2206: KASSERT(pcg->pcg_avail == pcg->pcg_size);
1.134 ad 2207: cc->cc_current = pcg;
2208: pc->pc_fullgroups = pcg->pcg_next;
2209: pc->pc_hits++;
2210: pc->pc_nfull--;
2211: mutex_exit(&pc->pc_lock);
1.162 ad 2212: return true;
1.134 ad 2213: }
2214:
2215: /*
2216: * Nothing available locally or in cache. Take the slow
2217: * path: fetch a new object from the pool and construct
2218: * it.
2219: */
2220: pc->pc_misses++;
2221: mutex_exit(&pc->pc_lock);
1.162 ad 2222: splx(s);
1.134 ad 2223:
2224: object = pool_get(&pc->pc_pool, flags);
2225: *objectp = object;
1.211 riastrad 2226: if (__predict_false(object == NULL)) {
2227: KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
1.162 ad 2228: return false;
1.211 riastrad 2229: }
1.125 ad 2230:
1.162 ad 2231: if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
1.134 ad 2232: pool_put(&pc->pc_pool, object);
2233: *objectp = NULL;
1.162 ad 2234: return false;
1.43 thorpej 2235: }
2236:
1.134 ad 2237: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2238: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2239:
1.134 ad 2240: if (pap != NULL) {
2241: #ifdef POOL_VTOPHYS
2242: *pap = POOL_VTOPHYS(object);
2243: #else
2244: *pap = POOL_PADDR_INVALID;
2245: #endif
1.102 chs 2246: }
1.43 thorpej 2247:
1.125 ad 2248: FREECHECK_OUT(&pc->pc_freecheck, object);
1.204 maxv 2249: pool_redzone_fill(&pc->pc_pool, object);
1.162 ad 2250: return false;
1.43 thorpej 2251: }
2252:
2253: /*
1.134 ad 2254: * pool_cache_get{,_paddr}:
1.43 thorpej 2255: *
1.134 ad 2256: * Get an object from a pool cache (optionally returning
2257: * the physical address of the object).
1.43 thorpej 2258: */
1.134 ad 2259: void *
2260: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2261: {
1.134 ad 2262: pool_cache_cpu_t *cc;
2263: pcg_t *pcg;
2264: void *object;
1.60 thorpej 2265: int s;
1.43 thorpej 2266:
1.215 christos 2267: KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
1.184 rmind 2268: KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
1.185 rmind 2269: (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
1.213 christos 2270: "%s: [%s] is IPL_NONE, but called from interrupt context",
2271: __func__, pc->pc_pool.pr_wchan);
1.184 rmind 2272:
1.155 ad 2273: if (flags & PR_WAITOK) {
1.154 yamt 2274: ASSERT_SLEEPABLE();
1.155 ad 2275: }
1.125 ad 2276:
1.162 ad 2277: /* Lock out interrupts and disable preemption. */
2278: s = splvm();
1.165 yamt 2279: while (/* CONSTCOND */ true) {
1.134 ad 2280: /* Try and allocate an object from the current group. */
1.162 ad 2281: cc = pc->pc_cpus[curcpu()->ci_index];
2282: KASSERT(cc->cc_cache == pc);
1.134 ad 2283: pcg = cc->cc_current;
1.162 ad 2284: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2285: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
1.162 ad 2286: if (__predict_false(pap != NULL))
1.134 ad 2287: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
1.148 yamt 2288: #if defined(DIAGNOSTIC)
1.134 ad 2289: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
1.163 ad 2290: KASSERT(pcg->pcg_avail < pcg->pcg_size);
1.134 ad 2291: KASSERT(object != NULL);
1.163 ad 2292: #endif
1.134 ad 2293: cc->cc_hits++;
1.162 ad 2294: splx(s);
1.134 ad 2295: FREECHECK_OUT(&pc->pc_freecheck, object);
1.204 maxv 2296: pool_redzone_fill(&pc->pc_pool, object);
1.134 ad 2297: return object;
1.43 thorpej 2298: }
2299:
2300: /*
1.134 ad 2301: * That failed. If the previous group isn't empty, swap
2302: * it with the current group and allocate from there.
1.43 thorpej 2303: */
1.134 ad 2304: pcg = cc->cc_previous;
1.162 ad 2305: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2306: cc->cc_previous = cc->cc_current;
2307: cc->cc_current = pcg;
2308: continue;
1.43 thorpej 2309: }
2310:
1.134 ad 2311: /*
2312: * Can't allocate from either group: try the slow path.
2313: * If get_slow() allocated an object for us, or if
1.162 ad 2314: * no more objects are available, it will return false.
1.134 ad 2315: * Otherwise, we need to retry.
2316: */
1.165 yamt 2317: if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2318: break;
2319: }
1.43 thorpej 2320:
1.211 riastrad 2321: /*
2322: * We would like to KASSERT(object || (flags & PR_NOWAIT)), but
2323: * pool_cache_get can fail even in the PR_WAITOK case, if the
2324: * constructor fails.
2325: */
1.134 ad 2326: return object;
1.51 thorpej 2327: }
2328:
1.162 ad 2329: static bool __noinline
2330: pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
1.51 thorpej 2331: {
1.200 pooka 2332: struct lwp *l = curlwp;
1.163 ad 2333: pcg_t *pcg, *cur;
1.134 ad 2334: uint64_t ncsw;
2335: pool_cache_t pc;
1.51 thorpej 2336:
1.168 yamt 2337: KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2338: KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2339:
1.134 ad 2340: pc = cc->cc_cache;
1.171 ad 2341: pcg = NULL;
1.134 ad 2342: cc->cc_misses++;
1.200 pooka 2343: ncsw = l->l_ncsw;
1.43 thorpej 2344:
1.171 ad 2345: /*
2346: * If there are no empty groups in the cache then allocate one
2347: * while still unlocked.
2348: */
2349: if (__predict_false(pc->pc_emptygroups == NULL)) {
2350: if (__predict_true(!pool_cache_disable)) {
2351: pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2352: }
1.200 pooka 2353: /*
2354: * If pool_get() blocked, then our view of
2355: * the per-CPU data is invalid: retry.
2356: */
2357: if (__predict_false(l->l_ncsw != ncsw)) {
2358: if (pcg != NULL) {
2359: pool_put(pc->pc_pcgpool, pcg);
2360: }
2361: return true;
2362: }
1.171 ad 2363: if (__predict_true(pcg != NULL)) {
2364: pcg->pcg_avail = 0;
2365: pcg->pcg_size = pc->pc_pcgsize;
2366: }
2367: }
2368:
1.162 ad 2369: /* Lock the cache. */
2370: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2371: mutex_enter(&pc->pc_lock);
2372: pc->pc_contended++;
1.162 ad 2373:
1.163 ad 2374: /*
2375: * If we context switched while locking, then our view of
2376: * the per-CPU data is invalid: retry.
2377: */
1.200 pooka 2378: if (__predict_false(l->l_ncsw != ncsw)) {
1.163 ad 2379: mutex_exit(&pc->pc_lock);
1.171 ad 2380: if (pcg != NULL) {
2381: pool_put(pc->pc_pcgpool, pcg);
2382: }
1.163 ad 2383: return true;
2384: }
1.162 ad 2385: }
1.102 chs 2386:
1.163     ad       2387: 	/* If we did not allocate an empty group above, take one from the cache. */
1.171 ad 2388: if (pcg == NULL && pc->pc_emptygroups != NULL) {
2389: pcg = pc->pc_emptygroups;
1.163 ad 2390: pc->pc_emptygroups = pcg->pcg_next;
2391: pc->pc_nempty--;
1.134 ad 2392: }
1.130 ad 2393:
1.162 ad 2394: /*
2395: 	 * If there's an empty group, release our full group back
2396: 	 * to the cache.  Install the empty group on the local CPU
2397: * and return.
2398: */
1.163 ad 2399: if (pcg != NULL) {
1.134 ad 2400: KASSERT(pcg->pcg_avail == 0);
1.162 ad 2401: if (__predict_false(cc->cc_previous == &pcg_dummy)) {
1.146 ad 2402: cc->cc_previous = pcg;
2403: } else {
1.162 ad 2404: cur = cc->cc_current;
2405: if (__predict_true(cur != &pcg_dummy)) {
1.163 ad 2406: KASSERT(cur->pcg_avail == cur->pcg_size);
1.146 ad 2407: cur->pcg_next = pc->pc_fullgroups;
2408: pc->pc_fullgroups = cur;
2409: pc->pc_nfull++;
2410: }
2411: cc->cc_current = pcg;
2412: }
1.163 ad 2413: pc->pc_hits++;
1.134 ad 2414: mutex_exit(&pc->pc_lock);
1.162 ad 2415: return true;
1.102 chs 2416: }
1.105 christos 2417:
1.134 ad 2418: /*
1.162 ad 2419: * Nothing available locally or in cache, and we didn't
2420: * allocate an empty group. Take the slow path and destroy
2421: * the object here and now.
1.134 ad 2422: */
2423: pc->pc_misses++;
2424: mutex_exit(&pc->pc_lock);
1.162 ad 2425: splx(s);
2426: pool_cache_destruct_object(pc, object);
1.105 christos 2427:
1.162 ad 2428: return false;
1.134 ad 2429: }
1.102 chs 2430:
1.43 thorpej 2431: /*
1.134 ad 2432: * pool_cache_put{,_paddr}:
1.43 thorpej 2433: *
1.134 ad 2434: * Put an object back to the pool cache (optionally caching the
2435: * physical address of the object).
1.43 thorpej 2436: */
1.101 thorpej 2437: void
1.134 ad 2438: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2439: {
1.134 ad 2440: pool_cache_cpu_t *cc;
2441: pcg_t *pcg;
2442: int s;
1.101 thorpej 2443:
1.172 yamt 2444: KASSERT(object != NULL);
1.204 maxv 2445: pool_redzone_check(&pc->pc_pool, object);
1.134 ad 2446: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2447:
1.162 ad 2448: /* Lock out interrupts and disable preemption. */
2449: s = splvm();
1.165 yamt 2450: while (/* CONSTCOND */ true) {
1.134 ad 2451: /* If the current group isn't full, release it there. */
1.162 ad 2452: cc = pc->pc_cpus[curcpu()->ci_index];
2453: KASSERT(cc->cc_cache == pc);
1.134 ad 2454: pcg = cc->cc_current;
1.162 ad 2455: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2456: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2457: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2458: pcg->pcg_avail++;
2459: cc->cc_hits++;
1.162 ad 2460: splx(s);
1.134 ad 2461: return;
2462: }
1.43 thorpej 2463:
1.134 ad 2464: /*
1.162 ad 2465: * That failed. If the previous group isn't full, swap
1.134 ad 2466: * it with the current group and try again.
2467: */
2468: pcg = cc->cc_previous;
1.162 ad 2469: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2470: cc->cc_previous = cc->cc_current;
2471: cc->cc_current = pcg;
2472: continue;
2473: }
1.43 thorpej 2474:
1.134 ad 2475: /*
2476: * Can't free to either group: try the slow path.
2477: * If put_slow() releases the object for us, it
1.162 ad 2478: * will return false. Otherwise we need to retry.
1.134 ad 2479: */
1.165 yamt 2480: if (!pool_cache_put_slow(cc, s, object))
2481: break;
2482: }
1.43 thorpej 2483: }
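/*
 * Usage sketch (editor's illustration; the "foo" names are
 * hypothetical).  The plain wrappers from <sys/pool.h> pair
 * naturally:
 *
 *	struct foo *f;
 *
 *	f = pool_cache_get(foo_cache, PR_WAITOK);
 *	if (f == NULL)
 *		return ENOMEM;
 *	...
 *	pool_cache_put(foo_cache, f);
 *
 * As noted above, the get can return NULL even with PR_WAITOK if the
 * constructor fails, so the NULL check is not optional.
 */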
2484:
2485: /*
1.196 jym 2486: * pool_cache_transfer:
1.43 thorpej 2487: *
1.134 ad 2488: * Transfer objects from the per-CPU cache to the global cache.
2489: * Run within a cross-call thread.
1.43 thorpej 2490: */
2491: static void
1.196 jym 2492: pool_cache_transfer(pool_cache_t pc)
1.43 thorpej 2493: {
1.134 ad 2494: pool_cache_cpu_t *cc;
2495: pcg_t *prev, *cur, **list;
1.162 ad 2496: int s;
1.134 ad 2497:
1.162 ad 2498: s = splvm();
2499: mutex_enter(&pc->pc_lock);
2500: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2501: cur = cc->cc_current;
1.169 yamt 2502: cc->cc_current = __UNCONST(&pcg_dummy);
1.134 ad 2503: prev = cc->cc_previous;
1.169 yamt 2504: cc->cc_previous = __UNCONST(&pcg_dummy);
1.162 ad 2505: if (cur != &pcg_dummy) {
1.142 ad 2506: if (cur->pcg_avail == cur->pcg_size) {
1.134 ad 2507: list = &pc->pc_fullgroups;
2508: pc->pc_nfull++;
2509: } else if (cur->pcg_avail == 0) {
2510: list = &pc->pc_emptygroups;
2511: pc->pc_nempty++;
2512: } else {
2513: list = &pc->pc_partgroups;
2514: pc->pc_npart++;
2515: }
2516: cur->pcg_next = *list;
2517: *list = cur;
2518: }
1.162 ad 2519: if (prev != &pcg_dummy) {
1.142 ad 2520: if (prev->pcg_avail == prev->pcg_size) {
1.134 ad 2521: list = &pc->pc_fullgroups;
2522: pc->pc_nfull++;
2523: } else if (prev->pcg_avail == 0) {
2524: list = &pc->pc_emptygroups;
2525: pc->pc_nempty++;
2526: } else {
2527: list = &pc->pc_partgroups;
2528: pc->pc_npart++;
2529: }
2530: prev->pcg_next = *list;
2531: *list = prev;
2532: }
2533: mutex_exit(&pc->pc_lock);
2534: splx(s);
1.3 pk 2535: }
1.66 thorpej 2536:
2537: /*
2538: * Pool backend allocators.
2539: *
2540: * Each pool has a backend allocator that handles allocation, deallocation,
2541: * and any additional draining that might be needed.
2542: *
2543: * We provide two standard allocators:
2544: *
2545: * pool_allocator_kmem - the default when no allocator is specified
2546: *
2547: * pool_allocator_nointr - used for pools that will not be accessed
2548: * in interrupt context.
2549: */
2550: void *pool_page_alloc(struct pool *, int);
2551: void pool_page_free(struct pool *, void *);
2552:
1.112 bjh21 2553: #ifdef POOL_SUBPAGE
2554: struct pool_allocator pool_allocator_kmem_fullpage = {
1.192 rmind 2555: .pa_alloc = pool_page_alloc,
2556: .pa_free = pool_page_free,
2557: .pa_pagesz = 0
1.112 bjh21 2558: };
2559: #else
1.66 thorpej 2560: struct pool_allocator pool_allocator_kmem = {
1.191 para 2561: .pa_alloc = pool_page_alloc,
2562: .pa_free = pool_page_free,
2563: .pa_pagesz = 0
1.66 thorpej 2564: };
1.112 bjh21 2565: #endif
1.66 thorpej 2566:
1.112 bjh21 2567: #ifdef POOL_SUBPAGE
2568: struct pool_allocator pool_allocator_nointr_fullpage = {
1.194 para 2569: .pa_alloc = pool_page_alloc,
2570: .pa_free = pool_page_free,
1.192 rmind 2571: .pa_pagesz = 0
1.112 bjh21 2572: };
2573: #else
1.66 thorpej 2574: struct pool_allocator pool_allocator_nointr = {
1.191 para 2575: .pa_alloc = pool_page_alloc,
2576: .pa_free = pool_page_free,
2577: .pa_pagesz = 0
1.66 thorpej 2578: };
1.112 bjh21 2579: #endif
1.66 thorpej 2580:
2581: #ifdef POOL_SUBPAGE
2582: void *pool_subpage_alloc(struct pool *, int);
2583: void pool_subpage_free(struct pool *, void *);
2584:
1.112 bjh21 2585: struct pool_allocator pool_allocator_kmem = {
1.193 he 2586: .pa_alloc = pool_subpage_alloc,
2587: .pa_free = pool_subpage_free,
2588: .pa_pagesz = POOL_SUBPAGE
1.112 bjh21 2589: };
2590:
2591: struct pool_allocator pool_allocator_nointr = {
1.192 rmind 2592: .pa_alloc = pool_subpage_alloc,
2593: .pa_free = pool_subpage_free,
2594: .pa_pagesz = POOL_SUBPAGE
1.66 thorpej 2595: };
2596: #endif /* POOL_SUBPAGE */
2597:
1.208 chs 2598: struct pool_allocator pool_allocator_big[] = {
2599: {
2600: .pa_alloc = pool_page_alloc,
2601: .pa_free = pool_page_free,
2602: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0),
2603: },
2604: {
2605: .pa_alloc = pool_page_alloc,
2606: .pa_free = pool_page_free,
2607: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1),
2608: },
2609: {
2610: .pa_alloc = pool_page_alloc,
2611: .pa_free = pool_page_free,
2612: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2),
2613: },
2614: {
2615: .pa_alloc = pool_page_alloc,
2616: .pa_free = pool_page_free,
2617: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3),
2618: },
2619: {
2620: .pa_alloc = pool_page_alloc,
2621: .pa_free = pool_page_free,
2622: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4),
2623: },
2624: {
2625: .pa_alloc = pool_page_alloc,
2626: .pa_free = pool_page_free,
2627: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5),
2628: },
2629: {
2630: .pa_alloc = pool_page_alloc,
2631: .pa_free = pool_page_free,
2632: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6),
2633: },
2634: {
2635: .pa_alloc = pool_page_alloc,
2636: .pa_free = pool_page_free,
2637: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7),
2638: }
2639: };
2640:
2641: static int
2642: pool_bigidx(size_t size)
2643: {
2644: int i;
2645:
2646: for (i = 0; i < __arraycount(pool_allocator_big); i++) {
2647: if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size)
2648: return i;
2649: }
2650: panic("pool item size %zu too large, use a custom allocator", size);
2651: }
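/*
 * Illustrative sketch (editor's addition): the panic above points at
 * supplying a custom allocator; one is declared like the standard
 * allocators in this file, e.g. a hypothetical 64 kB-page allocator
 * reusing the default page backend:
 *
 *	static struct pool_allocator foo_allocator = {
 *		.pa_alloc = pool_page_alloc,
 *		.pa_free = pool_page_free,
 *		.pa_pagesz = 65536,
 *	};
 *
 * which is then passed as "palloc" to pool_init() or
 * pool_cache_init().
 */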
2652:
1.117 yamt 2653: static void *
2654: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2655: {
1.117 yamt 2656: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2657: void *res;
2658:
1.117 yamt 2659: res = (*pa->pa_alloc)(pp, flags);
2660: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2661: /*
1.117 yamt 2662: * We only run the drain hook here if PR_NOWAIT.
2663: * In other cases, the hook will be run in
2664: * pool_reclaim().
1.66 thorpej 2665: */
1.117 yamt 2666: if (pp->pr_drain_hook != NULL) {
2667: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2668: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2669: }
1.117 yamt 2670: }
2671: return res;
1.66 thorpej 2672: }
2673:
1.117 yamt 2674: static void
1.66 thorpej 2675: pool_allocator_free(struct pool *pp, void *v)
2676: {
2677: struct pool_allocator *pa = pp->pr_alloc;
2678:
2679: (*pa->pa_free)(pp, v);
2680: }
2681:
2682: void *
1.124 yamt 2683: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2684: {
1.192 rmind 2685: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
1.191 para 2686: vmem_addr_t va;
1.192 rmind 2687: int ret;
1.191 para 2688:
1.192 rmind 2689: ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2690: vflags | VM_INSTANTFIT, &va);
1.66 thorpej 2691:
1.192 rmind 2692: return ret ? NULL : (void *)va;
1.66 thorpej 2693: }
2694:
2695: void
1.124 yamt 2696: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2697: {
2698:
1.191 para 2699: uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
1.98 yamt 2700: }
2701:
2702: static void *
1.124 yamt 2703: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2704: {
1.192 rmind 2705: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2706: vmem_addr_t va;
2707: int ret;
1.191 para 2708:
1.192 rmind 2709: ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2710: vflags | VM_INSTANTFIT, &va);
1.98 yamt 2711:
1.192 rmind 2712: return ret ? NULL : (void *)va;
1.98 yamt 2713: }
2714:
2715: static void
1.124 yamt 2716: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2717: {
2718:
1.192 rmind 2719: vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
1.66 thorpej 2720: }
2721:
1.204 maxv 2722: #ifdef POOL_REDZONE
2723: #if defined(_LP64)
2724: # define PRIME 0x9e37fffffffc0000UL
2725: #else /* defined(_LP64) */
2726: # define PRIME 0x9e3779b1
2727: #endif /* defined(_LP64) */
2728: #define STATIC_BYTE 0xFE
2729: CTASSERT(POOL_REDZONE_SIZE > 1);
2730:
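/*
 * pool_pattern_generate:
 *
 *	Fill pattern for one red-zone byte: multiply the address by a
 *	large odd constant and keep the top byte of the product, so
 *	that neighbouring bytes get distinct values that can be
 *	recomputed cheaply at check time.
 */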
2731: static inline uint8_t
2732: pool_pattern_generate(const void *p)
2733: {
2734: 	return (uint8_t)(((uintptr_t)p) * PRIME
2735: 	   >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
2736: }
2737:
2738: static void
2739: pool_redzone_init(struct pool *pp, size_t requested_size)
2740: {
2741: size_t nsz;
2742:
2743: if (pp->pr_roflags & PR_NOTOUCH) {
2744: pp->pr_reqsize = 0;
2745: pp->pr_redzone = false;
2746: return;
2747: }
2748:
2749: /*
2750: * We may have extended the requested size earlier; check if
2751: * there's naturally space in the padding for a red zone.
2752: */
2753: if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) {
2754: pp->pr_reqsize = requested_size;
2755: pp->pr_redzone = true;
2756: return;
2757: }
2758:
2759: /*
2760: * No space in the natural padding; check if we can extend a
2761: * bit the size of the pool.
2762: */
2763: nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align);
2764: if (nsz <= pp->pr_alloc->pa_pagesz) {
2765: /* Ok, we can */
2766: pp->pr_size = nsz;
2767: pp->pr_reqsize = requested_size;
2768: pp->pr_redzone = true;
2769: } else {
2770: /* No space for a red zone... snif :'( */
2771: pp->pr_reqsize = 0;
2772: pp->pr_redzone = false;
2773: printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
2774: }
2775: }
2776:
2777: static void
2778: pool_redzone_fill(struct pool *pp, void *p)
2779: {
2780: uint8_t *cp, pat;
2781: const uint8_t *ep;
2782:
2783: if (!pp->pr_redzone)
2784: return;
2785:
2786: cp = (uint8_t *)p + pp->pr_reqsize;
2787: ep = cp + POOL_REDZONE_SIZE;
2788:
2789: /*
2790: * We really don't want the first byte of the red zone to be '\0';
2791: * an off-by-one in a string may not be properly detected.
2792: */
2793: pat = pool_pattern_generate(cp);
2794: *cp = (pat == '\0') ? STATIC_BYTE: pat;
2795: cp++;
2796:
2797: while (cp < ep) {
2798: *cp = pool_pattern_generate(cp);
2799: cp++;
2800: }
2801: }
2802:
2803: static void
2804: pool_redzone_check(struct pool *pp, void *p)
2805: {
2806: uint8_t *cp, pat, expected;
2807: const uint8_t *ep;
2808:
2809: if (!pp->pr_redzone)
2810: return;
2811:
2812: cp = (uint8_t *)p + pp->pr_reqsize;
2813: ep = cp + POOL_REDZONE_SIZE;
2814:
2815: pat = pool_pattern_generate(cp);
2816: expected = (pat == '\0') ? STATIC_BYTE: pat;
2817: if (expected != *cp) {
2818: panic("%s: %p: 0x%02x != 0x%02x\n",
2819: __func__, cp, *cp, expected);
2820: }
2821: cp++;
2822:
2823: while (cp < ep) {
2824: expected = pool_pattern_generate(cp);
2825: if (*cp != expected) {
2826: panic("%s: %p: 0x%02x != 0x%02x\n",
2827: __func__, cp, *cp, expected);
2828: }
2829: cp++;
2830: }
2831: }
2832:
2833: #endif /* POOL_REDZONE */
2834:
2835:
1.66 thorpej 2836: #ifdef POOL_SUBPAGE
2837: /* Sub-page allocator, for machines with large hardware pages. */
2838: void *
2839: pool_subpage_alloc(struct pool *pp, int flags)
2840: {
1.134 ad 2841: return pool_get(&psppool, flags);
1.66 thorpej 2842: }
2843:
2844: void
2845: pool_subpage_free(struct pool *pp, void *v)
2846: {
2847: pool_put(&psppool, v);
2848: }
2849:
1.112 bjh21 2850: #endif /* POOL_SUBPAGE */
1.141 yamt 2851:
2852: #if defined(DDB)
2853: static bool
2854: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2855: {
2856:
2857: return (uintptr_t)ph->ph_page <= addr &&
2858: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2859: }
2860:
1.143 yamt 2861: static bool
2862: pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2863: {
2864:
2865: return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2866: }
2867:
2868: static bool
2869: pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2870: {
2871: int i;
2872:
2873: if (pcg == NULL) {
2874: return false;
2875: }
1.144 yamt 2876: for (i = 0; i < pcg->pcg_avail; i++) {
1.143 yamt 2877: if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2878: return true;
2879: }
2880: }
2881: return false;
2882: }
2883:
2884: static bool
2885: pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2886: {
2887:
2888: if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2889: unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2890: pool_item_bitmap_t *bitmap =
2891: ph->ph_bitmap + (idx / BITMAP_SIZE);
2892: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2893:
2894: return (*bitmap & mask) == 0;
2895: } else {
2896: struct pool_item *pi;
2897:
2898: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2899: if (pool_in_item(pp, pi, addr)) {
2900: return false;
2901: }
2902: }
2903: return true;
2904: }
2905: }
2906:
1.141 yamt 2907: void
2908: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2909: {
2910: struct pool *pp;
2911:
1.145 ad 2912: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.141 yamt 2913: struct pool_item_header *ph;
2914: uintptr_t item;
1.143 yamt 2915: bool allocated = true;
2916: bool incache = false;
2917: bool incpucache = false;
2918: char cpucachestr[32];
1.141 yamt 2919:
2920: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2921: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2922: if (pool_in_page(pp, ph, addr)) {
2923: goto found;
2924: }
2925: }
2926: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2927: if (pool_in_page(pp, ph, addr)) {
1.143 yamt 2928: allocated =
2929: pool_allocated(pp, ph, addr);
2930: goto found;
2931: }
2932: }
2933: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2934: if (pool_in_page(pp, ph, addr)) {
2935: allocated = false;
1.141 yamt 2936: goto found;
2937: }
2938: }
2939: continue;
2940: } else {
2941: ph = pr_find_pagehead_noalign(pp, (void *)addr);
2942: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2943: continue;
2944: }
1.143 yamt 2945: allocated = pool_allocated(pp, ph, addr);
1.141 yamt 2946: }
2947: found:
1.143 yamt 2948: if (allocated && pp->pr_cache) {
2949: pool_cache_t pc = pp->pr_cache;
2950: struct pool_cache_group *pcg;
2951: int i;
2952:
2953: for (pcg = pc->pc_fullgroups; pcg != NULL;
2954: pcg = pcg->pcg_next) {
2955: if (pool_in_cg(pp, pcg, addr)) {
2956: incache = true;
2957: goto print;
2958: }
2959: }
1.183 ad 2960: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.143 yamt 2961: pool_cache_cpu_t *cc;
2962:
2963: if ((cc = pc->pc_cpus[i]) == NULL) {
2964: continue;
2965: }
2966: if (pool_in_cg(pp, cc->cc_current, addr) ||
2967: pool_in_cg(pp, cc->cc_previous, addr)) {
2968: struct cpu_info *ci =
1.170 ad 2969: cpu_lookup(i);
1.143 yamt 2970:
2971: incpucache = true;
2972: snprintf(cpucachestr,
2973: sizeof(cpucachestr),
2974: "cached by CPU %u",
1.153 martin 2975: ci->ci_index);
1.143 yamt 2976: goto print;
2977: }
2978: }
2979: }
2980: print:
1.141 yamt 2981: item = (uintptr_t)ph->ph_page + ph->ph_off;
2982: item = item + rounddown(addr - item, pp->pr_size);
1.143 yamt 2983: (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
1.141     yamt     2984: 		    (void *)addr, (void *)item, (size_t)(addr - item),
1.143 yamt 2985: pp->pr_wchan,
2986: incpucache ? cpucachestr :
2987: incache ? "cached" : allocated ? "allocated" : "free");
1.141 yamt 2988: }
2989: }
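/*
 * Editor's note: pool_whatis() backs ddb's "whatis" command; given an
 * arbitrary kernel address it reports, for each pool page containing
 * the address, the enclosing item and whether that item is free,
 * allocated, cached, or cached per-CPU.
 */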
2990: #endif /* defined(DDB) */
1.203 joerg 2991:
2992: static int
2993: pool_sysctl(SYSCTLFN_ARGS)
2994: {
2995: struct pool_sysctl data;
2996: struct pool *pp;
2997: struct pool_cache *pc;
2998: pool_cache_cpu_t *cc;
2999: int error;
3000: size_t i, written;
3001:
3002: if (oldp == NULL) {
3003: *oldlenp = 0;
3004: TAILQ_FOREACH(pp, &pool_head, pr_poollist)
3005: *oldlenp += sizeof(data);
3006: return 0;
3007: }
3008:
3009: memset(&data, 0, sizeof(data));
3010: error = 0;
3011: written = 0;
3012: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
3013: if (written + sizeof(data) > *oldlenp)
3014: break;
3015: strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
3016: data.pr_pagesize = pp->pr_alloc->pa_pagesz;
3017: data.pr_flags = pp->pr_roflags | pp->pr_flags;
3018: #define COPY(field) data.field = pp->field
3019: COPY(pr_size);
3020:
3021: COPY(pr_itemsperpage);
3022: COPY(pr_nitems);
3023: COPY(pr_nout);
3024: COPY(pr_hardlimit);
3025: COPY(pr_npages);
3026: COPY(pr_minpages);
3027: COPY(pr_maxpages);
3028:
3029: COPY(pr_nget);
3030: COPY(pr_nfail);
3031: COPY(pr_nput);
3032: COPY(pr_npagealloc);
3033: COPY(pr_npagefree);
3034: COPY(pr_hiwat);
3035: COPY(pr_nidle);
3036: #undef COPY
3037:
3038: data.pr_cache_nmiss_pcpu = 0;
3039: data.pr_cache_nhit_pcpu = 0;
3040: if (pp->pr_cache) {
3041: pc = pp->pr_cache;
3042: data.pr_cache_meta_size = pc->pc_pcgsize;
3043: data.pr_cache_nfull = pc->pc_nfull;
3044: data.pr_cache_npartial = pc->pc_npart;
3045: data.pr_cache_nempty = pc->pc_nempty;
3046: data.pr_cache_ncontended = pc->pc_contended;
3047: data.pr_cache_nmiss_global = pc->pc_misses;
3048: data.pr_cache_nhit_global = pc->pc_hits;
3049: for (i = 0; i < pc->pc_ncpu; ++i) {
3050: cc = pc->pc_cpus[i];
3051: if (cc == NULL)
3052: continue;
1.206 knakahar 3053: data.pr_cache_nmiss_pcpu += cc->cc_misses;
3054: data.pr_cache_nhit_pcpu += cc->cc_hits;
1.203 joerg 3055: }
3056: } else {
3057: data.pr_cache_meta_size = 0;
3058: data.pr_cache_nfull = 0;
3059: data.pr_cache_npartial = 0;
3060: data.pr_cache_nempty = 0;
3061: data.pr_cache_ncontended = 0;
3062: data.pr_cache_nmiss_global = 0;
3063: data.pr_cache_nhit_global = 0;
3064: }
3065:
3066: error = sysctl_copyout(l, &data, oldp, sizeof(data));
3067: if (error)
3068: break;
3069: written += sizeof(data);
3070: oldp = (char *)oldp + sizeof(data);
3071: }
3072:
3073: *oldlenp = written;
3074: return error;
3075: }
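/*
 * Usage sketch (editor's illustration): the statistics exported here
 * are consumed from userland, e.g. by vmstat(8).  A minimal reader,
 * assuming the node is reachable as "kern.pool":
 *
 *	size_t len;
 *	struct pool_sysctl *buf;
 *	size_t i;
 *
 *	if (sysctlbyname("kern.pool", NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	buf = malloc(len);
 *	if (buf == NULL ||
 *	    sysctlbyname("kern.pool", buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	for (i = 0; i < len / sizeof(*buf); i++)
 *		printf("%s\n", buf[i].pr_wchan);
 *
 * The two-step pattern matches the handler above: a NULL oldp
 * returns only the required buffer length.
 */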
3076:
3077: SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
3078: {
3079: const struct sysctlnode *rnode = NULL;
3080:
3081: sysctl_createv(clog, 0, NULL, &rnode,
3082: CTLFLAG_PERMANENT,
3083: CTLTYPE_STRUCT, "pool",
3084: SYSCTL_DESCR("Get pool statistics"),
3085: pool_sysctl, 0, NULL, 0,
3086: CTL_KERN, CTL_CREATE, CTL_EOL);
3087: }