Annotation of src/sys/kern/subr_pool.c, Revision 1.209
1.209 ! riastrad 1: /* $NetBSD: subr_pool.c,v 1.208 2017/06/08 04:00:01 chs Exp $ */
1.1 pk 2:
3: /*-
1.204 maxv 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
1.183 ad 5: * The NetBSD Foundation, Inc.
1.1 pk 6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 9: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.204 maxv 10: * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
11: * Maxime Villard.
1.1 pk 12: *
13: * Redistribution and use in source and binary forms, with or without
14: * modification, are permitted provided that the following conditions
15: * are met:
16: * 1. Redistributions of source code must retain the above copyright
17: * notice, this list of conditions and the following disclaimer.
18: * 2. Redistributions in binary form must reproduce the above copyright
19: * notice, this list of conditions and the following disclaimer in the
20: * documentation and/or other materials provided with the distribution.
21: *
22: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32: * POSSIBILITY OF SUCH DAMAGE.
33: */
1.64 lukem 34:
35: #include <sys/cdefs.h>
1.209 ! riastrad 36: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.208 2017/06/08 04:00:01 chs Exp $");
1.24 scottr 37:
1.205 pooka 38: #ifdef _KERNEL_OPT
1.141 yamt 39: #include "opt_ddb.h"
1.28 thorpej 40: #include "opt_lockdebug.h"
1.205 pooka 41: #endif
1.1 pk 42:
43: #include <sys/param.h>
44: #include <sys/systm.h>
1.203 joerg 45: #include <sys/sysctl.h>
1.135 yamt 46: #include <sys/bitops.h>
1.1 pk 47: #include <sys/proc.h>
48: #include <sys/errno.h>
49: #include <sys/kernel.h>
1.191 para 50: #include <sys/vmem.h>
1.1 pk 51: #include <sys/pool.h>
1.20 thorpej 52: #include <sys/syslog.h>
1.125 ad 53: #include <sys/debug.h>
1.134 ad 54: #include <sys/lockdebug.h>
55: #include <sys/xcall.h>
56: #include <sys/cpu.h>
1.145 ad 57: #include <sys/atomic.h>
1.3 pk 58:
1.187 uebayasi 59: #include <uvm/uvm_extern.h>
1.3 pk 60:
1.1 pk 61: /*
62: * Pool resource management utility.
1.3 pk 63: *
1.88 chs 64: * Memory is allocated in pages which are split into pieces according to
65: * the pool item size. Each page is kept on one of three lists in the
66: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
67: * for empty, full and partially-full pages respectively. The individual
68: * pool items are on a linked list headed by `ph_itemlist' in each page
69: * header. The memory for building the page list is either taken from
70: * the allocated pages themselves (for small pool items) or taken from
71: * an internal pool of page headers (`phpool').
1.1 pk 72: */
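
/*
 * An illustrative sketch (not part of this file) of typical pool usage,
 * for a hypothetical `struct foo' and pool `foo_pool':
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...use f...
 *	pool_put(&foo_pool, f);
 *
 *	pool_destroy(&foo_pool);
 */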
73:
1.202    abs        74: /* List of all pools. Non-static, as it is needed by 'vmstat -i' */
75: TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.134 ad 76:
1.3 pk 77: /* Private pool for page header structures */
1.97 yamt 78: #define PHPOOL_MAX 8
79: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 80: #define PHPOOL_FREELIST_NELEM(idx) \
81: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
1.3 pk 82:
1.62 bjh21 83: #ifdef POOL_SUBPAGE
84: /* Pool of subpages for use by normal pools. */
85: static struct pool psppool;
86: #endif
87:
1.204 maxv 88: #ifdef POOL_REDZONE
89: # define POOL_REDZONE_SIZE 2
90: static void pool_redzone_init(struct pool *, size_t);
91: static void pool_redzone_fill(struct pool *, void *);
92: static void pool_redzone_check(struct pool *, void *);
93: #else
94: # define pool_redzone_init(pp, sz) /* NOTHING */
95: # define pool_redzone_fill(pp, ptr) /* NOTHING */
96: # define pool_redzone_check(pp, ptr) /* NOTHING */
97: #endif
98:
1.98 yamt 99: static void *pool_page_alloc_meta(struct pool *, int);
100: static void pool_page_free_meta(struct pool *, void *);
101:
102: /* allocator for pool metadata */
1.134 ad 103: struct pool_allocator pool_allocator_meta = {
1.191 para 104: .pa_alloc = pool_page_alloc_meta,
105: .pa_free = pool_page_free_meta,
106: .pa_pagesz = 0
1.98 yamt 107: };
108:
1.208 chs 109: #define POOL_ALLOCATOR_BIG_BASE 13
110: extern struct pool_allocator pool_allocator_big[];
111: static int pool_bigidx(size_t);
112:
1.3 pk 113: /* # of seconds to retain page after last use */
114: int pool_inactive_time = 10;
115:
116: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 117: static struct pool *drainpp;
118:
1.134 ad 119: /* This lock protects both pool_head and drainpp. */
120: static kmutex_t pool_head_lock;
121: static kcondvar_t pool_busy;
1.3 pk 122:
1.178 elad 123: /* This lock protects initialization of a potentially shared pool allocator */
124: static kmutex_t pool_allocator_lock;
125:
1.135 yamt 126: typedef uint32_t pool_item_bitmap_t;
127: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
128: #define BITMAP_MASK (BITMAP_SIZE - 1)
1.99 yamt 129:
1.3 pk 130: struct pool_item_header {
131: /* Page headers */
1.88 chs 132: LIST_ENTRY(pool_item_header)
1.3 pk 133: ph_pagelist; /* pool page list */
1.88 chs 134: SPLAY_ENTRY(pool_item_header)
135: ph_node; /* Off-page page headers */
1.128 christos 136: void * ph_page; /* this page's address */
1.151 yamt 137: uint32_t ph_time; /* last referenced */
1.135 yamt 138: uint16_t ph_nmissing; /* # of chunks in use */
1.141 yamt 139: uint16_t ph_off; /* start offset in page */
1.97 yamt 140: union {
141: /* !PR_NOTOUCH */
142: struct {
1.102 chs 143: LIST_HEAD(, pool_item)
1.97 yamt 144: phu_itemlist; /* chunk list for this page */
145: } phu_normal;
146: /* PR_NOTOUCH */
147: struct {
1.141 yamt 148: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 149: } phu_notouch;
150: } ph_u;
1.3 pk 151: };
1.97 yamt 152: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.135 yamt 153: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 154:
1.1 pk 155: struct pool_item {
1.3 pk 156: #ifdef DIAGNOSTIC
1.82 thorpej 157: u_int pi_magic;
1.33 chs 158: #endif
1.134 ad 159: #define PI_MAGIC 0xdeaddeadU
1.3 pk 160: /* Other entries use only this list entry */
1.102 chs 161: LIST_ENTRY(pool_item) pi_list;
1.3 pk 162: };
163:
1.53 thorpej 164: #define POOL_NEEDS_CATCHUP(pp) \
165: ((pp)->pr_nitems < (pp)->pr_minitems)
166:
1.43 thorpej 167: /*
168: * Pool cache management.
169: *
170: * Pool caches provide a way for constructed objects to be cached by the
171: * pool subsystem. This can lead to performance improvements by avoiding
172: * needless object construction/destruction; it is deferred until absolutely
173: * necessary.
174: *
1.134 ad 175: * Caches are grouped into cache groups. Each cache group references up
176: * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
177: * object from the pool, it calls the object's constructor and places it
178: * into a cache group. When a cache group frees an object back to the
179: * pool, it first calls the object's destructor. This allows the object
180: * to persist in constructed form while freed to the cache.
181: *
182: * The pool references each cache, so that when a pool is drained by the
183: * pagedaemon, it can drain each individual cache as well. Each time a
184: * cache is drained, the most idle cache group is freed to the pool in
185: * its entirety.
1.43 thorpej 186: *
187: * Pool caches are layed on top of pools. By layering them, we can avoid
188: * the complexity of cache management for pools which would not benefit
189: * from it.
190: */
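
/*
 * An illustrative sketch (not part of this file) of pool cache usage;
 * `struct frob', `frob_ctor', `frob_dtor' and `frob_cache' are
 * hypothetical:
 *
 *	pool_cache_t frob_cache;
 *
 *	frob_cache = pool_cache_init(sizeof(struct frob), coherency_unit,
 *	    0, 0, "frobcache", NULL, IPL_NONE, frob_ctor, frob_dtor, NULL);
 *
 *	struct frob *f = pool_cache_get(frob_cache, PR_WAITOK);
 *	... f arrives already constructed ...
 *	pool_cache_put(frob_cache, f);
 */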
191:
1.142 ad 192: static struct pool pcg_normal_pool;
193: static struct pool pcg_large_pool;
1.134 ad 194: static struct pool cache_pool;
195: static struct pool cache_cpu_pool;
1.3 pk 196:
1.189 pooka 197: pool_cache_t pnbuf_cache; /* pathname buffer cache */
198:
1.145 ad 199: /* List of all caches. */
200: TAILQ_HEAD(,pool_cache) pool_cache_head =
201: TAILQ_HEAD_INITIALIZER(pool_cache_head);
202:
1.162 ad 203: int pool_cache_disable; /* global disable for caching */
1.169 yamt 204: static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */
1.145 ad 205:
1.162 ad 206: static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
207: void *);
208: static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
209: void **, paddr_t *, int);
1.134 ad 210: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
211: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
1.175 jym 212: static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
1.196 jym 213: static void pool_cache_transfer(pool_cache_t);
1.3 pk 214:
1.42 thorpej 215: static int pool_catchup(struct pool *);
1.128 christos 216: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 217: struct pool_item_header *);
1.88 chs 218: static void pool_update_curpage(struct pool *);
1.66 thorpej 219:
1.113 yamt 220: static int pool_grow(struct pool *, int);
1.117 yamt 221: static void *pool_allocator_alloc(struct pool *, int);
222: static void pool_allocator_free(struct pool *, void *);
1.3 pk 223:
1.97 yamt 224: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.199 christos 225: void (*)(const char *, ...) __printflike(1, 2));
1.42 thorpej 226: static void pool_print1(struct pool *, const char *,
1.199 christos 227: void (*)(const char *, ...) __printflike(1, 2));
1.3 pk 228:
1.88 chs 229: static int pool_chk_page(struct pool *, const char *,
230: struct pool_item_header *);
231:
1.135 yamt 232: static inline unsigned int
1.97 yamt 233: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
234: const void *v)
235: {
236: const char *cp = v;
1.135 yamt 237: unsigned int idx;
1.97 yamt 238:
239: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 240: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 241: KASSERT(idx < pp->pr_itemsperpage);
242: return idx;
243: }
244:
1.110 perry 245: static inline void
1.97 yamt 246: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
247: void *obj)
248: {
1.135 yamt 249: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
250: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
251: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 252:
1.135 yamt 253: KASSERT((*bitmap & mask) == 0);
254: *bitmap |= mask;
1.97 yamt 255: }
256:
1.110 perry 257: static inline void *
1.97 yamt 258: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
259: {
1.135 yamt 260: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
261: unsigned int idx;
262: int i;
1.97 yamt 263:
1.135 yamt 264: for (i = 0; ; i++) {
265: int bit;
1.97 yamt 266:
1.135 yamt 267: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
268: bit = ffs32(bitmap[i]);
269: if (bit) {
270: pool_item_bitmap_t mask;
271:
272: bit--;
273: idx = (i * BITMAP_SIZE) + bit;
274: mask = 1 << bit;
275: KASSERT((bitmap[i] & mask) != 0);
276: bitmap[i] &= ~mask;
277: break;
278: }
279: }
280: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 281: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 282: }
283:
1.135 yamt 284: static inline void
1.141 yamt 285: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
1.135 yamt 286: {
287: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
288: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
289: int i;
290:
291: for (i = 0; i < n; i++) {
292: bitmap[i] = (pool_item_bitmap_t)-1;
293: }
294: }
295:
1.110 perry 296: static inline int
1.88 chs 297: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
298: {
1.121 yamt 299:
300: /*
                    300: 	/*
                    301: 	 * we consider a pool_item_header with a smaller ph_page to be bigger.
                    302: 	 * (this unnatural ordering is for the benefit of pr_find_pagehead.)
303: */
304:
1.88 chs 305: if (a->ph_page < b->ph_page)
1.121 yamt 306: return (1);
307: else if (a->ph_page > b->ph_page)
1.88 chs 308: return (-1);
309: else
310: return (0);
311: }
312:
313: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
314: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
315:
1.141 yamt 316: static inline struct pool_item_header *
317: pr_find_pagehead_noalign(struct pool *pp, void *v)
318: {
319: struct pool_item_header *ph, tmp;
320:
321: tmp.ph_page = (void *)(uintptr_t)v;
322: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
323: if (ph == NULL) {
324: ph = SPLAY_ROOT(&pp->pr_phtree);
325: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
326: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
327: }
328: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
329: }
330:
331: return ph;
332: }
333:
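/*
 * A worked example of the reversed ordering, with made-up addresses:
 * suppose headers exist for pages at 0x1000 and 0x2000 and we look up
 * an item at v == 0x2400.  No header has ph_page == 0x2400, so
 * SPLAY_FIND fails; because a larger ph_page sorts earlier, the fixup
 * above then yields the header with the greatest ph_page <= v, here
 * the one for 0x2000, which is the page that contains v.
 */
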
1.3 pk 334: /*
1.121 yamt 335: * Return the pool page header based on item address.
1.3 pk 336: */
1.110 perry 337: static inline struct pool_item_header *
1.121 yamt 338: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 339: {
1.88 chs 340: struct pool_item_header *ph, tmp;
1.3 pk 341:
1.121 yamt 342: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.141 yamt 343: ph = pr_find_pagehead_noalign(pp, v);
1.121 yamt 344: } else {
1.128 christos 345: void *page =
346: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 347:
348: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 349: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 350: } else {
351: tmp.ph_page = page;
352: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
353: }
354: }
1.3 pk 355:
1.121 yamt 356: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 357: ((char *)ph->ph_page <= (char *)v &&
358: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 359: return ph;
1.3 pk 360: }
361:
1.101 thorpej 362: static void
363: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
364: {
365: struct pool_item_header *ph;
366:
367: while ((ph = LIST_FIRST(pq)) != NULL) {
368: LIST_REMOVE(ph, ph_pagelist);
369: pool_allocator_free(pp, ph->ph_page);
1.134 ad 370: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 371: pool_put(pp->pr_phpool, ph);
372: }
373: }
374:
1.3 pk 375: /*
376: * Remove a page from the pool.
377: */
1.110 perry 378: static inline void
1.61 chs 379: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
380: struct pool_pagelist *pq)
1.3 pk 381: {
382:
1.134 ad 383: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 384:
1.3 pk 385: /*
1.7 thorpej 386: * If the page was idle, decrement the idle page count.
1.3 pk 387: */
1.6 thorpej 388: if (ph->ph_nmissing == 0) {
1.207 riastrad 389: KASSERT(pp->pr_nidle != 0);
390: KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
391: "nitems=%u < itemsperpage=%u",
392: pp->pr_nitems, pp->pr_itemsperpage);
1.6 thorpej 393: pp->pr_nidle--;
394: }
1.7 thorpej 395:
1.20 thorpej 396: pp->pr_nitems -= pp->pr_itemsperpage;
397:
1.7 thorpej 398: /*
1.101 thorpej 399: * Unlink the page from the pool and queue it for release.
1.7 thorpej 400: */
1.88 chs 401: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 402: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
403: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 404: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
405:
1.7 thorpej 406: pp->pr_npages--;
407: pp->pr_npagefree++;
1.6 thorpej 408:
1.88 chs 409: pool_update_curpage(pp);
1.3 pk 410: }
411:
412: /*
1.94 simonb 413: * Initialize all the pools listed in the "pools" link set.
414: */
415: void
1.117 yamt 416: pool_subsystem_init(void)
1.94 simonb 417: {
1.192 rmind 418: size_t size;
1.191 para 419: int idx;
1.94 simonb 420:
1.134 ad 421: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
1.179 mlelstv 422: mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
1.134 ad 423: cv_init(&pool_busy, "poolbusy");
424:
1.191 para 425: /*
426: * Initialize private page header pool and cache magazine pool if we
427: * haven't done so yet.
428: */
429: for (idx = 0; idx < PHPOOL_MAX; idx++) {
430: static char phpool_names[PHPOOL_MAX][6+1+6+1];
431: int nelem;
432: size_t sz;
433:
434: nelem = PHPOOL_FREELIST_NELEM(idx);
435: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
436: "phpool-%d", nelem);
437: sz = sizeof(struct pool_item_header);
438: if (nelem) {
439: sz = offsetof(struct pool_item_header,
440: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
441: }
442: pool_init(&phpool[idx], sz, 0, 0, 0,
443: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.117 yamt 444: }
1.191 para 445: #ifdef POOL_SUBPAGE
446: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
447: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
448: #endif
449:
450: size = sizeof(pcg_t) +
451: (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
452: pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
453: "pcgnormal", &pool_allocator_meta, IPL_VM);
454:
455: size = sizeof(pcg_t) +
456: (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
457: pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
458: "pcglarge", &pool_allocator_meta, IPL_VM);
1.134 ad 459:
1.156 ad 460: pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
1.191 para 461: 0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
1.134 ad 462:
1.156 ad 463: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
1.191 para 464: 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
1.94 simonb 465: }
466:
467: /*
1.3 pk 468: * Initialize the given pool resource structure.
469: *
470: * We export this routine to allow other kernel parts to declare
1.195 rmind 471: * static pools that must be initialized before kmem(9) is available.
1.3 pk 472: */
473: void
1.42 thorpej 474: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 475: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 476: {
1.116 simonb 477: struct pool *pp1;
1.204 maxv 478: size_t trysize, phsize, prsize;
1.134 ad 479: int off, slack;
1.3 pk 480:
1.116 simonb 481: #ifdef DEBUG
1.198 christos 482: if (__predict_true(!cold))
483: mutex_enter(&pool_head_lock);
1.116 simonb 484: /*
485: * Check that the pool hasn't already been initialised and
486: * added to the list of all pools.
487: */
1.145 ad 488: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
1.116 simonb 489: if (pp == pp1)
490: panic("pool_init: pool %s already initialised",
491: wchan);
492: }
1.198 christos 493: if (__predict_true(!cold))
494: mutex_exit(&pool_head_lock);
1.116 simonb 495: #endif
496:
1.66 thorpej 497: if (palloc == NULL)
498: palloc = &pool_allocator_kmem;
1.112 bjh21 499: #ifdef POOL_SUBPAGE
500: if (size > palloc->pa_pagesz) {
501: if (palloc == &pool_allocator_kmem)
502: palloc = &pool_allocator_kmem_fullpage;
503: else if (palloc == &pool_allocator_nointr)
504: palloc = &pool_allocator_nointr_fullpage;
505: }
1.66 thorpej 506: #endif /* POOL_SUBPAGE */
1.180 mlelstv 507: if (!cold)
508: mutex_enter(&pool_allocator_lock);
1.178 elad 509: if (palloc->pa_refcnt++ == 0) {
1.112 bjh21 510: if (palloc->pa_pagesz == 0)
1.66 thorpej 511: palloc->pa_pagesz = PAGE_SIZE;
512:
513: TAILQ_INIT(&palloc->pa_list);
514:
1.134 ad 515: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 516: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
517: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.4 thorpej 518: }
1.180 mlelstv 519: if (!cold)
520: mutex_exit(&pool_allocator_lock);
1.3 pk 521:
522: if (align == 0)
523: align = ALIGN(1);
1.14 thorpej 524:
1.204 maxv 525: prsize = size;
526: if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
527: prsize = sizeof(struct pool_item);
1.3 pk 528:
1.204 maxv 529: prsize = roundup(prsize, align);
1.207 riastrad 530: KASSERTMSG((prsize <= palloc->pa_pagesz),
531: "pool_init: pool item size (%zu) larger than page size (%u)",
532: prsize, palloc->pa_pagesz);
1.35 pk 533:
1.3 pk 534: /*
535: * Initialize the pool structure.
536: */
1.88 chs 537: LIST_INIT(&pp->pr_emptypages);
538: LIST_INIT(&pp->pr_fullpages);
539: LIST_INIT(&pp->pr_partpages);
1.134 ad 540: pp->pr_cache = NULL;
1.3 pk 541: pp->pr_curpage = NULL;
542: pp->pr_npages = 0;
543: pp->pr_minitems = 0;
544: pp->pr_minpages = 0;
545: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 546: pp->pr_roflags = flags;
547: pp->pr_flags = 0;
1.204 maxv 548: pp->pr_size = prsize;
1.3 pk 549: pp->pr_align = align;
550: pp->pr_wchan = wchan;
1.66 thorpej 551: pp->pr_alloc = palloc;
1.20 thorpej 552: pp->pr_nitems = 0;
553: pp->pr_nout = 0;
554: pp->pr_hardlimit = UINT_MAX;
555: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 556: pp->pr_hardlimit_ratecap.tv_sec = 0;
557: pp->pr_hardlimit_ratecap.tv_usec = 0;
558: pp->pr_hardlimit_warning_last.tv_sec = 0;
559: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 560: pp->pr_drain_hook = NULL;
561: pp->pr_drain_hook_arg = NULL;
1.125 ad 562: pp->pr_freecheck = NULL;
1.204 maxv 563: pool_redzone_init(pp, size);
1.3 pk 564:
565: /*
566: * Decide whether to put the page header off page to avoid
1.92 enami 567: * wasting too large a part of the page or too big item.
568: * Off-page page headers go on a hash table, so we can match
569: * a returned item with its header based on the page address.
570: * We use 1/16 of the page size and about 8 times of the item
571: * size as the threshold (XXX: tune)
572: *
573: * However, we'll put the header into the page if we can put
574: * it without wasting any items.
575: *
576: * Silently enforce `0 <= ioff < align'.
1.3 pk 577: */
1.92 enami 578: pp->pr_itemoffset = ioff %= align;
579: /* See the comment below about reserved bytes. */
580: trysize = palloc->pa_pagesz - ((align - ioff) % align);
581: phsize = ALIGN(sizeof(struct pool_item_header));
1.201 para 582: if (pp->pr_roflags & PR_PHINPAGE ||
583: ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 584: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
1.201 para 585: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
1.3 pk 586: /* Use the end of the page for the page header */
1.20 thorpej 587: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 588: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 589: } else {
1.3 pk 590: /* The page header will be taken from our page header pool */
591: pp->pr_phoffset = 0;
1.66 thorpej 592: off = palloc->pa_pagesz;
1.88 chs 593: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 594: }
1.1 pk 595:
1.3 pk 596: /*
597: * Alignment is to take place at `ioff' within the item. This means
598: * we must reserve up to `align - 1' bytes on the page to allow
599: * appropriate positioning of each item.
600: */
601: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 602: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 603: if ((pp->pr_roflags & PR_NOTOUCH)) {
604: int idx;
605:
606: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
607: idx++) {
608: /* nothing */
609: }
610: if (idx >= PHPOOL_MAX) {
611: /*
                    612: 			 * if you see this panic, consider tweaking
613: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
614: */
615: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
616: pp->pr_wchan, pp->pr_itemsperpage);
617: }
618: pp->pr_phpool = &phpool[idx];
619: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
620: pp->pr_phpool = &phpool[0];
621: }
622: #if defined(DIAGNOSTIC)
623: else {
624: pp->pr_phpool = NULL;
625: }
626: #endif
1.3 pk 627:
628: /*
629: * Use the slack between the chunks and the page header
630: * for "cache coloring".
631: */
632: slack = off - pp->pr_itemsperpage * pp->pr_size;
633: pp->pr_maxcolor = (slack / align) * align;
634: pp->pr_curcolor = 0;
635:
636: pp->pr_nget = 0;
637: pp->pr_nfail = 0;
638: pp->pr_nput = 0;
639: pp->pr_npagealloc = 0;
640: pp->pr_npagefree = 0;
1.1 pk 641: pp->pr_hiwat = 0;
1.8 thorpej 642: pp->pr_nidle = 0;
1.134 ad 643: pp->pr_refcnt = 0;
1.3 pk 644:
1.157 ad 645: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
1.134 ad 646: cv_init(&pp->pr_cv, wchan);
647: pp->pr_ipl = ipl;
1.1 pk 648:
1.145 ad 649: /* Insert into the list of all pools. */
1.181 mlelstv 650: if (!cold)
1.134 ad 651: mutex_enter(&pool_head_lock);
1.145 ad 652: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
653: if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
654: break;
655: }
656: if (pp1 == NULL)
657: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
658: else
659: TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
1.181 mlelstv 660: if (!cold)
1.134 ad 661: mutex_exit(&pool_head_lock);
662:
1.167 skrll 663: /* Insert this into the list of pools using this allocator. */
1.181 mlelstv 664: if (!cold)
1.134 ad 665: mutex_enter(&palloc->pa_lock);
1.145 ad 666: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
1.181 mlelstv 667: if (!cold)
1.134 ad 668: mutex_exit(&palloc->pa_lock);
1.1 pk 669: }
670:
671: /*
                    672:  * Decommission a pool resource.
673: */
674: void
1.42 thorpej 675: pool_destroy(struct pool *pp)
1.1 pk 676: {
1.101 thorpej 677: struct pool_pagelist pq;
1.3 pk 678: struct pool_item_header *ph;
1.43 thorpej 679:
1.101 thorpej 680: /* Remove from global pool list */
1.134 ad 681: mutex_enter(&pool_head_lock);
682: while (pp->pr_refcnt != 0)
683: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 684: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.101 thorpej 685: if (drainpp == pp)
686: drainpp = NULL;
1.134 ad 687: mutex_exit(&pool_head_lock);
1.101 thorpej 688:
689: /* Remove this pool from its allocator's list of pools. */
1.134 ad 690: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 691: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 692: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 693:
1.178 elad 694: mutex_enter(&pool_allocator_lock);
695: if (--pp->pr_alloc->pa_refcnt == 0)
696: mutex_destroy(&pp->pr_alloc->pa_lock);
697: mutex_exit(&pool_allocator_lock);
698:
1.134 ad 699: mutex_enter(&pp->pr_lock);
1.101 thorpej 700:
1.134 ad 701: KASSERT(pp->pr_cache == NULL);
1.207 riastrad 702: KASSERTMSG((pp->pr_nout == 0),
703: "pool_destroy: pool busy: still out: %u", pp->pr_nout);
1.101 thorpej 704: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
705: KASSERT(LIST_EMPTY(&pp->pr_partpages));
706:
1.3 pk 707: /* Remove all pages */
1.101 thorpej 708: LIST_INIT(&pq);
1.88 chs 709: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 710: pr_rmpage(pp, ph, &pq);
711:
1.134 ad 712: mutex_exit(&pp->pr_lock);
1.3 pk 713:
1.101 thorpej 714: pr_pagelist_free(pp, &pq);
1.134 ad 715: cv_destroy(&pp->pr_cv);
716: mutex_destroy(&pp->pr_lock);
1.1 pk 717: }
718:
1.68 thorpej 719: void
720: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
721: {
722:
723: /* XXX no locking -- must be used just after pool_init() */
1.207 riastrad 724: KASSERTMSG((pp->pr_drain_hook == NULL),
725: "pool_set_drain_hook(%s): already set", pp->pr_wchan);
1.68 thorpej 726: pp->pr_drain_hook = fn;
727: pp->pr_drain_hook_arg = arg;
728: }
729:
1.88 chs 730: static struct pool_item_header *
1.128 christos 731: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 732: {
733: struct pool_item_header *ph;
734:
735: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 736: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.134 ad 737: else
1.97 yamt 738: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 739:
740: return (ph);
741: }
1.1 pk 742:
743: /*
1.134 ad 744: * Grab an item from the pool.
1.1 pk 745: */
1.3 pk 746: void *
1.56 sommerfe 747: pool_get(struct pool *pp, int flags)
1.1 pk 748: {
749: struct pool_item *pi;
1.3 pk 750: struct pool_item_header *ph;
1.55 thorpej 751: void *v;
1.1 pk 752:
1.207 riastrad 753: KASSERTMSG((pp->pr_itemsperpage != 0),
754: "pool_get: pool '%s': pr_itemsperpage is zero, "
755: "pool not initialized?", pp->pr_wchan);
756: KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
757: || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
758: "pool '%s' is IPL_NONE, but called from interrupt context",
759: pp->pr_wchan);
1.155 ad 760: if (flags & PR_WAITOK) {
1.154 yamt 761: ASSERT_SLEEPABLE();
1.155 ad 762: }
1.1 pk 763:
1.134 ad 764: mutex_enter(&pp->pr_lock);
1.20 thorpej 765: startover:
766: /*
767: * Check to see if we've reached the hard limit. If we have,
768: * and we can wait, then wait until an item has been returned to
769: * the pool.
770: */
1.207 riastrad 771: KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
772: "pool_get: %s: crossed hard limit", pp->pr_wchan);
1.34 thorpej 773: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 774: if (pp->pr_drain_hook != NULL) {
775: /*
776: * Since the drain hook is going to free things
777: * back to the pool, unlock, call the hook, re-lock,
778: * and check the hardlimit condition again.
779: */
1.134 ad 780: mutex_exit(&pp->pr_lock);
1.68 thorpej 781: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 782: mutex_enter(&pp->pr_lock);
1.68 thorpej 783: if (pp->pr_nout < pp->pr_hardlimit)
784: goto startover;
785: }
786:
1.29 sommerfe 787: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 788: /*
789: * XXX: A warning isn't logged in this case. Should
790: * it be?
791: */
792: pp->pr_flags |= PR_WANTED;
1.134 ad 793: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.20 thorpej 794: goto startover;
795: }
1.31 thorpej 796:
797: /*
798: * Log a message that the hard limit has been hit.
799: */
800: if (pp->pr_hardlimit_warning != NULL &&
801: ratecheck(&pp->pr_hardlimit_warning_last,
802: &pp->pr_hardlimit_ratecap))
803: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 804:
805: pp->pr_nfail++;
806:
1.134 ad 807: mutex_exit(&pp->pr_lock);
1.20 thorpej 808: return (NULL);
809: }
810:
1.3 pk 811: /*
812: * The convention we use is that if `curpage' is not NULL, then
813: * it points at a non-empty bucket. In particular, `curpage'
814: * never points at a page header which has PR_PHINPAGE set and
815: * has no items in its bucket.
816: */
1.20 thorpej 817: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 818: int error;
819:
1.207 riastrad 820: KASSERTMSG((pp->pr_nitems == 0),
821: "pool_get: nitems inconsistent"
822: ": %s: curpage NULL, nitems %u",
823: pp->pr_wchan, pp->pr_nitems);
1.20 thorpej 824:
1.21 thorpej 825: /*
826: * Call the back-end page allocator for more memory.
827: * Release the pool lock, as the back-end page allocator
828: * may block.
829: */
1.113 yamt 830: error = pool_grow(pp, flags);
831: if (error != 0) {
1.21 thorpej 832: /*
1.55 thorpej 833: * We were unable to allocate a page or item
834: * header, but we released the lock during
835: * allocation, so perhaps items were freed
836: * back to the pool. Check for this case.
1.21 thorpej 837: */
838: if (pp->pr_curpage != NULL)
839: goto startover;
1.15 pk 840:
1.117 yamt 841: pp->pr_nfail++;
1.134 ad 842: mutex_exit(&pp->pr_lock);
1.117 yamt 843: return (NULL);
1.1 pk 844: }
1.3 pk 845:
1.20 thorpej 846: /* Start the allocation process over. */
847: goto startover;
1.3 pk 848: }
1.97 yamt 849: if (pp->pr_roflags & PR_NOTOUCH) {
1.207 riastrad 850: KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
851: "pool_get: %s: page empty", pp->pr_wchan);
1.97 yamt 852: v = pr_item_notouch_get(pp, ph);
853: } else {
1.102 chs 854: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 855: if (__predict_false(v == NULL)) {
1.134 ad 856: mutex_exit(&pp->pr_lock);
1.97 yamt 857: panic("pool_get: %s: page empty", pp->pr_wchan);
858: }
1.207 riastrad 859: KASSERTMSG((pp->pr_nitems > 0),
860: "pool_get: nitems inconsistent"
861: ": %s: items on itemlist, nitems %u",
862: pp->pr_wchan, pp->pr_nitems);
863: KASSERTMSG((pi->pi_magic == PI_MAGIC),
864: "pool_get(%s): free list modified: "
865: "magic=%x; page %p; item addr %p",
866: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1.3 pk 867:
1.97 yamt 868: /*
869: * Remove from item list.
870: */
1.102 chs 871: LIST_REMOVE(pi, pi_list);
1.97 yamt 872: }
1.20 thorpej 873: pp->pr_nitems--;
874: pp->pr_nout++;
1.6 thorpej 875: if (ph->ph_nmissing == 0) {
1.207 riastrad 876: KASSERT(pp->pr_nidle > 0);
1.6 thorpej 877: pp->pr_nidle--;
1.88 chs 878:
879: /*
880: * This page was previously empty. Move it to the list of
881: * partially-full pages. This page is already curpage.
882: */
883: LIST_REMOVE(ph, ph_pagelist);
884: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 885: }
1.3 pk 886: ph->ph_nmissing++;
1.97 yamt 887: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.207 riastrad 888: KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) ||
889: LIST_EMPTY(&ph->ph_itemlist)),
890: "pool_get: %s: nmissing inconsistent", pp->pr_wchan);
1.3 pk 891: /*
1.88 chs 892: * This page is now full. Move it to the full list
893: * and select a new current page.
1.3 pk 894: */
1.88 chs 895: LIST_REMOVE(ph, ph_pagelist);
896: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
897: pool_update_curpage(pp);
1.1 pk 898: }
1.3 pk 899:
900: pp->pr_nget++;
1.20 thorpej 901:
902: /*
903: * If we have a low water mark and we are now below that low
904: * water mark, add more items to the pool.
905: */
1.53 thorpej 906: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 907: /*
908: * XXX: Should we log a warning? Should we set up a timeout
909: * to try again in a second or so? The latter could break
910: * a caller's assumptions about interrupt protection, etc.
911: */
912: }
913:
1.134 ad 914: mutex_exit(&pp->pr_lock);
1.125 ad 915: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
916: FREECHECK_OUT(&pp->pr_freecheck, v);
1.204 maxv 917: pool_redzone_fill(pp, v);
1.1 pk 918: return (v);
919: }
920:
921: /*
1.43 thorpej 922: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 923: */
1.43 thorpej 924: static void
1.101 thorpej 925: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 926: {
927: struct pool_item *pi = v;
1.3 pk 928: struct pool_item_header *ph;
929:
1.134 ad 930: KASSERT(mutex_owned(&pp->pr_lock));
1.204 maxv 931: pool_redzone_check(pp, v);
1.125 ad 932: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 933: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 934:
1.207 riastrad 935: KASSERTMSG((pp->pr_nout > 0),
936: "pool_put: pool %s: putting with none out", pp->pr_wchan);
1.3 pk 937:
1.121 yamt 938: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.3 pk 939: panic("pool_put: %s: page header missing", pp->pr_wchan);
940: }
1.28 thorpej 941:
1.3 pk 942: /*
943: * Return to item list.
944: */
1.97 yamt 945: if (pp->pr_roflags & PR_NOTOUCH) {
946: pr_item_notouch_put(pp, ph, v);
947: } else {
1.2 pk 948: #ifdef DIAGNOSTIC
1.97 yamt 949: pi->pi_magic = PI_MAGIC;
1.3 pk 950: #endif
1.32 chs 951: #ifdef DEBUG
1.97 yamt 952: {
953: int i, *ip = v;
1.32 chs 954:
1.97 yamt 955: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
956: *ip++ = PI_MAGIC;
957: }
1.32 chs 958: }
959: #endif
960:
1.102 chs 961: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 962: }
1.79 thorpej 963: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 964: ph->ph_nmissing--;
965: pp->pr_nput++;
1.20 thorpej 966: pp->pr_nitems++;
967: pp->pr_nout--;
1.3 pk 968:
969: /* Cancel "pool empty" condition if it exists */
970: if (pp->pr_curpage == NULL)
971: pp->pr_curpage = ph;
972:
973: if (pp->pr_flags & PR_WANTED) {
974: pp->pr_flags &= ~PR_WANTED;
1.134 ad 975: cv_broadcast(&pp->pr_cv);
1.3 pk 976: }
977:
978: /*
1.88 chs 979: * If this page is now empty, do one of two things:
1.21 thorpej 980: *
1.88 chs 981: * (1) If we have more pages than the page high water mark,
1.96 thorpej 982: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 983: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
984: * CLAIM.
1.21 thorpej 985: *
1.88 chs 986: * (2) Otherwise, move the page to the empty page list.
987: *
988: * Either way, select a new current page (so we use a partially-full
989: * page if one is available).
1.3 pk 990: */
991: if (ph->ph_nmissing == 0) {
1.6 thorpej 992: pp->pr_nidle++;
1.90 thorpej 993: if (pp->pr_npages > pp->pr_minpages &&
1.152 yamt 994: pp->pr_npages > pp->pr_maxpages) {
1.101 thorpej 995: pr_rmpage(pp, ph, pq);
1.3 pk 996: } else {
1.88 chs 997: LIST_REMOVE(ph, ph_pagelist);
998: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 999:
1.21 thorpej 1000: /*
1001: * Update the timestamp on the page. A page must
1002: * be idle for some period of time before it can
1003: * be reclaimed by the pagedaemon. This minimizes
1004: * ping-pong'ing for memory.
1.151 yamt 1005: *
1006: * note for 64-bit time_t: truncating to 32-bit is not
1007: * a problem for our usage.
1.21 thorpej 1008: */
1.151 yamt 1009: ph->ph_time = time_uptime;
1.1 pk 1010: }
1.88 chs 1011: pool_update_curpage(pp);
1.1 pk 1012: }
1.88 chs 1013:
1.21 thorpej 1014: /*
1.88 chs 1015: * If the page was previously completely full, move it to the
1016: * partially-full list and make it the current page. The next
1017: * allocation will get the item from this page, instead of
1018: * further fragmenting the pool.
1.21 thorpej 1019: */
1020: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1021: LIST_REMOVE(ph, ph_pagelist);
1022: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1023: pp->pr_curpage = ph;
1024: }
1.43 thorpej 1025: }
1026:
1.56 sommerfe 1027: void
1028: pool_put(struct pool *pp, void *v)
1029: {
1.101 thorpej 1030: struct pool_pagelist pq;
1031:
1032: LIST_INIT(&pq);
1.56 sommerfe 1033:
1.134 ad 1034: mutex_enter(&pp->pr_lock);
1.101 thorpej 1035: pool_do_put(pp, v, &pq);
1.134 ad 1036: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1037:
1.102 chs 1038: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1039: }
1.57 sommerfe 1040:
1.74 thorpej 1041: /*
1.113 yamt 1042: * pool_grow: grow a pool by a page.
1043: *
1044: * => called with pool locked.
                   1045:  * => may unlock and relock the pool internally.
1046: * => return with pool locked.
1047: */
1048:
1049: static int
1050: pool_grow(struct pool *pp, int flags)
1051: {
1052: struct pool_item_header *ph = NULL;
1053: char *cp;
1.209 ! riastrad 1054: int error;
! 1055:
! 1056: /*
! 1057: * If there's a pool_grow in progress, wait for it to complete
! 1058: * and try again from the top.
! 1059: */
! 1060: if (pp->pr_flags & PR_GROWING) {
! 1061: if (flags & PR_WAITOK) {
! 1062: do {
! 1063: cv_wait(&pp->pr_cv, &pp->pr_lock);
! 1064: } while (pp->pr_flags & PR_GROWING);
! 1065: return ERESTART;
! 1066: } else {
! 1067: return EWOULDBLOCK;
! 1068: }
! 1069: }
! 1070: pp->pr_flags |= PR_GROWING;
1.113 yamt 1071:
1.134 ad 1072: mutex_exit(&pp->pr_lock);
1.113 yamt 1073: cp = pool_allocator_alloc(pp, flags);
1074: if (__predict_true(cp != NULL)) {
1075: ph = pool_alloc_item_header(pp, cp, flags);
1076: }
1077: if (__predict_false(cp == NULL || ph == NULL)) {
1078: if (cp != NULL) {
1079: pool_allocator_free(pp, cp);
1080: }
1.134 ad 1081: mutex_enter(&pp->pr_lock);
1.209 ! riastrad 1082: error = ENOMEM;
! 1083: goto out;
1.113 yamt 1084: }
1085:
1.134 ad 1086: mutex_enter(&pp->pr_lock);
1.113 yamt 1087: pool_prime_page(pp, cp, ph);
1088: pp->pr_npagealloc++;
1.209 ! riastrad 1089: error = 0;
! 1090:
! 1091: out:
! 1092: /*
! 1093: * If anyone was waiting for pool_grow, notify them that we
! 1094: * may have just done it.
! 1095: */
! 1096: KASSERT(pp->pr_flags & PR_GROWING);
! 1097: pp->pr_flags &= ~PR_GROWING;
! 1098: cv_broadcast(&pp->pr_cv);
! 1099:
! 1100: return error;
1.113 yamt 1101: }
1102:
1103: /*
1.74 thorpej 1104: * Add N items to the pool.
1105: */
1106: int
1107: pool_prime(struct pool *pp, int n)
1108: {
1.75 simonb 1109: int newpages;
1.113 yamt 1110: int error = 0;
1.74 thorpej 1111:
1.134 ad 1112: mutex_enter(&pp->pr_lock);
1.74 thorpej 1113:
1114: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1115:
1116: while (newpages-- > 0) {
1.113 yamt 1117: error = pool_grow(pp, PR_NOWAIT);
1118: if (error) {
1.74 thorpej 1119: break;
1120: }
1121: pp->pr_minpages++;
1122: }
1123:
1124: if (pp->pr_minpages >= pp->pr_maxpages)
1125: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1126:
1.134 ad 1127: mutex_exit(&pp->pr_lock);
1.113 yamt 1128: return error;
1.74 thorpej 1129: }
1.55 thorpej 1130:
1131: /*
1.3 pk 1132: * Add a page worth of items to the pool.
1.21 thorpej 1133: *
1134: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1135: */
1.55 thorpej 1136: static void
1.128 christos 1137: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1138: {
1139: struct pool_item *pi;
1.128 christos 1140: void *cp = storage;
1.125 ad 1141: const unsigned int align = pp->pr_align;
1142: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1143: int n;
1.36 pk 1144:
1.134 ad 1145: KASSERT(mutex_owned(&pp->pr_lock));
1.207 riastrad 1146: KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
1147: (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
1148: "pool_prime_page: %s: unaligned page: %p", pp->pr_wchan, cp);
1.3 pk 1149:
1150: /*
1151: * Insert page header.
1152: */
1.88 chs 1153: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1154: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1155: ph->ph_page = storage;
1156: ph->ph_nmissing = 0;
1.151 yamt 1157: ph->ph_time = time_uptime;
1.88 chs 1158: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1159: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1160:
1.6 thorpej 1161: pp->pr_nidle++;
1162:
1.3 pk 1163: /*
1164: * Color this page.
1165: */
1.141 yamt 1166: ph->ph_off = pp->pr_curcolor;
1167: cp = (char *)cp + ph->ph_off;
1.3 pk 1168: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1169: pp->pr_curcolor = 0;
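	/*
	 * Note: successive pages thus start their items at offsets 0,
	 * align, 2*align, ... up to pr_maxcolor before wrapping, so
	 * the same item index on different pages lands on different
	 * cache lines.
	 */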
1170:
1171: /*
                   1172: 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1173: */
1174: if (ioff != 0)
1.128 christos 1175: cp = (char *)cp + align - ioff;
1.3 pk 1176:
1.125 ad 1177: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1178:
1.3 pk 1179: /*
1180: * Insert remaining chunks on the bucket list.
1181: */
1182: n = pp->pr_itemsperpage;
1.20 thorpej 1183: pp->pr_nitems += n;
1.3 pk 1184:
1.97 yamt 1185: if (pp->pr_roflags & PR_NOTOUCH) {
1.141 yamt 1186: pr_item_notouch_init(pp, ph);
1.97 yamt 1187: } else {
1188: while (n--) {
1189: pi = (struct pool_item *)cp;
1.78 thorpej 1190:
1.97 yamt 1191: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1192:
1.97 yamt 1193: /* Insert on page list */
1.102 chs 1194: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1195: #ifdef DIAGNOSTIC
1.97 yamt 1196: pi->pi_magic = PI_MAGIC;
1.3 pk 1197: #endif
1.128 christos 1198: cp = (char *)cp + pp->pr_size;
1.125 ad 1199:
1200: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1201: }
1.3 pk 1202: }
1203:
1204: /*
1205: * If the pool was depleted, point at the new page.
1206: */
1207: if (pp->pr_curpage == NULL)
1208: pp->pr_curpage = ph;
1209:
1210: if (++pp->pr_npages > pp->pr_hiwat)
1211: pp->pr_hiwat = pp->pr_npages;
1212: }
1213:
1.20 thorpej 1214: /*
1.52 thorpej 1215: * Used by pool_get() when nitems drops below the low water mark. This
1.88      chs      1216:  * brings pr_nitems back up to the low water mark.
1.20 thorpej 1217: *
1.21 thorpej 1218: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1219: *
1.73 thorpej 1220: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1221: * with it locked.
1222: */
1223: static int
1.42 thorpej 1224: pool_catchup(struct pool *pp)
1.20 thorpej 1225: {
1226: int error = 0;
1227:
1.54 thorpej 1228: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1229: error = pool_grow(pp, PR_NOWAIT);
1230: if (error) {
1.20 thorpej 1231: break;
1232: }
1233: }
1.113 yamt 1234: return error;
1.20 thorpej 1235: }
1236:
1.88 chs 1237: static void
1238: pool_update_curpage(struct pool *pp)
1239: {
1240:
1241: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1242: if (pp->pr_curpage == NULL) {
1243: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1244: }
1.168 yamt 1245: KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1246: (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1.88 chs 1247: }
1248:
1.3 pk 1249: void
1.42 thorpej 1250: pool_setlowat(struct pool *pp, int n)
1.3 pk 1251: {
1.15 pk 1252:
1.134 ad 1253: mutex_enter(&pp->pr_lock);
1.21 thorpej 1254:
1.3 pk 1255: pp->pr_minitems = n;
1.15 pk 1256: pp->pr_minpages = (n == 0)
1257: ? 0
1.18 thorpej 1258: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1259:
1260: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1261: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1262: /*
1263: * XXX: Should we log a warning? Should we set up a timeout
1264: * to try again in a second or so? The latter could break
1265: * a caller's assumptions about interrupt protection, etc.
1266: */
1267: }
1.21 thorpej 1268:
1.134 ad 1269: mutex_exit(&pp->pr_lock);
1.3 pk 1270: }
1271:
1272: void
1.42 thorpej 1273: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1274: {
1.15 pk 1275:
1.134 ad 1276: mutex_enter(&pp->pr_lock);
1.21 thorpej 1277:
1.15 pk 1278: pp->pr_maxpages = (n == 0)
1279: ? 0
1.18 thorpej 1280: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1281:
1.134 ad 1282: mutex_exit(&pp->pr_lock);
1.3 pk 1283: }
1284:
1.20 thorpej 1285: void
1.42 thorpej 1286: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1287: {
1288:
1.134 ad 1289: mutex_enter(&pp->pr_lock);
1.20 thorpej 1290:
1291: pp->pr_hardlimit = n;
1292: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1293: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1294: pp->pr_hardlimit_warning_last.tv_sec = 0;
1295: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1296:
1297: /*
1.21 thorpej 1298: * In-line version of pool_sethiwat(), because we don't want to
1299: * release the lock.
1.20 thorpej 1300: */
1301: pp->pr_maxpages = (n == 0)
1302: ? 0
1303: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1304:
1.134 ad 1305: mutex_exit(&pp->pr_lock);
1.20 thorpej 1306: }
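
/*
 * An illustrative sketch (not part of this file) of tuning a pool with
 * the routines above, for a hypothetical `foo_pool': keep at least 16
 * items primed, start releasing idle pages above 64 items, and refuse
 * allocations beyond 1024 outstanding items, warning at most once a
 * minute:
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 64);
 *	pool_sethardlimit(&foo_pool, 1024, "foo_pool limit reached", 60);
 */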
1.3 pk 1307:
1308: /*
1309: * Release all complete pages that have not been used recently.
1.184 rmind 1310: *
1.197 jym 1311: * Must not be called from interrupt context.
1.3 pk 1312: */
1.66 thorpej 1313: int
1.56 sommerfe 1314: pool_reclaim(struct pool *pp)
1.3 pk 1315: {
1316: struct pool_item_header *ph, *phnext;
1.61 chs 1317: struct pool_pagelist pq;
1.151 yamt 1318: uint32_t curtime;
1.134 ad 1319: bool klock;
1320: int rv;
1.3 pk 1321:
1.197 jym 1322: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.184 rmind 1323:
1.68 thorpej 1324: if (pp->pr_drain_hook != NULL) {
1325: /*
1326: * The drain hook must be called with the pool unlocked.
1327: */
1328: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1329: }
1330:
1.134 ad 1331: /*
1.157 ad 1332: * XXXSMP Because we do not want to cause non-MPSAFE code
1333: * to block.
1.134 ad 1334: */
1335: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1336: pp->pr_ipl == IPL_SOFTSERIAL) {
1337: KERNEL_LOCK(1, NULL);
1338: klock = true;
1339: } else
1340: klock = false;
1341:
1342: /* Reclaim items from the pool's cache (if any). */
1343: if (pp->pr_cache != NULL)
1344: pool_cache_invalidate(pp->pr_cache);
1345:
1346: if (mutex_tryenter(&pp->pr_lock) == 0) {
1347: if (klock) {
1348: KERNEL_UNLOCK_ONE(NULL);
1349: }
1.66 thorpej 1350: return (0);
1.134 ad 1351: }
1.68 thorpej 1352:
1.88 chs 1353: LIST_INIT(&pq);
1.43 thorpej 1354:
1.151 yamt 1355: curtime = time_uptime;
1.21 thorpej 1356:
1.88 chs 1357: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1358: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1359:
1360: /* Check our minimum page claim */
1361: if (pp->pr_npages <= pp->pr_minpages)
1362: break;
1363:
1.88 chs 1364: KASSERT(ph->ph_nmissing == 0);
1.191 para 1365: if (curtime - ph->ph_time < pool_inactive_time)
1.88 chs 1366: continue;
1.21 thorpej 1367:
1.88 chs 1368: /*
1369: * If freeing this page would put us below
1370: * the low water mark, stop now.
1371: */
1372: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1373: pp->pr_minitems)
1374: break;
1.21 thorpej 1375:
1.88 chs 1376: pr_rmpage(pp, ph, &pq);
1.3 pk 1377: }
1378:
1.134 ad 1379: mutex_exit(&pp->pr_lock);
1380:
1381: if (LIST_EMPTY(&pq))
1382: rv = 0;
1383: else {
1384: pr_pagelist_free(pp, &pq);
1385: rv = 1;
1386: }
1387:
1388: if (klock) {
1389: KERNEL_UNLOCK_ONE(NULL);
1390: }
1.66 thorpej 1391:
1.134 ad 1392: return (rv);
1.3 pk 1393: }
1394:
1395: /*
1.197     jym      1396:  * Drain pools, one at a time.  The drained pool is returned in *ppp
                        :  * if ppp is not NULL.
1.131 ad 1397: *
1.134 ad 1398: * Note, must never be called from interrupt context.
1.3 pk 1399: */
1.197 jym 1400: bool
1401: pool_drain(struct pool **ppp)
1.3 pk 1402: {
1.197 jym 1403: bool reclaimed;
1.3 pk 1404: struct pool *pp;
1.134 ad 1405:
1.145 ad 1406: KASSERT(!TAILQ_EMPTY(&pool_head));
1.3 pk 1407:
1.61 chs 1408: pp = NULL;
1.134 ad 1409:
1410: /* Find next pool to drain, and add a reference. */
1411: mutex_enter(&pool_head_lock);
1412: do {
1413: if (drainpp == NULL) {
1.145 ad 1414: drainpp = TAILQ_FIRST(&pool_head);
1.134 ad 1415: }
1416: if (drainpp != NULL) {
1417: pp = drainpp;
1.145 ad 1418: drainpp = TAILQ_NEXT(pp, pr_poollist);
1.134 ad 1419: }
1420: /*
1421: * Skip completely idle pools. We depend on at least
1422: * one pool in the system being active.
1423: */
1424: } while (pp == NULL || pp->pr_npages == 0);
1425: pp->pr_refcnt++;
1426: mutex_exit(&pool_head_lock);
1427:
                   1428: 	/* Drain the cache (if any) and the pool. */
1.186 pooka 1429: reclaimed = pool_reclaim(pp);
1.134 ad 1430:
1431: /* Finally, unlock the pool. */
1432: mutex_enter(&pool_head_lock);
1433: pp->pr_refcnt--;
1434: cv_broadcast(&pool_busy);
1435: mutex_exit(&pool_head_lock);
1.186 pooka 1436:
1.197 jym 1437: if (ppp != NULL)
1438: *ppp = pp;
1439:
1.186 pooka 1440: return reclaimed;
1.3 pk 1441: }
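
/*
 * An illustrative sketch (not part of this file) of how a
 * memory-starved caller, such as the page daemon, might use
 * pool_drain():
 *
 *	struct pool *pp;
 *
 *	if (pool_drain(&pp))
 *		... at least one page was reclaimed, from pool pp ...
 */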
1442:
1443: /*
1444: * Diagnostic helpers.
1445: */
1.21 thorpej 1446:
1.25 thorpej 1447: void
1.108 yamt 1448: pool_printall(const char *modif, void (*pr)(const char *, ...))
1449: {
1450: struct pool *pp;
1451:
1.145 ad 1452: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.108 yamt 1453: pool_printit(pp, modif, pr);
1454: }
1455: }
1456:
1457: void
1.42 thorpej 1458: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1459: {
1460:
1461: if (pp == NULL) {
1462: (*pr)("Must specify a pool to print.\n");
1463: return;
1464: }
1465:
1466: pool_print1(pp, modif, pr);
1467: }
1468:
1.21 thorpej 1469: static void
1.124 yamt 1470: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1471: void (*pr)(const char *, ...))
1.88 chs 1472: {
1473: struct pool_item_header *ph;
1.207 riastrad 1474: struct pool_item *pi __diagused;
1.88 chs 1475:
1476: LIST_FOREACH(ph, pl, ph_pagelist) {
1.151 yamt 1477: (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1478: ph->ph_page, ph->ph_nmissing, ph->ph_time);
1.88 chs 1479: #ifdef DIAGNOSTIC
1.97 yamt 1480: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1481: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1482: if (pi->pi_magic != PI_MAGIC) {
1483: (*pr)("\t\t\titem %p, magic 0x%x\n",
1484: pi, pi->pi_magic);
1485: }
1.88 chs 1486: }
1487: }
1488: #endif
1489: }
1490: }
1491:
1492: static void
1.42 thorpej 1493: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1494: {
1.25 thorpej 1495: struct pool_item_header *ph;
1.134 ad 1496: pool_cache_t pc;
1497: pcg_t *pcg;
1498: pool_cache_cpu_t *cc;
1499: uint64_t cpuhit, cpumiss;
1.44 thorpej 1500: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1501: char c;
1502:
1503: while ((c = *modif++) != '\0') {
1504: if (c == 'l')
1505: print_log = 1;
1506: if (c == 'p')
1507: print_pagelist = 1;
1.44 thorpej 1508: if (c == 'c')
1509: print_cache = 1;
1.25 thorpej 1510: }
1511:
1.134 ad 1512: if ((pc = pp->pr_cache) != NULL) {
1513: (*pr)("POOL CACHE");
1514: } else {
1515: (*pr)("POOL");
1516: }
1517:
1518: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1519: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1520: pp->pr_roflags);
1.66 thorpej 1521: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1522: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1523: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1524: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1525: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1526:
1.134 ad 1527: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1528: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1529: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1530: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1531:
1532: if (print_pagelist == 0)
1533: goto skip_pagelist;
1534:
1.88 chs 1535: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1536: (*pr)("\n\tempty page list:\n");
1.97 yamt 1537: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1538: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1539: (*pr)("\n\tfull page list:\n");
1.97 yamt 1540: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1541: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1542: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1543: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1544:
1.25 thorpej 1545: if (pp->pr_curpage == NULL)
1546: (*pr)("\tno current page\n");
1547: else
1548: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1549:
1550: skip_pagelist:
1551: if (print_log == 0)
1552: goto skip_log;
1553:
1554: (*pr)("\n");
1.3 pk 1555:
1.25 thorpej 1556: skip_log:
1.44 thorpej 1557:
1.102 chs 1558: #define PR_GROUPLIST(pcg) \
1559: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1.142 ad 1560: for (i = 0; i < pcg->pcg_size; i++) { \
1.102 chs 1561: if (pcg->pcg_objects[i].pcgo_pa != \
1562: POOL_PADDR_INVALID) { \
1563: (*pr)("\t\t\t%p, 0x%llx\n", \
1564: pcg->pcg_objects[i].pcgo_va, \
1565: (unsigned long long) \
1566: pcg->pcg_objects[i].pcgo_pa); \
1567: } else { \
1568: (*pr)("\t\t\t%p\n", \
1569: pcg->pcg_objects[i].pcgo_va); \
1570: } \
1571: }
1572:
1.134 ad 1573: if (pc != NULL) {
1574: cpuhit = 0;
1575: cpumiss = 0;
1.183 ad 1576: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.134 ad 1577: if ((cc = pc->pc_cpus[i]) == NULL)
1578: continue;
1579: cpuhit += cc->cc_hits;
1580: cpumiss += cc->cc_misses;
1581: }
1582: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1583: (*pr)("\tcache layer hits %llu misses %llu\n",
1584: pc->pc_hits, pc->pc_misses);
1585: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1586: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1587: pc->pc_contended);
1588: (*pr)("\tcache layer empty groups %u full groups %u\n",
1589: pc->pc_nempty, pc->pc_nfull);
1590: if (print_cache) {
1591: (*pr)("\tfull cache groups:\n");
1592: for (pcg = pc->pc_fullgroups; pcg != NULL;
1593: pcg = pcg->pcg_next) {
1594: PR_GROUPLIST(pcg);
1595: }
1596: (*pr)("\tempty cache groups:\n");
1597: for (pcg = pc->pc_emptygroups; pcg != NULL;
1598: pcg = pcg->pcg_next) {
1599: PR_GROUPLIST(pcg);
1600: }
1.103 chs 1601: }
1.44 thorpej 1602: }
1.102 chs 1603: #undef PR_GROUPLIST
1.88 chs 1604: }
1605:
1606: static int
1607: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1608: {
1609: struct pool_item *pi;
1.128 christos 1610: void *page;
1.88 chs 1611: int n;
1612:
1.121 yamt 1613: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1614: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1615: if (page != ph->ph_page &&
1616: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1617: if (label != NULL)
1618: printf("%s: ", label);
1619: printf("pool(%p:%s): page inconsistency: page %p;"
1620: " at page head addr %p (p %p)\n", pp,
1621: pp->pr_wchan, ph->ph_page,
1622: ph, page);
1623: return 1;
1624: }
1.88 chs 1625: }
1.3 pk 1626:
1.97 yamt 1627: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1628: return 0;
1629:
1.102 chs 1630: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1631: pi != NULL;
1.102 chs 1632: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1633:
1634: #ifdef DIAGNOSTIC
1635: if (pi->pi_magic != PI_MAGIC) {
1636: if (label != NULL)
1637: printf("%s: ", label);
1638: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1639: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1640: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1641: n, pi);
1.88 chs 1642: panic("pool");
1643: }
1644: #endif
1.121 yamt 1645: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1646: continue;
1647: }
1.128 christos 1648: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1649: if (page == ph->ph_page)
1650: continue;
1651:
1652: if (label != NULL)
1653: printf("%s: ", label);
1654: printf("pool(%p:%s): page inconsistency: page %p;"
1655: " item ordinal %d; addr %p (p %p)\n", pp,
1656: pp->pr_wchan, ph->ph_page,
1657: n, pi, page);
1658: return 1;
1659: }
1660: return 0;
1.3 pk 1661: }
1662:
1.88 chs 1663:
1.3 pk 1664: int
1.42 thorpej 1665: pool_chk(struct pool *pp, const char *label)
1.3 pk 1666: {
1667: struct pool_item_header *ph;
1668: int r = 0;
1669:
1.134 ad 1670: mutex_enter(&pp->pr_lock);
1.88 chs 1671: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1672: r = pool_chk_page(pp, label, ph);
1673: if (r) {
1674: goto out;
1675: }
1676: }
1677: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1678: r = pool_chk_page(pp, label, ph);
1679: if (r) {
1.3 pk 1680: goto out;
1681: }
1.88 chs 1682: }
1683: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1684: r = pool_chk_page(pp, label, ph);
1685: if (r) {
1.3 pk 1686: goto out;
1687: }
1688: }
1.88 chs 1689:
1.3 pk 1690: out:
1.134 ad 1691: mutex_exit(&pp->pr_lock);
1.3 pk 1692: return (r);
1.43 thorpej 1693: }
1694:
1695: /*
1696: * pool_cache_init:
1697: *
1698: * Initialize a pool cache.
1.134 ad 1699: */
1700: pool_cache_t
1701: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1702: const char *wchan, struct pool_allocator *palloc, int ipl,
1703: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1704: {
1705: pool_cache_t pc;
1706:
1707: pc = pool_get(&cache_pool, PR_WAITOK);
1708: if (pc == NULL)
1709: return NULL;
1710:
1711: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1712: palloc, ipl, ctor, dtor, arg);
1713:
1714: return pc;
1715: }
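
/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical):
 * a subsystem typically creates one cache per object type and keeps
 * constructed objects cached across get/put cycles.
 */
#if 0
struct foo {
	int	f_refcnt;
	char	f_name[16];
};

static pool_cache_t foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{
	struct foo *f = obj;

	/* Runs only when a fresh object is taken from the pool. */
	f->f_refcnt = 0;
	f->f_name[0] = '\0';
	return 0;
}

static void
foo_init(void)
{

	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
	    "foocache", NULL, IPL_NONE, foo_ctor, NULL, NULL);
}
#endif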
1716:
1717: /*
1718: * pool_cache_bootstrap:
1.43 thorpej 1719: *
1.134 ad 1720: * Kernel-private version of pool_cache_init(). The caller
1721: * provides initial storage.
1.43 thorpej 1722: */
1723: void
1.134 ad 1724: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1725: u_int align_offset, u_int flags, const char *wchan,
1726: struct pool_allocator *palloc, int ipl,
1727: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 1728: void *arg)
1729: {
1.134 ad 1730: CPU_INFO_ITERATOR cii;
1.145 ad 1731: pool_cache_t pc1;
1.134 ad 1732: struct cpu_info *ci;
1733: struct pool *pp;
1734:
1735: pp = &pc->pc_pool;
1.208 chs 1736: if (palloc == NULL && ipl == IPL_NONE) {
1737: if (size > PAGE_SIZE) {
1738: int bigidx = pool_bigidx(size);
1739:
1740: palloc = &pool_allocator_big[bigidx];
1741: } else
1742: palloc = &pool_allocator_nointr;
1743: }
1.134 ad 1744: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.157 ad 1745: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1.43 thorpej 1746:
1.134 ad 1747: if (ctor == NULL) {
1748: ctor = (int (*)(void *, void *, int))nullop;
1749: }
1750: if (dtor == NULL) {
1751: dtor = (void (*)(void *, void *))nullop;
1752: }
1.43 thorpej 1753:
1.134 ad 1754: pc->pc_emptygroups = NULL;
1755: pc->pc_fullgroups = NULL;
1756: pc->pc_partgroups = NULL;
1.43 thorpej 1757: pc->pc_ctor = ctor;
1758: pc->pc_dtor = dtor;
1759: pc->pc_arg = arg;
1.134 ad 1760: pc->pc_hits = 0;
1.48 thorpej 1761: pc->pc_misses = 0;
1.134 ad 1762: pc->pc_nempty = 0;
1763: pc->pc_npart = 0;
1764: pc->pc_nfull = 0;
1765: pc->pc_contended = 0;
1766: pc->pc_refcnt = 0;
1.136 yamt 1767: pc->pc_freecheck = NULL;
1.134 ad 1768:
1.142 ad 1769: if ((flags & PR_LARGECACHE) != 0) {
1770: pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1.163 ad 1771: pc->pc_pcgpool = &pcg_large_pool;
1.142 ad 1772: } else {
1773: pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1.163 ad 1774: pc->pc_pcgpool = &pcg_normal_pool;
1.142 ad 1775: }
1776:
1.134 ad 1777: /* Allocate per-CPU caches. */
1778: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1779: pc->pc_ncpu = 0;
1.139 ad 1780: if (ncpu < 2) {
1.137 ad 1781: /* XXX For sparc: boot CPU is not attached yet. */
1782: pool_cache_cpu_init1(curcpu(), pc);
1783: } else {
1784: for (CPU_INFO_FOREACH(cii, ci)) {
1785: pool_cache_cpu_init1(ci, pc);
1786: }
1.134 ad 1787: }
1.145 ad 1788:
1789: /* Add to list of all pools. */
1790: if (__predict_true(!cold))
1.134 ad 1791: mutex_enter(&pool_head_lock);
1.145 ad 1792: TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1793: if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1794: break;
1795: }
1796: if (pc1 == NULL)
1797: TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1798: else
1799: TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1800: if (__predict_true(!cold))
1.134 ad 1801: mutex_exit(&pool_head_lock);
1.145 ad 1802:
1803: membar_sync();
1804: pp->pr_cache = pc;
1.43 thorpej 1805: }
1806:
1807: /*
1808: * pool_cache_destroy:
1809: *
1810: * Destroy a pool cache.
1811: */
1812: void
1.134 ad 1813: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 1814: {
1.191 para 1815:
1816: pool_cache_bootstrap_destroy(pc);
1817: pool_put(&cache_pool, pc);
1818: }
1819:
1820: /*
1821: * pool_cache_bootstrap_destroy:
1822: *
              1823: 	 *	Destroy a pool cache whose storage was supplied by the caller.
1824: */
1825: void
1826: pool_cache_bootstrap_destroy(pool_cache_t pc)
1827: {
1.134 ad 1828: struct pool *pp = &pc->pc_pool;
1.175 jym 1829: u_int i;
1.134 ad 1830:
1831: /* Remove it from the global list. */
1832: mutex_enter(&pool_head_lock);
1833: while (pc->pc_refcnt != 0)
1834: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 1835: TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1.134 ad 1836: mutex_exit(&pool_head_lock);
1.43 thorpej 1837:
1838: /* First, invalidate the entire cache. */
1839: pool_cache_invalidate(pc);
1840:
1.134 ad 1841: /* Disassociate it from the pool. */
1842: mutex_enter(&pp->pr_lock);
1843: pp->pr_cache = NULL;
1844: mutex_exit(&pp->pr_lock);
1845:
1846: /* Destroy per-CPU data */
1.183 ad 1847: for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1.175 jym 1848: pool_cache_invalidate_cpu(pc, i);
1.134 ad 1849:
1850: /* Finally, destroy it. */
1851: mutex_destroy(&pc->pc_lock);
1852: pool_destroy(pp);
1853: }
1854:
1855: /*
1856: * pool_cache_cpu_init1:
1857: *
1858: * Called for each pool_cache whenever a new CPU is attached.
1859: */
1860: static void
1861: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1862: {
1863: pool_cache_cpu_t *cc;
1.137 ad 1864: int index;
1.134 ad 1865:
1.137 ad 1866: index = ci->ci_index;
1867:
1.183 ad 1868: KASSERT(index < __arraycount(pc->pc_cpus));
1.134 ad 1869:
1.137 ad 1870: if ((cc = pc->pc_cpus[index]) != NULL) {
1871: KASSERT(cc->cc_cpuindex == index);
1.134 ad 1872: return;
1873: }
1874:
1875: /*
1876: * The first CPU is 'free'. This needs to be the case for
1877: * bootstrap - we may not be able to allocate yet.
1878: */
1879: if (pc->pc_ncpu == 0) {
1880: cc = &pc->pc_cpu0;
1881: pc->pc_ncpu = 1;
1882: } else {
1883: mutex_enter(&pc->pc_lock);
1884: pc->pc_ncpu++;
1885: mutex_exit(&pc->pc_lock);
1886: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1887: }
1888:
1889: cc->cc_ipl = pc->pc_pool.pr_ipl;
1890: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1891: cc->cc_cache = pc;
1.137 ad 1892: cc->cc_cpuindex = index;
1.134 ad 1893: cc->cc_hits = 0;
1894: cc->cc_misses = 0;
1.169 yamt 1895: cc->cc_current = __UNCONST(&pcg_dummy);
1896: cc->cc_previous = __UNCONST(&pcg_dummy);
1.134 ad 1897:
1.137 ad 1898: pc->pc_cpus[index] = cc;
1.43 thorpej 1899: }
1900:
1.134 ad 1901: /*
1902: * pool_cache_cpu_init:
1903: *
1904: * Called whenever a new CPU is attached.
1905: */
1906: void
1907: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 1908: {
1.134 ad 1909: pool_cache_t pc;
1910:
1911: mutex_enter(&pool_head_lock);
1.145 ad 1912: TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1.134 ad 1913: pc->pc_refcnt++;
1914: mutex_exit(&pool_head_lock);
1.43 thorpej 1915:
1.134 ad 1916: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 1917:
1.134 ad 1918: mutex_enter(&pool_head_lock);
1919: pc->pc_refcnt--;
1920: cv_broadcast(&pool_busy);
1921: }
1922: mutex_exit(&pool_head_lock);
1.43 thorpej 1923: }
1924:
1.134 ad 1925: /*
1926: * pool_cache_reclaim:
1927: *
1928: * Reclaim memory from a pool cache.
1929: */
1930: bool
1931: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 1932: {
1933:
1.134 ad 1934: return pool_reclaim(&pc->pc_pool);
1935: }
1.43 thorpej 1936:
1.136 yamt 1937: static void
1938: pool_cache_destruct_object1(pool_cache_t pc, void *object)
1939: {
1940:
1941: (*pc->pc_dtor)(pc->pc_arg, object);
1942: pool_put(&pc->pc_pool, object);
1943: }
1944:
1.134 ad 1945: /*
1946: * pool_cache_destruct_object:
1947: *
1948: * Force destruction of an object and its release back into
1949: * the pool.
1950: */
1951: void
1952: pool_cache_destruct_object(pool_cache_t pc, void *object)
1953: {
1954:
1.136 yamt 1955: FREECHECK_IN(&pc->pc_freecheck, object);
1956:
1957: pool_cache_destruct_object1(pc, object);
1.43 thorpej 1958: }
1959:
1.134 ad 1960: /*
1961: * pool_cache_invalidate_groups:
1962: *
1963: * Invalidate a chain of groups and destruct all objects.
1964: */
1.102 chs 1965: static void
1.134 ad 1966: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 1967: {
1.134 ad 1968: void *object;
1969: pcg_t *next;
1970: int i;
1971:
1972: for (; pcg != NULL; pcg = next) {
1973: next = pcg->pcg_next;
1974:
1975: for (i = 0; i < pcg->pcg_avail; i++) {
1976: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 1977: pool_cache_destruct_object1(pc, object);
1.134 ad 1978: }
1.102 chs 1979:
1.142 ad 1980: if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
1981: pool_put(&pcg_large_pool, pcg);
1982: } else {
1983: KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
1984: pool_put(&pcg_normal_pool, pcg);
1985: }
1.102 chs 1986: }
1987: }
1988:
1.43 thorpej 1989: /*
1.134 ad 1990: * pool_cache_invalidate:
1.43 thorpej 1991: *
1.134 ad 1992: * Invalidate a pool cache (destruct and release all of the
1993: * cached objects). Does not reclaim objects from the pool.
1.176 thorpej 1994: *
1995: * Note: For pool caches that provide constructed objects, there
1996: * is an assumption that another level of synchronization is occurring
1997: * between the input to the constructor and the cache invalidation.
1.196 jym 1998: *
1999: * Invalidation is a costly process and should not be called from
2000: * interrupt context.
1.43 thorpej 2001: */
1.134 ad 2002: void
2003: pool_cache_invalidate(pool_cache_t pc)
2004: {
1.196 jym 2005: uint64_t where;
1.134 ad 2006: pcg_t *full, *empty, *part;
1.196 jym 2007:
2008: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.176 thorpej 2009:
1.177 jym 2010: if (ncpu < 2 || !mp_online) {
1.176 thorpej 2011: /*
2012: * We might be called early enough in the boot process
2013: * for the CPU data structures to not be fully initialized.
1.196 jym 2014: * In this case, transfer the content of the local CPU's
2015: * cache back into global cache as only this CPU is currently
2016: * running.
1.176 thorpej 2017: */
1.196 jym 2018: pool_cache_transfer(pc);
1.176 thorpej 2019: } else {
2020: /*
1.196 jym 2021: * Signal all CPUs that they must transfer their local
2022: * cache back to the global pool then wait for the xcall to
2023: * complete.
1.176 thorpej 2024: */
1.196 jym 2025: where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
2026: pc, NULL);
1.176 thorpej 2027: xc_wait(where);
2028: }
1.196 jym 2029:
2030: /* Empty pool caches, then invalidate objects */
1.134 ad 2031: mutex_enter(&pc->pc_lock);
2032: full = pc->pc_fullgroups;
2033: empty = pc->pc_emptygroups;
2034: part = pc->pc_partgroups;
2035: pc->pc_fullgroups = NULL;
2036: pc->pc_emptygroups = NULL;
2037: pc->pc_partgroups = NULL;
2038: pc->pc_nfull = 0;
2039: pc->pc_nempty = 0;
2040: pc->pc_npart = 0;
2041: mutex_exit(&pc->pc_lock);
2042:
2043: pool_cache_invalidate_groups(pc, full);
2044: pool_cache_invalidate_groups(pc, empty);
2045: pool_cache_invalidate_groups(pc, part);
2046: }
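
/*
 * Usage sketch (hypothetical, reusing the "foo" names from the
 * pool_cache_init() example above): if state baked in by foo_ctor()
 * goes stale, the owner can flush all cached objects so that future
 * gets re-run the constructor:
 *
 *	pool_cache_invalidate(foo_cache);
 *
 * As noted above, this is costly and must not happen in interrupt
 * context.
 */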
2047:
1.175 jym 2048: /*
2049: * pool_cache_invalidate_cpu:
2050: *
              2051:  *	Invalidate all CPU-bound cached objects in the pool cache, the
              2052:  *	CPU being identified by its associated index.
              2053:  *	It is the caller's responsibility to ensure that no operation is
              2054:  *	taking place on this pool cache while doing this invalidation.
              2055:  *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
              2056:  *	pool-cached objects from a CPU other than the one currently running
              2057:  *	may result in undefined behaviour.
2058: */
2059: static void
2060: pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2061: {
2062: pool_cache_cpu_t *cc;
2063: pcg_t *pcg;
2064:
2065: if ((cc = pc->pc_cpus[index]) == NULL)
2066: return;
2067:
2068: if ((pcg = cc->cc_current) != &pcg_dummy) {
2069: pcg->pcg_next = NULL;
2070: pool_cache_invalidate_groups(pc, pcg);
2071: }
2072: if ((pcg = cc->cc_previous) != &pcg_dummy) {
2073: pcg->pcg_next = NULL;
2074: pool_cache_invalidate_groups(pc, pcg);
2075: }
2076: if (cc != &pc->pc_cpu0)
2077: pool_put(&cache_cpu_pool, cc);
2079: }
2080:
1.134 ad 2081: void
2082: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2083: {
2084:
2085: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2086: }
2087:
2088: void
2089: pool_cache_setlowat(pool_cache_t pc, int n)
2090: {
2091:
2092: pool_setlowat(&pc->pc_pool, n);
2093: }
2094:
2095: void
2096: pool_cache_sethiwat(pool_cache_t pc, int n)
2097: {
2098:
2099: pool_sethiwat(&pc->pc_pool, n);
2100: }
2101:
2102: void
2103: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2104: {
2105:
2106: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2107: }
2108:
1.162 ad 2109: static bool __noinline
2110: pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
1.134 ad 2111: paddr_t *pap, int flags)
1.43 thorpej 2112: {
1.134 ad 2113: pcg_t *pcg, *cur;
2114: uint64_t ncsw;
2115: pool_cache_t pc;
1.43 thorpej 2116: void *object;
1.58 thorpej 2117:
1.168 yamt 2118: KASSERT(cc->cc_current->pcg_avail == 0);
2119: KASSERT(cc->cc_previous->pcg_avail == 0);
2120:
1.134 ad 2121: pc = cc->cc_cache;
2122: cc->cc_misses++;
1.43 thorpej 2123:
1.134 ad 2124: /*
              2125: 	 * Nothing was available locally. Try to grab a group
2126: * from the cache.
2127: */
1.162 ad 2128: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2129: ncsw = curlwp->l_ncsw;
2130: mutex_enter(&pc->pc_lock);
2131: pc->pc_contended++;
1.43 thorpej 2132:
1.134 ad 2133: /*
2134: * If we context switched while locking, then
2135: * our view of the per-CPU data is invalid:
2136: * retry.
2137: */
2138: if (curlwp->l_ncsw != ncsw) {
2139: mutex_exit(&pc->pc_lock);
1.162 ad 2140: return true;
1.43 thorpej 2141: }
1.102 chs 2142: }
1.43 thorpej 2143:
1.162 ad 2144: if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
1.43 thorpej 2145: /*
1.134 ad 2146: * If there's a full group, release our empty
2147: * group back to the cache. Install the full
2148: * group as cc_current and return.
1.43 thorpej 2149: */
1.162 ad 2150: if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
1.134 ad 2151: KASSERT(cur->pcg_avail == 0);
2152: cur->pcg_next = pc->pc_emptygroups;
2153: pc->pc_emptygroups = cur;
2154: pc->pc_nempty++;
1.87 thorpej 2155: }
1.142 ad 2156: KASSERT(pcg->pcg_avail == pcg->pcg_size);
1.134 ad 2157: cc->cc_current = pcg;
2158: pc->pc_fullgroups = pcg->pcg_next;
2159: pc->pc_hits++;
2160: pc->pc_nfull--;
2161: mutex_exit(&pc->pc_lock);
1.162 ad 2162: return true;
1.134 ad 2163: }
2164:
2165: /*
2166: * Nothing available locally or in cache. Take the slow
2167: * path: fetch a new object from the pool and construct
2168: * it.
2169: */
2170: pc->pc_misses++;
2171: mutex_exit(&pc->pc_lock);
1.162 ad 2172: splx(s);
1.134 ad 2173:
2174: object = pool_get(&pc->pc_pool, flags);
2175: *objectp = object;
1.162 ad 2176: if (__predict_false(object == NULL))
2177: return false;
1.125 ad 2178:
1.162 ad 2179: if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
1.134 ad 2180: pool_put(&pc->pc_pool, object);
2181: *objectp = NULL;
1.162 ad 2182: return false;
1.43 thorpej 2183: }
2184:
1.134 ad 2185: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2186: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2187:
1.134 ad 2188: if (pap != NULL) {
2189: #ifdef POOL_VTOPHYS
2190: *pap = POOL_VTOPHYS(object);
2191: #else
2192: *pap = POOL_PADDR_INVALID;
2193: #endif
1.102 chs 2194: }
1.43 thorpej 2195:
1.125 ad 2196: FREECHECK_OUT(&pc->pc_freecheck, object);
1.204 maxv 2197: pool_redzone_fill(&pc->pc_pool, object);
1.162 ad 2198: return false;
1.43 thorpej 2199: }
2200:
2201: /*
1.134 ad 2202: * pool_cache_get{,_paddr}:
1.43 thorpej 2203: *
1.134 ad 2204: * Get an object from a pool cache (optionally returning
2205: * the physical address of the object).
1.43 thorpej 2206: */
1.134 ad 2207: void *
2208: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2209: {
1.134 ad 2210: pool_cache_cpu_t *cc;
2211: pcg_t *pcg;
2212: void *object;
1.60 thorpej 2213: int s;
1.43 thorpej 2214:
1.184 rmind 2215: KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
1.185 rmind 2216: (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
1.190 jym 2217: "pool '%s' is IPL_NONE, but called from interrupt context\n",
2218: pc->pc_pool.pr_wchan);
1.184 rmind 2219:
1.155 ad 2220: if (flags & PR_WAITOK) {
1.154 yamt 2221: ASSERT_SLEEPABLE();
1.155 ad 2222: }
1.125 ad 2223:
1.162 ad 2224: /* Lock out interrupts and disable preemption. */
2225: s = splvm();
1.165 yamt 2226: while (/* CONSTCOND */ true) {
1.134 ad 2227: /* Try and allocate an object from the current group. */
1.162 ad 2228: cc = pc->pc_cpus[curcpu()->ci_index];
2229: KASSERT(cc->cc_cache == pc);
1.134 ad 2230: pcg = cc->cc_current;
1.162 ad 2231: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2232: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
1.162 ad 2233: if (__predict_false(pap != NULL))
1.134 ad 2234: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
1.148 yamt 2235: #if defined(DIAGNOSTIC)
1.134 ad 2236: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
1.163 ad 2237: KASSERT(pcg->pcg_avail < pcg->pcg_size);
1.134 ad 2238: KASSERT(object != NULL);
1.163 ad 2239: #endif
1.134 ad 2240: cc->cc_hits++;
1.162 ad 2241: splx(s);
1.134 ad 2242: FREECHECK_OUT(&pc->pc_freecheck, object);
1.204 maxv 2243: pool_redzone_fill(&pc->pc_pool, object);
1.134 ad 2244: return object;
1.43 thorpej 2245: }
2246:
2247: /*
1.134 ad 2248: * That failed. If the previous group isn't empty, swap
2249: * it with the current group and allocate from there.
1.43 thorpej 2250: */
1.134 ad 2251: pcg = cc->cc_previous;
1.162 ad 2252: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2253: cc->cc_previous = cc->cc_current;
2254: cc->cc_current = pcg;
2255: continue;
1.43 thorpej 2256: }
2257:
1.134 ad 2258: /*
2259: * Can't allocate from either group: try the slow path.
2260: * If get_slow() allocated an object for us, or if
1.162 ad 2261: * no more objects are available, it will return false.
1.134 ad 2262: * Otherwise, we need to retry.
2263: */
1.165 yamt 2264: if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2265: break;
2266: }
1.43 thorpej 2267:
1.134 ad 2268: return object;
1.51 thorpej 2269: }
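
/*
 * Usage sketch (hypothetical, continuing the "foo" example): allocate
 * a constructed object, optionally retrieving its physical address.
 * Note that even with PR_WAITOK the result can be NULL if the
 * constructor fails.
 */
#if 0
static struct foo *
foo_alloc(paddr_t *pap)
{

	return pool_cache_get_paddr(foo_cache, PR_WAITOK, pap);
}
#endif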
2270:
1.162 ad 2271: static bool __noinline
2272: pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
1.51 thorpej 2273: {
1.200 pooka 2274: struct lwp *l = curlwp;
1.163 ad 2275: pcg_t *pcg, *cur;
1.134 ad 2276: uint64_t ncsw;
2277: pool_cache_t pc;
1.51 thorpej 2278:
1.168 yamt 2279: KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2280: KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2281:
1.134 ad 2282: pc = cc->cc_cache;
1.171 ad 2283: pcg = NULL;
1.134 ad 2284: cc->cc_misses++;
1.200 pooka 2285: ncsw = l->l_ncsw;
1.43 thorpej 2286:
1.171 ad 2287: /*
2288: * If there are no empty groups in the cache then allocate one
2289: * while still unlocked.
2290: */
2291: if (__predict_false(pc->pc_emptygroups == NULL)) {
2292: if (__predict_true(!pool_cache_disable)) {
2293: pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2294: }
1.200 pooka 2295: /*
2296: * If pool_get() blocked, then our view of
2297: * the per-CPU data is invalid: retry.
2298: */
2299: if (__predict_false(l->l_ncsw != ncsw)) {
2300: if (pcg != NULL) {
2301: pool_put(pc->pc_pcgpool, pcg);
2302: }
2303: return true;
2304: }
1.171 ad 2305: if (__predict_true(pcg != NULL)) {
2306: pcg->pcg_avail = 0;
2307: pcg->pcg_size = pc->pc_pcgsize;
2308: }
2309: }
2310:
1.162 ad 2311: /* Lock the cache. */
2312: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2313: mutex_enter(&pc->pc_lock);
2314: pc->pc_contended++;
1.162 ad 2315:
1.163 ad 2316: /*
2317: * If we context switched while locking, then our view of
2318: * the per-CPU data is invalid: retry.
2319: */
1.200 pooka 2320: if (__predict_false(l->l_ncsw != ncsw)) {
1.163 ad 2321: mutex_exit(&pc->pc_lock);
1.171 ad 2322: if (pcg != NULL) {
2323: pool_put(pc->pc_pcgpool, pcg);
2324: }
1.163 ad 2325: return true;
2326: }
1.162 ad 2327: }
1.102 chs 2328:
1.163 ad 2329: /* If there are no empty groups in the cache then allocate one. */
1.171 ad 2330: if (pcg == NULL && pc->pc_emptygroups != NULL) {
2331: pcg = pc->pc_emptygroups;
1.163 ad 2332: pc->pc_emptygroups = pcg->pcg_next;
2333: pc->pc_nempty--;
1.134 ad 2334: }
1.130 ad 2335:
1.162 ad 2336: /*
              2337: 	 * If there's an empty group, release our full group back
              2338: 	 * to the cache. Install the empty group on the local CPU
2339: * and return.
2340: */
1.163 ad 2341: if (pcg != NULL) {
1.134 ad 2342: KASSERT(pcg->pcg_avail == 0);
1.162 ad 2343: if (__predict_false(cc->cc_previous == &pcg_dummy)) {
1.146 ad 2344: cc->cc_previous = pcg;
2345: } else {
1.162 ad 2346: cur = cc->cc_current;
2347: if (__predict_true(cur != &pcg_dummy)) {
1.163 ad 2348: KASSERT(cur->pcg_avail == cur->pcg_size);
1.146 ad 2349: cur->pcg_next = pc->pc_fullgroups;
2350: pc->pc_fullgroups = cur;
2351: pc->pc_nfull++;
2352: }
2353: cc->cc_current = pcg;
2354: }
1.163 ad 2355: pc->pc_hits++;
1.134 ad 2356: mutex_exit(&pc->pc_lock);
1.162 ad 2357: return true;
1.102 chs 2358: }
1.105 christos 2359:
1.134 ad 2360: /*
1.162 ad 2361: * Nothing available locally or in cache, and we didn't
2362: * allocate an empty group. Take the slow path and destroy
2363: * the object here and now.
1.134 ad 2364: */
2365: pc->pc_misses++;
2366: mutex_exit(&pc->pc_lock);
1.162 ad 2367: splx(s);
2368: pool_cache_destruct_object(pc, object);
1.105 christos 2369:
1.162 ad 2370: return false;
1.134 ad 2371: }
1.102 chs 2372:
1.43 thorpej 2373: /*
1.134 ad 2374: * pool_cache_put{,_paddr}:
1.43 thorpej 2375: *
1.134 ad 2376: * Put an object back to the pool cache (optionally caching the
2377: * physical address of the object).
1.43 thorpej 2378: */
1.101 thorpej 2379: void
1.134 ad 2380: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2381: {
1.134 ad 2382: pool_cache_cpu_t *cc;
2383: pcg_t *pcg;
2384: int s;
1.101 thorpej 2385:
1.172 yamt 2386: KASSERT(object != NULL);
1.204 maxv 2387: pool_redzone_check(&pc->pc_pool, object);
1.134 ad 2388: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2389:
1.162 ad 2390: /* Lock out interrupts and disable preemption. */
2391: s = splvm();
1.165 yamt 2392: while (/* CONSTCOND */ true) {
1.134 ad 2393: /* If the current group isn't full, release it there. */
1.162 ad 2394: cc = pc->pc_cpus[curcpu()->ci_index];
2395: KASSERT(cc->cc_cache == pc);
1.134 ad 2396: pcg = cc->cc_current;
1.162 ad 2397: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2398: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2399: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2400: pcg->pcg_avail++;
2401: cc->cc_hits++;
1.162 ad 2402: splx(s);
1.134 ad 2403: return;
2404: }
1.43 thorpej 2405:
1.134 ad 2406: /*
1.162 ad 2407: * That failed. If the previous group isn't full, swap
1.134 ad 2408: * it with the current group and try again.
2409: */
2410: pcg = cc->cc_previous;
1.162 ad 2411: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2412: cc->cc_previous = cc->cc_current;
2413: cc->cc_current = pcg;
2414: continue;
2415: }
1.43 thorpej 2416:
1.134 ad 2417: /*
2418: * Can't free to either group: try the slow path.
2419: * If put_slow() releases the object for us, it
1.162 ad 2420: * will return false. Otherwise we need to retry.
1.134 ad 2421: */
1.165 yamt 2422: if (!pool_cache_put_slow(cc, s, object))
2423: break;
2424: }
1.43 thorpej 2425: }
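
/*
 * Usage sketch (hypothetical): return an object to the cache.  The
 * destructor is deferred; it runs only if the object is later evicted
 * from the cache, e.g. by pool_cache_invalidate() or reclaim.
 */
#if 0
static void
foo_free(struct foo *f)
{

	pool_cache_put(foo_cache, f);
}
#endif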
2426:
2427: /*
1.196 jym 2428: * pool_cache_transfer:
1.43 thorpej 2429: *
1.134 ad 2430: * Transfer objects from the per-CPU cache to the global cache.
2431: * Run within a cross-call thread.
1.43 thorpej 2432: */
2433: static void
1.196 jym 2434: pool_cache_transfer(pool_cache_t pc)
1.43 thorpej 2435: {
1.134 ad 2436: pool_cache_cpu_t *cc;
2437: pcg_t *prev, *cur, **list;
1.162 ad 2438: int s;
1.134 ad 2439:
1.162 ad 2440: s = splvm();
2441: mutex_enter(&pc->pc_lock);
2442: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2443: cur = cc->cc_current;
1.169 yamt 2444: cc->cc_current = __UNCONST(&pcg_dummy);
1.134 ad 2445: prev = cc->cc_previous;
1.169 yamt 2446: cc->cc_previous = __UNCONST(&pcg_dummy);
1.162 ad 2447: if (cur != &pcg_dummy) {
1.142 ad 2448: if (cur->pcg_avail == cur->pcg_size) {
1.134 ad 2449: list = &pc->pc_fullgroups;
2450: pc->pc_nfull++;
2451: } else if (cur->pcg_avail == 0) {
2452: list = &pc->pc_emptygroups;
2453: pc->pc_nempty++;
2454: } else {
2455: list = &pc->pc_partgroups;
2456: pc->pc_npart++;
2457: }
2458: cur->pcg_next = *list;
2459: *list = cur;
2460: }
1.162 ad 2461: if (prev != &pcg_dummy) {
1.142 ad 2462: if (prev->pcg_avail == prev->pcg_size) {
1.134 ad 2463: list = &pc->pc_fullgroups;
2464: pc->pc_nfull++;
2465: } else if (prev->pcg_avail == 0) {
2466: list = &pc->pc_emptygroups;
2467: pc->pc_nempty++;
2468: } else {
2469: list = &pc->pc_partgroups;
2470: pc->pc_npart++;
2471: }
2472: prev->pcg_next = *list;
2473: *list = prev;
2474: }
2475: mutex_exit(&pc->pc_lock);
2476: splx(s);
1.3 pk 2477: }
1.66 thorpej 2478:
2479: /*
2480: * Pool backend allocators.
2481: *
2482: * Each pool has a backend allocator that handles allocation, deallocation,
2483: * and any additional draining that might be needed.
2484: *
2485: * We provide two standard allocators:
2486: *
2487: * pool_allocator_kmem - the default when no allocator is specified
2488: *
2489: * pool_allocator_nointr - used for pools that will not be accessed
2490: * in interrupt context.
2491: */
2492: void *pool_page_alloc(struct pool *, int);
2493: void pool_page_free(struct pool *, void *);
2494:
1.112 bjh21 2495: #ifdef POOL_SUBPAGE
2496: struct pool_allocator pool_allocator_kmem_fullpage = {
1.192 rmind 2497: .pa_alloc = pool_page_alloc,
2498: .pa_free = pool_page_free,
2499: .pa_pagesz = 0
1.112 bjh21 2500: };
2501: #else
1.66 thorpej 2502: struct pool_allocator pool_allocator_kmem = {
1.191 para 2503: .pa_alloc = pool_page_alloc,
2504: .pa_free = pool_page_free,
2505: .pa_pagesz = 0
1.66 thorpej 2506: };
1.112 bjh21 2507: #endif
1.66 thorpej 2508:
1.112 bjh21 2509: #ifdef POOL_SUBPAGE
2510: struct pool_allocator pool_allocator_nointr_fullpage = {
1.194 para 2511: .pa_alloc = pool_page_alloc,
2512: .pa_free = pool_page_free,
1.192 rmind 2513: .pa_pagesz = 0
1.112 bjh21 2514: };
2515: #else
1.66 thorpej 2516: struct pool_allocator pool_allocator_nointr = {
1.191 para 2517: .pa_alloc = pool_page_alloc,
2518: .pa_free = pool_page_free,
2519: .pa_pagesz = 0
1.66 thorpej 2520: };
1.112 bjh21 2521: #endif
1.66 thorpej 2522:
2523: #ifdef POOL_SUBPAGE
2524: void *pool_subpage_alloc(struct pool *, int);
2525: void pool_subpage_free(struct pool *, void *);
2526:
1.112 bjh21 2527: struct pool_allocator pool_allocator_kmem = {
1.193 he 2528: .pa_alloc = pool_subpage_alloc,
2529: .pa_free = pool_subpage_free,
2530: .pa_pagesz = POOL_SUBPAGE
1.112 bjh21 2531: };
2532:
2533: struct pool_allocator pool_allocator_nointr = {
1.192 rmind 2534: .pa_alloc = pool_subpage_alloc,
2535: .pa_free = pool_subpage_free,
2536: .pa_pagesz = POOL_SUBPAGE
1.66 thorpej 2537: };
2538: #endif /* POOL_SUBPAGE */
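
/*
 * Sketch of a custom back-end allocator (hypothetical; foo_arena is a
 * stand-in for a private vmem arena): a pool that must draw its pages
 * from somewhere other than the kernel map supplies pa_alloc/pa_free
 * hooks with these signatures and passes the structure to pool_init()
 * or pool_cache_init().  pa_pagesz == 0 selects the default page size.
 */
#if 0
static vmem_t *foo_arena;

static void *
foo_page_alloc(struct pool *pp, int flags)
{
	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
	vmem_addr_t va;

	if (vmem_alloc(foo_arena, pp->pr_alloc->pa_pagesz,
	    vflags | VM_INSTANTFIT, &va) != 0)
		return NULL;
	return (void *)va;
}

static void
foo_page_free(struct pool *pp, void *v)
{

	vmem_free(foo_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
}

struct pool_allocator foo_allocator = {
	.pa_alloc = foo_page_alloc,
	.pa_free = foo_page_free,
	.pa_pagesz = 0
};
#endif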
2539:
1.208 chs 2540: struct pool_allocator pool_allocator_big[] = {
2541: {
2542: .pa_alloc = pool_page_alloc,
2543: .pa_free = pool_page_free,
2544: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0),
2545: },
2546: {
2547: .pa_alloc = pool_page_alloc,
2548: .pa_free = pool_page_free,
2549: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1),
2550: },
2551: {
2552: .pa_alloc = pool_page_alloc,
2553: .pa_free = pool_page_free,
2554: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2),
2555: },
2556: {
2557: .pa_alloc = pool_page_alloc,
2558: .pa_free = pool_page_free,
2559: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3),
2560: },
2561: {
2562: .pa_alloc = pool_page_alloc,
2563: .pa_free = pool_page_free,
2564: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4),
2565: },
2566: {
2567: .pa_alloc = pool_page_alloc,
2568: .pa_free = pool_page_free,
2569: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5),
2570: },
2571: {
2572: .pa_alloc = pool_page_alloc,
2573: .pa_free = pool_page_free,
2574: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6),
2575: },
2576: {
2577: .pa_alloc = pool_page_alloc,
2578: .pa_free = pool_page_free,
2579: .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7),
2580: }
2581: };
2582:
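/*
 * pool_bigidx:
 *
 *	Return the index of the smallest pool_allocator_big[] entry
 *	whose page size (1 << (POOL_ALLOCATOR_BIG_BASE + index)) can
 *	hold an item of the given size; panic if none can.
 */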
2583: static int
2584: pool_bigidx(size_t size)
2585: {
2586: int i;
2587:
2588: for (i = 0; i < __arraycount(pool_allocator_big); i++) {
2589: if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size)
2590: return i;
2591: }
2592: panic("pool item size %zu too large, use a custom allocator", size);
2593: }
2594:
1.117 yamt 2595: static void *
2596: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2597: {
1.117 yamt 2598: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2599: void *res;
2600:
1.117 yamt 2601: res = (*pa->pa_alloc)(pp, flags);
2602: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2603: /*
1.117 yamt 2604: * We only run the drain hook here if PR_NOWAIT.
2605: * In other cases, the hook will be run in
2606: * pool_reclaim().
1.66 thorpej 2607: */
1.117 yamt 2608: if (pp->pr_drain_hook != NULL) {
2609: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2610: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2611: }
1.117 yamt 2612: }
2613: return res;
1.66 thorpej 2614: }
2615:
1.117 yamt 2616: static void
1.66 thorpej 2617: pool_allocator_free(struct pool *pp, void *v)
2618: {
2619: struct pool_allocator *pa = pp->pr_alloc;
2620:
2621: (*pa->pa_free)(pp, v);
2622: }
2623:
2624: void *
1.124 yamt 2625: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2626: {
1.192 rmind 2627: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
1.191 para 2628: vmem_addr_t va;
1.192 rmind 2629: int ret;
1.191 para 2630:
1.192 rmind 2631: ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2632: vflags | VM_INSTANTFIT, &va);
1.66 thorpej 2633:
1.192 rmind 2634: return ret ? NULL : (void *)va;
1.66 thorpej 2635: }
2636:
2637: void
1.124 yamt 2638: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2639: {
2640:
1.191 para 2641: uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
1.98 yamt 2642: }
2643:
2644: static void *
1.124 yamt 2645: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2646: {
1.192 rmind 2647: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2648: vmem_addr_t va;
2649: int ret;
1.191 para 2650:
1.192 rmind 2651: ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2652: vflags | VM_INSTANTFIT, &va);
1.98 yamt 2653:
1.192 rmind 2654: return ret ? NULL : (void *)va;
1.98 yamt 2655: }
2656:
2657: static void
1.124 yamt 2658: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2659: {
2660:
1.192 rmind 2661: vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
1.66 thorpej 2662: }
2663:
1.204 maxv 2664: #ifdef POOL_REDZONE
2665: #if defined(_LP64)
2666: # define PRIME 0x9e37fffffffc0000UL
2667: #else /* defined(_LP64) */
2668: # define PRIME 0x9e3779b1
2669: #endif /* defined(_LP64) */
2670: #define STATIC_BYTE 0xFE
2671: CTASSERT(POOL_REDZONE_SIZE > 1);
2672:
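/*
 * pool_pattern_generate:
 *
 *	Derive the red-zone fill byte for an address: multiply by a
 *	large prime and keep the top byte, giving each position a
 *	cheap, reproducible, position-dependent expected value.
 */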
2673: static inline uint8_t
2674: pool_pattern_generate(const void *p)
2675: {
              2676: 	return (uint8_t)((((uintptr_t)p) * PRIME)
              2677: 	   >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
2678: }
2679:
2680: static void
2681: pool_redzone_init(struct pool *pp, size_t requested_size)
2682: {
2683: size_t nsz;
2684:
2685: if (pp->pr_roflags & PR_NOTOUCH) {
2686: pp->pr_reqsize = 0;
2687: pp->pr_redzone = false;
2688: return;
2689: }
2690:
2691: /*
2692: * We may have extended the requested size earlier; check if
2693: * there's naturally space in the padding for a red zone.
2694: */
2695: if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) {
2696: pp->pr_reqsize = requested_size;
2697: pp->pr_redzone = true;
2698: return;
2699: }
2700:
2701: /*
2702: * No space in the natural padding; check if we can extend a
2703: * bit the size of the pool.
2704: */
2705: nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align);
2706: if (nsz <= pp->pr_alloc->pa_pagesz) {
2707: /* Ok, we can */
2708: pp->pr_size = nsz;
2709: pp->pr_reqsize = requested_size;
2710: pp->pr_redzone = true;
2711: } else {
2712: /* No space for a red zone... snif :'( */
2713: pp->pr_reqsize = 0;
2714: pp->pr_redzone = false;
2715: printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
2716: }
2717: }
2718:
2719: static void
2720: pool_redzone_fill(struct pool *pp, void *p)
2721: {
2722: uint8_t *cp, pat;
2723: const uint8_t *ep;
2724:
2725: if (!pp->pr_redzone)
2726: return;
2727:
2728: cp = (uint8_t *)p + pp->pr_reqsize;
2729: ep = cp + POOL_REDZONE_SIZE;
2730:
2731: /*
2732: * We really don't want the first byte of the red zone to be '\0';
2733: * an off-by-one in a string may not be properly detected.
2734: */
2735: pat = pool_pattern_generate(cp);
2736: *cp = (pat == '\0') ? STATIC_BYTE: pat;
2737: cp++;
2738:
2739: while (cp < ep) {
2740: *cp = pool_pattern_generate(cp);
2741: cp++;
2742: }
2743: }
2744:
2745: static void
2746: pool_redzone_check(struct pool *pp, void *p)
2747: {
2748: uint8_t *cp, pat, expected;
2749: const uint8_t *ep;
2750:
2751: if (!pp->pr_redzone)
2752: return;
2753:
2754: cp = (uint8_t *)p + pp->pr_reqsize;
2755: ep = cp + POOL_REDZONE_SIZE;
2756:
2757: pat = pool_pattern_generate(cp);
2758: expected = (pat == '\0') ? STATIC_BYTE: pat;
2759: if (expected != *cp) {
2760: panic("%s: %p: 0x%02x != 0x%02x\n",
2761: __func__, cp, *cp, expected);
2762: }
2763: cp++;
2764:
2765: while (cp < ep) {
2766: expected = pool_pattern_generate(cp);
2767: if (*cp != expected) {
2768: panic("%s: %p: 0x%02x != 0x%02x\n",
2769: __func__, cp, *cp, expected);
2770: }
2771: cp++;
2772: }
2773: }
2774:
2775: #endif /* POOL_REDZONE */
2776:
2777:
1.66 thorpej 2778: #ifdef POOL_SUBPAGE
2779: /* Sub-page allocator, for machines with large hardware pages. */
2780: void *
2781: pool_subpage_alloc(struct pool *pp, int flags)
2782: {
1.134 ad 2783: return pool_get(&psppool, flags);
1.66 thorpej 2784: }
2785:
2786: void
2787: pool_subpage_free(struct pool *pp, void *v)
2788: {
2789: pool_put(&psppool, v);
2790: }
2791:
1.112 bjh21 2792: #endif /* POOL_SUBPAGE */
1.141 yamt 2793:
2794: #if defined(DDB)
2795: static bool
2796: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2797: {
2798:
2799: return (uintptr_t)ph->ph_page <= addr &&
2800: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2801: }
2802:
1.143 yamt 2803: static bool
2804: pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2805: {
2806:
2807: return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2808: }
2809:
2810: static bool
2811: pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2812: {
2813: int i;
2814:
2815: if (pcg == NULL) {
2816: return false;
2817: }
1.144 yamt 2818: for (i = 0; i < pcg->pcg_avail; i++) {
1.143 yamt 2819: if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2820: return true;
2821: }
2822: }
2823: return false;
2824: }
2825:
2826: static bool
2827: pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2828: {
2829:
2830: if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2831: unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2832: pool_item_bitmap_t *bitmap =
2833: ph->ph_bitmap + (idx / BITMAP_SIZE);
2834: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2835:
2836: return (*bitmap & mask) == 0;
2837: } else {
2838: struct pool_item *pi;
2839:
2840: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2841: if (pool_in_item(pp, pi, addr)) {
2842: return false;
2843: }
2844: }
2845: return true;
2846: }
2847: }
2848:
1.141 yamt 2849: void
2850: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2851: {
2852: struct pool *pp;
2853:
1.145 ad 2854: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.141 yamt 2855: struct pool_item_header *ph;
2856: uintptr_t item;
1.143 yamt 2857: bool allocated = true;
2858: bool incache = false;
2859: bool incpucache = false;
2860: char cpucachestr[32];
1.141 yamt 2861:
2862: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2863: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2864: if (pool_in_page(pp, ph, addr)) {
2865: goto found;
2866: }
2867: }
2868: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2869: if (pool_in_page(pp, ph, addr)) {
1.143 yamt 2870: allocated =
2871: pool_allocated(pp, ph, addr);
2872: goto found;
2873: }
2874: }
2875: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2876: if (pool_in_page(pp, ph, addr)) {
2877: allocated = false;
1.141 yamt 2878: goto found;
2879: }
2880: }
2881: continue;
2882: } else {
2883: ph = pr_find_pagehead_noalign(pp, (void *)addr);
2884: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2885: continue;
2886: }
1.143 yamt 2887: allocated = pool_allocated(pp, ph, addr);
1.141 yamt 2888: }
2889: found:
1.143 yamt 2890: if (allocated && pp->pr_cache) {
2891: pool_cache_t pc = pp->pr_cache;
2892: struct pool_cache_group *pcg;
2893: int i;
2894:
2895: for (pcg = pc->pc_fullgroups; pcg != NULL;
2896: pcg = pcg->pcg_next) {
2897: if (pool_in_cg(pp, pcg, addr)) {
2898: incache = true;
2899: goto print;
2900: }
2901: }
1.183 ad 2902: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.143 yamt 2903: pool_cache_cpu_t *cc;
2904:
2905: if ((cc = pc->pc_cpus[i]) == NULL) {
2906: continue;
2907: }
2908: if (pool_in_cg(pp, cc->cc_current, addr) ||
2909: pool_in_cg(pp, cc->cc_previous, addr)) {
2910: struct cpu_info *ci =
1.170 ad 2911: cpu_lookup(i);
1.143 yamt 2912:
2913: incpucache = true;
2914: snprintf(cpucachestr,
2915: sizeof(cpucachestr),
2916: "cached by CPU %u",
1.153 martin 2917: ci->ci_index);
1.143 yamt 2918: goto print;
2919: }
2920: }
2921: }
2922: print:
1.141 yamt 2923: item = (uintptr_t)ph->ph_page + ph->ph_off;
2924: item = item + rounddown(addr - item, pp->pr_size);
1.143 yamt 2925: (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
1.141 yamt 2926: (void *)addr, item, (size_t)(addr - item),
1.143 yamt 2927: pp->pr_wchan,
2928: incpucache ? cpucachestr :
2929: incache ? "cached" : allocated ? "allocated" : "free");
1.141 yamt 2930: }
2931: }
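
/*
 * ddb(4) usage sketch (the address and pool name are hypothetical):
 * pool_whatis() backs ddb's "whatis" command and reports, for each
 * matching pool, the containing item and its state:
 *
 *	db{0}> whatis 0xffff800012345678
 *	0xffff800012345678 is 0xffff800012345600+120 in POOL 'foocache' (allocated)
 */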
2932: #endif /* defined(DDB) */
1.203 joerg 2933:
2934: static int
2935: pool_sysctl(SYSCTLFN_ARGS)
2936: {
2937: struct pool_sysctl data;
2938: struct pool *pp;
2939: struct pool_cache *pc;
2940: pool_cache_cpu_t *cc;
2941: int error;
2942: size_t i, written;
2943:
2944: if (oldp == NULL) {
2945: *oldlenp = 0;
2946: TAILQ_FOREACH(pp, &pool_head, pr_poollist)
2947: *oldlenp += sizeof(data);
2948: return 0;
2949: }
2950:
2951: memset(&data, 0, sizeof(data));
2952: error = 0;
2953: written = 0;
2954: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2955: if (written + sizeof(data) > *oldlenp)
2956: break;
2957: strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
2958: data.pr_pagesize = pp->pr_alloc->pa_pagesz;
2959: data.pr_flags = pp->pr_roflags | pp->pr_flags;
2960: #define COPY(field) data.field = pp->field
2961: COPY(pr_size);
2962:
2963: COPY(pr_itemsperpage);
2964: COPY(pr_nitems);
2965: COPY(pr_nout);
2966: COPY(pr_hardlimit);
2967: COPY(pr_npages);
2968: COPY(pr_minpages);
2969: COPY(pr_maxpages);
2970:
2971: COPY(pr_nget);
2972: COPY(pr_nfail);
2973: COPY(pr_nput);
2974: COPY(pr_npagealloc);
2975: COPY(pr_npagefree);
2976: COPY(pr_hiwat);
2977: COPY(pr_nidle);
2978: #undef COPY
2979:
2980: data.pr_cache_nmiss_pcpu = 0;
2981: data.pr_cache_nhit_pcpu = 0;
2982: if (pp->pr_cache) {
2983: pc = pp->pr_cache;
2984: data.pr_cache_meta_size = pc->pc_pcgsize;
2985: data.pr_cache_nfull = pc->pc_nfull;
2986: data.pr_cache_npartial = pc->pc_npart;
2987: data.pr_cache_nempty = pc->pc_nempty;
2988: data.pr_cache_ncontended = pc->pc_contended;
2989: data.pr_cache_nmiss_global = pc->pc_misses;
2990: data.pr_cache_nhit_global = pc->pc_hits;
2991: for (i = 0; i < pc->pc_ncpu; ++i) {
2992: cc = pc->pc_cpus[i];
2993: if (cc == NULL)
2994: continue;
1.206 knakahar 2995: data.pr_cache_nmiss_pcpu += cc->cc_misses;
2996: data.pr_cache_nhit_pcpu += cc->cc_hits;
1.203 joerg 2997: }
2998: } else {
2999: data.pr_cache_meta_size = 0;
3000: data.pr_cache_nfull = 0;
3001: data.pr_cache_npartial = 0;
3002: data.pr_cache_nempty = 0;
3003: data.pr_cache_ncontended = 0;
3004: data.pr_cache_nmiss_global = 0;
3005: data.pr_cache_nhit_global = 0;
3006: }
3007:
3008: error = sysctl_copyout(l, &data, oldp, sizeof(data));
3009: if (error)
3010: break;
3011: written += sizeof(data);
3012: oldp = (char *)oldp + sizeof(data);
3013: }
3014:
3015: *oldlenp = written;
3016: return error;
3017: }
3018:
3019: SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
3020: {
3021: const struct sysctlnode *rnode = NULL;
3022:
3023: sysctl_createv(clog, 0, NULL, &rnode,
3024: CTLFLAG_PERMANENT,
3025: CTLTYPE_STRUCT, "pool",
3026: SYSCTL_DESCR("Get pool statistics"),
3027: pool_sysctl, 0, NULL, 0,
3028: CTL_KERN, CTL_CREATE, CTL_EOL);
3029: }
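
/*
 * Userland sketch (not kernel code; a hypothetical program assuming
 * struct pool_sysctl from <sys/pool.h> and the kern.pool node created
 * above): size the buffer by passing a NULL oldp, then fetch and walk
 * the per-pool records, much as a statistics tool might.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct pool_sysctl *pools;
	size_t len, i;

	if (sysctlbyname("kern.pool", NULL, &len, NULL, 0) == -1)
		return 1;
	if ((pools = malloc(len)) == NULL)
		return 1;
	if (sysctlbyname("kern.pool", pools, &len, NULL, 0) == -1)
		return 1;
	for (i = 0; i < len / sizeof(*pools); i++)
		printf("%-16s %llu gets, %llu puts\n", pools[i].pr_wchan,
		    (unsigned long long)pools[i].pr_nget,
		    (unsigned long long)pools[i].pr_nput);
	free(pools);
	return 0;
}
#endif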