Annotation of src/sys/kern/subr_pool.c, Revision 1.206.2.1
1.206.2.1! pgoyette 1: /* $NetBSD: subr_pool.c,v 1.207 2017/03/14 03:13:50 riastradh Exp $ */
1.1 pk 2:
3: /*-
1.204 maxv 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
1.183 ad 5: * The NetBSD Foundation, Inc.
1.1 pk 6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 9: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.204 maxv 10: * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
11: * Maxime Villard.
1.1 pk 12: *
13: * Redistribution and use in source and binary forms, with or without
14: * modification, are permitted provided that the following conditions
15: * are met:
16: * 1. Redistributions of source code must retain the above copyright
17: * notice, this list of conditions and the following disclaimer.
18: * 2. Redistributions in binary form must reproduce the above copyright
19: * notice, this list of conditions and the following disclaimer in the
20: * documentation and/or other materials provided with the distribution.
21: *
22: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32: * POSSIBILITY OF SUCH DAMAGE.
33: */
1.64 lukem 34:
35: #include <sys/cdefs.h>
1.206.2.1! pgoyette 36: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.207 2017/03/14 03:13:50 riastradh Exp $");
1.24 scottr 37:
1.205 pooka 38: #ifdef _KERNEL_OPT
1.141 yamt 39: #include "opt_ddb.h"
1.28 thorpej 40: #include "opt_lockdebug.h"
1.205 pooka 41: #endif
1.1 pk 42:
43: #include <sys/param.h>
44: #include <sys/systm.h>
1.203 joerg 45: #include <sys/sysctl.h>
1.135 yamt 46: #include <sys/bitops.h>
1.1 pk 47: #include <sys/proc.h>
48: #include <sys/errno.h>
49: #include <sys/kernel.h>
1.191 para 50: #include <sys/vmem.h>
1.1 pk 51: #include <sys/pool.h>
1.20 thorpej 52: #include <sys/syslog.h>
1.125 ad 53: #include <sys/debug.h>
1.134 ad 54: #include <sys/lockdebug.h>
55: #include <sys/xcall.h>
56: #include <sys/cpu.h>
1.145 ad 57: #include <sys/atomic.h>
1.3 pk 58:
1.187 uebayasi 59: #include <uvm/uvm_extern.h>
1.3 pk 60:
1.1 pk 61: /*
62: * Pool resource management utility.
1.3 pk 63: *
1.88 chs 64: * Memory is allocated in pages which are split into pieces according to
65: * the pool item size. Each page is kept on one of three lists in the
66: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
67: * for empty, full and partially-full pages respectively. The individual
68: * pool items are on a linked list headed by `ph_itemlist' in each page
69: * header. The memory for building the page list is either taken from
70: * the allocated pages themselves (for small pool items) or taken from
71: * an internal pool of page headers (`phpool').
1.1 pk 72: */
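/*
 * A minimal usage sketch of the interface implemented below ("struct foo"
 * and "foo_pool" are hypothetical names, not part of this file):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 *	pool_destroy(&foo_pool);
 */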
73:
1.202 abs 74: /* List of all pools. Non-static, as it is needed by 'vmstat -i'. */
75: TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.134 ad 76:
1.3 pk 77: /* Private pool for page header structures */
1.97 yamt 78: #define PHPOOL_MAX 8
79: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 80: #define PHPOOL_FREELIST_NELEM(idx) \
81: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
1.3 pk 82:
1.62 bjh21 83: #ifdef POOL_SUBPAGE
84: /* Pool of subpages for use by normal pools. */
85: static struct pool psppool;
86: #endif
87:
1.204 maxv 88: #ifdef POOL_REDZONE
89: # define POOL_REDZONE_SIZE 2
90: static void pool_redzone_init(struct pool *, size_t);
91: static void pool_redzone_fill(struct pool *, void *);
92: static void pool_redzone_check(struct pool *, void *);
93: #else
94: # define pool_redzone_init(pp, sz) /* NOTHING */
95: # define pool_redzone_fill(pp, ptr) /* NOTHING */
96: # define pool_redzone_check(pp, ptr) /* NOTHING */
97: #endif
98:
1.98 yamt 99: static void *pool_page_alloc_meta(struct pool *, int);
100: static void pool_page_free_meta(struct pool *, void *);
101:
102: /* allocator for pool metadata */
1.134 ad 103: struct pool_allocator pool_allocator_meta = {
1.191 para 104: .pa_alloc = pool_page_alloc_meta,
105: .pa_free = pool_page_free_meta,
106: .pa_pagesz = 0
1.98 yamt 107: };
108:
1.3 pk 109: /* # of seconds to retain page after last use */
110: int pool_inactive_time = 10;
111:
112: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 113: static struct pool *drainpp;
114:
1.134 ad 115: /* This lock protects both pool_head and drainpp. */
116: static kmutex_t pool_head_lock;
117: static kcondvar_t pool_busy;
1.3 pk 118:
1.178 elad 119: /* This lock protects initialization of a potentially shared pool allocator */
120: static kmutex_t pool_allocator_lock;
121:
1.135 yamt 122: typedef uint32_t pool_item_bitmap_t;
123: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
124: #define BITMAP_MASK (BITMAP_SIZE - 1)
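/*
 * Worked example of the bitmap arithmetic used by the PR_NOTOUCH item
 * routines below: with 32-bit bitmap words, item index 37 is tracked in
 * word 37 / BITMAP_SIZE = 1, at bit 37 & BITMAP_MASK = 5; a set bit
 * marks a free item.
 */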
1.99 yamt 125:
1.3 pk 126: struct pool_item_header {
127: /* Page headers */
1.88 chs 128: LIST_ENTRY(pool_item_header)
1.3 pk 129: ph_pagelist; /* pool page list */
1.88 chs 130: SPLAY_ENTRY(pool_item_header)
131: ph_node; /* Off-page page headers */
1.128 christos 132: void * ph_page; /* this page's address */
1.151 yamt 133: uint32_t ph_time; /* last referenced */
1.135 yamt 134: uint16_t ph_nmissing; /* # of chunks in use */
1.141 yamt 135: uint16_t ph_off; /* start offset in page */
1.97 yamt 136: union {
137: /* !PR_NOTOUCH */
138: struct {
1.102 chs 139: LIST_HEAD(, pool_item)
1.97 yamt 140: phu_itemlist; /* chunk list for this page */
141: } phu_normal;
142: /* PR_NOTOUCH */
143: struct {
1.141 yamt 144: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 145: } phu_notouch;
146: } ph_u;
1.3 pk 147: };
1.97 yamt 148: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.135 yamt 149: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 150:
1.1 pk 151: struct pool_item {
1.3 pk 152: #ifdef DIAGNOSTIC
1.82 thorpej 153: u_int pi_magic;
1.33 chs 154: #endif
1.134 ad 155: #define PI_MAGIC 0xdeaddeadU
1.3 pk 156: /* Other entries use only this list entry */
1.102 chs 157: LIST_ENTRY(pool_item) pi_list;
1.3 pk 158: };
159:
1.53 thorpej 160: #define POOL_NEEDS_CATCHUP(pp) \
161: ((pp)->pr_nitems < (pp)->pr_minitems)
162:
1.43 thorpej 163: /*
164: * Pool cache management.
165: *
166: * Pool caches provide a way for constructed objects to be cached by the
167: * pool subsystem. This can improve performance by avoiding needless
168: * object construction and destruction; destruction is deferred until
169: * absolutely necessary.
170: *
1.134 ad 171: * Caches are grouped into cache groups. Each cache group references up
172: * to PCG_NOBJECTS_NORMAL or PCG_NOBJECTS_LARGE constructed objects. When a cache allocates an
173: * object from the pool, it calls the object's constructor and places it
174: * into a cache group. When a cache group frees an object back to the
175: * pool, it first calls the object's destructor. This allows the object
176: * to persist in constructed form while freed to the cache.
177: *
178: * The pool references each cache, so that when a pool is drained by the
179: * pagedaemon, it can drain each individual cache as well. Each time a
180: * cache is drained, the most idle cache group is freed to the pool in
181: * its entirety.
1.43 thorpej 182: *
183: * Pool caches are laid on top of pools. By layering them, we can avoid
184: * the complexity of cache management for pools which would not benefit
185: * from it.
186: */
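/*
 * A minimal cache usage sketch ("struct foo", foo_ctor(), foo_dtor() and
 * "foo_cache" are hypothetical; the constructor has the signature
 * int (*)(void *arg, void *obj, int flags), the destructor
 * void (*)(void *arg, void *obj)):
 *
 *	pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
 *	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...	(f arrives constructed)
 *	pool_cache_put(foo_cache, f);
 */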
187:
1.142 ad 188: static struct pool pcg_normal_pool;
189: static struct pool pcg_large_pool;
1.134 ad 190: static struct pool cache_pool;
191: static struct pool cache_cpu_pool;
1.3 pk 192:
1.189 pooka 193: pool_cache_t pnbuf_cache; /* pathname buffer cache */
194:
1.145 ad 195: /* List of all caches. */
196: TAILQ_HEAD(,pool_cache) pool_cache_head =
197: TAILQ_HEAD_INITIALIZER(pool_cache_head);
198:
1.162 ad 199: int pool_cache_disable; /* global disable for caching */
1.169 yamt 200: static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */
1.145 ad 201:
1.162 ad 202: static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
203: void *);
204: static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
205: void **, paddr_t *, int);
1.134 ad 206: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
207: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
1.175 jym 208: static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
1.196 jym 209: static void pool_cache_transfer(pool_cache_t);
1.3 pk 210:
1.42 thorpej 211: static int pool_catchup(struct pool *);
1.128 christos 212: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 213: struct pool_item_header *);
1.88 chs 214: static void pool_update_curpage(struct pool *);
1.66 thorpej 215:
1.113 yamt 216: static int pool_grow(struct pool *, int);
1.117 yamt 217: static void *pool_allocator_alloc(struct pool *, int);
218: static void pool_allocator_free(struct pool *, void *);
1.3 pk 219:
1.97 yamt 220: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.199 christos 221: void (*)(const char *, ...) __printflike(1, 2));
1.42 thorpej 222: static void pool_print1(struct pool *, const char *,
1.199 christos 223: void (*)(const char *, ...) __printflike(1, 2));
1.3 pk 224:
1.88 chs 225: static int pool_chk_page(struct pool *, const char *,
226: struct pool_item_header *);
227:
1.135 yamt 228: static inline unsigned int
1.97 yamt 229: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
230: const void *v)
231: {
232: const char *cp = v;
1.135 yamt 233: unsigned int idx;
1.97 yamt 234:
235: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 236: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 237: KASSERT(idx < pp->pr_itemsperpage);
238: return idx;
239: }
240:
1.110 perry 241: static inline void
1.97 yamt 242: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
243: void *obj)
244: {
1.135 yamt 245: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
246: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
247: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 248:
1.135 yamt 249: KASSERT((*bitmap & mask) == 0);
250: *bitmap |= mask;
1.97 yamt 251: }
252:
1.110 perry 253: static inline void *
1.97 yamt 254: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
255: {
1.135 yamt 256: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
257: unsigned int idx;
258: int i;
1.97 yamt 259:
1.135 yamt 260: for (i = 0; ; i++) {
261: int bit;
1.97 yamt 262:
1.135 yamt 263: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
264: bit = ffs32(bitmap[i]);
265: if (bit) {
266: pool_item_bitmap_t mask;
267:
268: bit--;
269: idx = (i * BITMAP_SIZE) + bit;
270: mask = 1 << bit;
271: KASSERT((bitmap[i] & mask) != 0);
272: bitmap[i] &= ~mask;
273: break;
274: }
275: }
276: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 277: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 278: }
279:
1.135 yamt 280: static inline void
1.141 yamt 281: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
1.135 yamt 282: {
283: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
284: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
285: int i;
286:
287: for (i = 0; i < n; i++) {
288: bitmap[i] = (pool_item_bitmap_t)-1;
289: }
290: }
291:
1.110 perry 292: static inline int
1.88 chs 293: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
294: {
1.121 yamt 295:
296: /*
297: * we consider a pool_item_header with a smaller ph_page to be bigger.
298: * (this unnatural ordering is for the benefit of pr_find_pagehead.)
299: */
300:
1.88 chs 301: if (a->ph_page < b->ph_page)
1.121 yamt 302: return (1);
303: else if (a->ph_page > b->ph_page)
1.88 chs 304: return (-1);
305: else
306: return (0);
307: }
308:
309: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
310: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
311:
1.141 yamt 312: static inline struct pool_item_header *
313: pr_find_pagehead_noalign(struct pool *pp, void *v)
314: {
315: struct pool_item_header *ph, tmp;
316:
317: tmp.ph_page = (void *)(uintptr_t)v;
318: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
319: if (ph == NULL) {
320: ph = SPLAY_ROOT(&pp->pr_phtree);
321: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
322: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
323: }
324: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
325: }
326:
327: return ph;
328: }
329:
1.3 pk 330: /*
1.121 yamt 331: * Return the pool page header based on item address.
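 * For example, for an aligned pool with 4 KiB pages, an item at
 * 0x...b123 maps to page 0x...b000 via `v & pa_pagemask'; the header is
 * then either at a fixed offset within that page (PR_PHINPAGE) or found
 * through the splay tree.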
1.3 pk 332: */
1.110 perry 333: static inline struct pool_item_header *
1.121 yamt 334: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 335: {
1.88 chs 336: struct pool_item_header *ph, tmp;
1.3 pk 337:
1.121 yamt 338: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.141 yamt 339: ph = pr_find_pagehead_noalign(pp, v);
1.121 yamt 340: } else {
1.128 christos 341: void *page =
342: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 343:
344: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 345: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 346: } else {
347: tmp.ph_page = page;
348: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
349: }
350: }
1.3 pk 351:
1.121 yamt 352: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 353: ((char *)ph->ph_page <= (char *)v &&
354: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 355: return ph;
1.3 pk 356: }
357:
1.101 thorpej 358: static void
359: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
360: {
361: struct pool_item_header *ph;
362:
363: while ((ph = LIST_FIRST(pq)) != NULL) {
364: LIST_REMOVE(ph, ph_pagelist);
365: pool_allocator_free(pp, ph->ph_page);
1.134 ad 366: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 367: pool_put(pp->pr_phpool, ph);
368: }
369: }
370:
1.3 pk 371: /*
372: * Remove a page from the pool.
373: */
1.110 perry 374: static inline void
1.61 chs 375: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
376: struct pool_pagelist *pq)
1.3 pk 377: {
378:
1.134 ad 379: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 380:
1.3 pk 381: /*
1.7 thorpej 382: * If the page was idle, decrement the idle page count.
1.3 pk 383: */
1.6 thorpej 384: if (ph->ph_nmissing == 0) {
1.206.2.1! pgoyette 385: KASSERT(pp->pr_nidle != 0);
! 386: KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
! 387: "nitems=%u < itemsperpage=%u",
! 388: pp->pr_nitems, pp->pr_itemsperpage);
1.6 thorpej 389: pp->pr_nidle--;
390: }
1.7 thorpej 391:
1.20 thorpej 392: pp->pr_nitems -= pp->pr_itemsperpage;
393:
1.7 thorpej 394: /*
1.101 thorpej 395: * Unlink the page from the pool and queue it for release.
1.7 thorpej 396: */
1.88 chs 397: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 398: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
399: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 400: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
401:
1.7 thorpej 402: pp->pr_npages--;
403: pp->pr_npagefree++;
1.6 thorpej 404:
1.88 chs 405: pool_update_curpage(pp);
1.3 pk 406: }
407:
408: /*
1.94 simonb 409: * Initialize all the pools listed in the "pools" link set.
410: */
411: void
1.117 yamt 412: pool_subsystem_init(void)
1.94 simonb 413: {
1.192 rmind 414: size_t size;
1.191 para 415: int idx;
1.94 simonb 416:
1.134 ad 417: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
1.179 mlelstv 418: mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
1.134 ad 419: cv_init(&pool_busy, "poolbusy");
420:
1.191 para 421: /*
422: * Initialize private page header pool and cache magazine pool if we
423: * haven't done so yet.
424: */
425: for (idx = 0; idx < PHPOOL_MAX; idx++) {
426: static char phpool_names[PHPOOL_MAX][6+1+6+1];
427: int nelem;
428: size_t sz;
429:
430: nelem = PHPOOL_FREELIST_NELEM(idx);
431: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
432: "phpool-%d", nelem);
433: sz = sizeof(struct pool_item_header);
434: if (nelem) {
435: sz = offsetof(struct pool_item_header,
436: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
437: }
438: pool_init(&phpool[idx], sz, 0, 0, 0,
439: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.117 yamt 440: }
1.191 para 441: #ifdef POOL_SUBPAGE
442: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
443: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
444: #endif
445:
446: size = sizeof(pcg_t) +
447: (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
448: pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
449: "pcgnormal", &pool_allocator_meta, IPL_VM);
450:
451: size = sizeof(pcg_t) +
452: (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
453: pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
454: "pcglarge", &pool_allocator_meta, IPL_VM);
1.134 ad 455:
1.156 ad 456: pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
1.191 para 457: 0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
1.134 ad 458:
1.156 ad 459: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
1.191 para 460: 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
1.94 simonb 461: }
462:
463: /*
1.3 pk 464: * Initialize the given pool resource structure.
465: *
466: * We export this routine to allow other kernel parts to declare
1.195 rmind 467: * static pools that must be initialized before kmem(9) is available.
1.3 pk 468: */
469: void
1.42 thorpej 470: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 471: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 472: {
1.116 simonb 473: struct pool *pp1;
1.204 maxv 474: size_t trysize, phsize, prsize;
1.134 ad 475: int off, slack;
1.3 pk 476:
1.116 simonb 477: #ifdef DEBUG
1.198 christos 478: if (__predict_true(!cold))
479: mutex_enter(&pool_head_lock);
1.116 simonb 480: /*
481: * Check that the pool hasn't already been initialised and
482: * added to the list of all pools.
483: */
1.145 ad 484: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
1.116 simonb 485: if (pp == pp1)
486: panic("pool_init: pool %s already initialised",
487: wchan);
488: }
1.198 christos 489: if (__predict_true(!cold))
490: mutex_exit(&pool_head_lock);
1.116 simonb 491: #endif
492:
1.66 thorpej 493: if (palloc == NULL)
494: palloc = &pool_allocator_kmem;
1.112 bjh21 495: #ifdef POOL_SUBPAGE
496: if (size > palloc->pa_pagesz) {
497: if (palloc == &pool_allocator_kmem)
498: palloc = &pool_allocator_kmem_fullpage;
499: else if (palloc == &pool_allocator_nointr)
500: palloc = &pool_allocator_nointr_fullpage;
501: }
1.66 thorpej 502: #endif /* POOL_SUBPAGE */
1.180 mlelstv 503: if (!cold)
504: mutex_enter(&pool_allocator_lock);
1.178 elad 505: if (palloc->pa_refcnt++ == 0) {
1.112 bjh21 506: if (palloc->pa_pagesz == 0)
1.66 thorpej 507: palloc->pa_pagesz = PAGE_SIZE;
508:
509: TAILQ_INIT(&palloc->pa_list);
510:
1.134 ad 511: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 512: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
513: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.4 thorpej 514: }
1.180 mlelstv 515: if (!cold)
516: mutex_exit(&pool_allocator_lock);
1.3 pk 517:
518: if (align == 0)
519: align = ALIGN(1);
1.14 thorpej 520:
1.204 maxv 521: prsize = size;
522: if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
523: prsize = sizeof(struct pool_item);
1.3 pk 524:
1.204 maxv 525: prsize = roundup(prsize, align);
1.206.2.1! pgoyette 526: KASSERTMSG((prsize <= palloc->pa_pagesz),
! 527: "pool_init: pool item size (%zu) larger than page size (%u)",
! 528: prsize, palloc->pa_pagesz);
1.35 pk 529:
1.3 pk 530: /*
531: * Initialize the pool structure.
532: */
1.88 chs 533: LIST_INIT(&pp->pr_emptypages);
534: LIST_INIT(&pp->pr_fullpages);
535: LIST_INIT(&pp->pr_partpages);
1.134 ad 536: pp->pr_cache = NULL;
1.3 pk 537: pp->pr_curpage = NULL;
538: pp->pr_npages = 0;
539: pp->pr_minitems = 0;
540: pp->pr_minpages = 0;
541: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 542: pp->pr_roflags = flags;
543: pp->pr_flags = 0;
1.204 maxv 544: pp->pr_size = prsize;
1.3 pk 545: pp->pr_align = align;
546: pp->pr_wchan = wchan;
1.66 thorpej 547: pp->pr_alloc = palloc;
1.20 thorpej 548: pp->pr_nitems = 0;
549: pp->pr_nout = 0;
550: pp->pr_hardlimit = UINT_MAX;
551: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 552: pp->pr_hardlimit_ratecap.tv_sec = 0;
553: pp->pr_hardlimit_ratecap.tv_usec = 0;
554: pp->pr_hardlimit_warning_last.tv_sec = 0;
555: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 556: pp->pr_drain_hook = NULL;
557: pp->pr_drain_hook_arg = NULL;
1.125 ad 558: pp->pr_freecheck = NULL;
1.204 maxv 559: pool_redzone_init(pp, size);
1.3 pk 560:
561: /*
562: * Decide whether to put the page header off-page: either to avoid
1.92 enami 563: * wasting too large a part of the page, or because the item is too big.
564: * Off-page page headers go into a splay tree, so we can match
565: * a returned item with its header based on the page address.
566: * We use 1/16 of the page size and about 8 times the item
567: * size as the threshold (XXX: tune).
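 * For instance, assuming a 4 KiB page and, say, a 48-byte page
 * header, the in-page limit would be MIN(4096 / 16, 48 * 8), i.e.
 * 256 bytes: pools with smaller items keep the header in the page.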
568: *
569: * However, we'll put the header into the page if we can put
570: * it without wasting any items.
571: *
572: * Silently enforce `0 <= ioff < align'.
1.3 pk 573: */
1.92 enami 574: pp->pr_itemoffset = ioff %= align;
575: /* See the comment below about reserved bytes. */
576: trysize = palloc->pa_pagesz - ((align - ioff) % align);
577: phsize = ALIGN(sizeof(struct pool_item_header));
1.201 para 578: if (pp->pr_roflags & PR_PHINPAGE ||
579: ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 580: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
1.201 para 581: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
1.3 pk 582: /* Use the end of the page for the page header */
1.20 thorpej 583: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 584: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 585: } else {
1.3 pk 586: /* The page header will be taken from our page header pool */
587: pp->pr_phoffset = 0;
1.66 thorpej 588: off = palloc->pa_pagesz;
1.88 chs 589: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 590: }
1.1 pk 591:
1.3 pk 592: /*
593: * Alignment is to take place at `ioff' within the item. This means
594: * we must reserve up to `align - 1' bytes on the page to allow
595: * appropriate positioning of each item.
596: */
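/*
 * For example, with align = 32 and ioff = 8, (align - ioff) % align = 24
 * bytes are reserved at the start of the page so that each item's byte 8
 * falls on a 32-byte boundary.
 */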
597: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 598: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 599: if ((pp->pr_roflags & PR_NOTOUCH)) {
600: int idx;
601:
602: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
603: idx++) {
604: /* nothing */
605: }
606: if (idx >= PHPOOL_MAX) {
607: /*
608: * if you see this panic, consider tweaking
609: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
610: */
611: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
612: pp->pr_wchan, pp->pr_itemsperpage);
613: }
614: pp->pr_phpool = &phpool[idx];
615: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
616: pp->pr_phpool = &phpool[0];
617: }
618: #if defined(DIAGNOSTIC)
619: else {
620: pp->pr_phpool = NULL;
621: }
622: #endif
1.3 pk 623:
624: /*
625: * Use the slack between the chunks and the page header
626: * for "cache coloring".
627: */
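/*
 * Sketch of the effect: successive pages start their items at offsets
 * 0, align, 2 * align, ... wrapping at pr_maxcolor, so that item N of
 * one page and item N of the next tend to land on different hardware
 * cache lines instead of aliasing each other.
 */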
628: slack = off - pp->pr_itemsperpage * pp->pr_size;
629: pp->pr_maxcolor = (slack / align) * align;
630: pp->pr_curcolor = 0;
631:
632: pp->pr_nget = 0;
633: pp->pr_nfail = 0;
634: pp->pr_nput = 0;
635: pp->pr_npagealloc = 0;
636: pp->pr_npagefree = 0;
1.1 pk 637: pp->pr_hiwat = 0;
1.8 thorpej 638: pp->pr_nidle = 0;
1.134 ad 639: pp->pr_refcnt = 0;
1.3 pk 640:
1.157 ad 641: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
1.134 ad 642: cv_init(&pp->pr_cv, wchan);
643: pp->pr_ipl = ipl;
1.1 pk 644:
1.145 ad 645: /* Insert into the list of all pools. */
1.181 mlelstv 646: if (!cold)
1.134 ad 647: mutex_enter(&pool_head_lock);
1.145 ad 648: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
649: if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
650: break;
651: }
652: if (pp1 == NULL)
653: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
654: else
655: TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
1.181 mlelstv 656: if (!cold)
1.134 ad 657: mutex_exit(&pool_head_lock);
658:
1.167 skrll 659: /* Insert this into the list of pools using this allocator. */
1.181 mlelstv 660: if (!cold)
1.134 ad 661: mutex_enter(&palloc->pa_lock);
1.145 ad 662: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
1.181 mlelstv 663: if (!cold)
1.134 ad 664: mutex_exit(&palloc->pa_lock);
1.1 pk 665: }
666:
667: /*
668: * De-commission a pool resource.
669: */
670: void
1.42 thorpej 671: pool_destroy(struct pool *pp)
1.1 pk 672: {
1.101 thorpej 673: struct pool_pagelist pq;
1.3 pk 674: struct pool_item_header *ph;
1.43 thorpej 675:
1.101 thorpej 676: /* Remove from global pool list */
1.134 ad 677: mutex_enter(&pool_head_lock);
678: while (pp->pr_refcnt != 0)
679: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 680: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.101 thorpej 681: if (drainpp == pp)
682: drainpp = NULL;
1.134 ad 683: mutex_exit(&pool_head_lock);
1.101 thorpej 684:
685: /* Remove this pool from its allocator's list of pools. */
1.134 ad 686: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 687: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 688: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 689:
1.178 elad 690: mutex_enter(&pool_allocator_lock);
691: if (--pp->pr_alloc->pa_refcnt == 0)
692: mutex_destroy(&pp->pr_alloc->pa_lock);
693: mutex_exit(&pool_allocator_lock);
694:
1.134 ad 695: mutex_enter(&pp->pr_lock);
1.101 thorpej 696:
1.134 ad 697: KASSERT(pp->pr_cache == NULL);
1.206.2.1! pgoyette 698: KASSERTMSG((pp->pr_nout == 0),
! 699: "pool_destroy: pool busy: still out: %u", pp->pr_nout);
1.101 thorpej 700: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
701: KASSERT(LIST_EMPTY(&pp->pr_partpages));
702:
1.3 pk 703: /* Remove all pages */
1.101 thorpej 704: LIST_INIT(&pq);
1.88 chs 705: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 706: pr_rmpage(pp, ph, &pq);
707:
1.134 ad 708: mutex_exit(&pp->pr_lock);
1.3 pk 709:
1.101 thorpej 710: pr_pagelist_free(pp, &pq);
1.134 ad 711: cv_destroy(&pp->pr_cv);
712: mutex_destroy(&pp->pr_lock);
1.1 pk 713: }
714:
1.68 thorpej 715: void
716: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
717: {
718:
719: /* XXX no locking -- must be used just after pool_init() */
1.206.2.1! pgoyette 720: KASSERTMSG((pp->pr_drain_hook == NULL),
! 721: "pool_set_drain_hook(%s): already set", pp->pr_wchan);
1.68 thorpej 722: pp->pr_drain_hook = fn;
723: pp->pr_drain_hook_arg = arg;
724: }
725:
1.88 chs 726: static struct pool_item_header *
1.128 christos 727: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 728: {
729: struct pool_item_header *ph;
730:
731: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 732: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.134 ad 733: else
1.97 yamt 734: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 735:
736: return (ph);
737: }
1.1 pk 738:
739: /*
1.134 ad 740: * Grab an item from the pool.
1.1 pk 741: */
1.3 pk 742: void *
1.56 sommerfe 743: pool_get(struct pool *pp, int flags)
1.1 pk 744: {
745: struct pool_item *pi;
1.3 pk 746: struct pool_item_header *ph;
1.55 thorpej 747: void *v;
1.1 pk 748:
1.206.2.1! pgoyette 749: KASSERTMSG((pp->pr_itemsperpage != 0),
! 750: "pool_get: pool '%s': pr_itemsperpage is zero, "
! 751: "pool not initialized?", pp->pr_wchan);
! 752: KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
! 753: || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
! 754: "pool '%s' is IPL_NONE, but called from interrupt context",
! 755: pp->pr_wchan);
1.155 ad 756: if (flags & PR_WAITOK) {
1.154 yamt 757: ASSERT_SLEEPABLE();
1.155 ad 758: }
1.1 pk 759:
1.134 ad 760: mutex_enter(&pp->pr_lock);
1.20 thorpej 761: startover:
762: /*
763: * Check to see if we've reached the hard limit. If we have,
764: * and we can wait, then wait until an item has been returned to
765: * the pool.
766: */
1.206.2.1! pgoyette 767: KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
! 768: "pool_get: %s: crossed hard limit", pp->pr_wchan);
1.34 thorpej 769: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 770: if (pp->pr_drain_hook != NULL) {
771: /*
772: * Since the drain hook is going to free things
773: * back to the pool, unlock, call the hook, re-lock,
774: * and check the hardlimit condition again.
775: */
1.134 ad 776: mutex_exit(&pp->pr_lock);
1.68 thorpej 777: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 778: mutex_enter(&pp->pr_lock);
1.68 thorpej 779: if (pp->pr_nout < pp->pr_hardlimit)
780: goto startover;
781: }
782:
1.29 sommerfe 783: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 784: /*
785: * XXX: A warning isn't logged in this case. Should
786: * it be?
787: */
788: pp->pr_flags |= PR_WANTED;
1.134 ad 789: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.20 thorpej 790: goto startover;
791: }
1.31 thorpej 792:
793: /*
794: * Log a message that the hard limit has been hit.
795: */
796: if (pp->pr_hardlimit_warning != NULL &&
797: ratecheck(&pp->pr_hardlimit_warning_last,
798: &pp->pr_hardlimit_ratecap))
799: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 800:
801: pp->pr_nfail++;
802:
1.134 ad 803: mutex_exit(&pp->pr_lock);
1.20 thorpej 804: return (NULL);
805: }
806:
1.3 pk 807: /*
808: * The convention we use is that if `curpage' is not NULL, then
809: * it points at a non-empty bucket. In particular, `curpage'
810: * never points at a page header which has PR_PHINPAGE set and
811: * has no items in its bucket.
812: */
1.20 thorpej 813: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 814: int error;
815:
1.206.2.1! pgoyette 816: KASSERTMSG((pp->pr_nitems == 0),
! 817: "pool_get: nitems inconsistent"
! 818: ": %s: curpage NULL, nitems %u",
! 819: pp->pr_wchan, pp->pr_nitems);
1.20 thorpej 820:
1.21 thorpej 821: /*
822: * Call the back-end page allocator for more memory.
823: * Release the pool lock, as the back-end page allocator
824: * may block.
825: */
1.113 yamt 826: error = pool_grow(pp, flags);
827: if (error != 0) {
1.21 thorpej 828: /*
1.55 thorpej 829: * We were unable to allocate a page or item
830: * header, but we released the lock during
831: * allocation, so perhaps items were freed
832: * back to the pool. Check for this case.
1.21 thorpej 833: */
834: if (pp->pr_curpage != NULL)
835: goto startover;
1.15 pk 836:
1.117 yamt 837: pp->pr_nfail++;
1.134 ad 838: mutex_exit(&pp->pr_lock);
1.117 yamt 839: return (NULL);
1.1 pk 840: }
1.3 pk 841:
1.20 thorpej 842: /* Start the allocation process over. */
843: goto startover;
1.3 pk 844: }
1.97 yamt 845: if (pp->pr_roflags & PR_NOTOUCH) {
1.206.2.1! pgoyette 846: KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
! 847: "pool_get: %s: page empty", pp->pr_wchan);
1.97 yamt 848: v = pr_item_notouch_get(pp, ph);
849: } else {
1.102 chs 850: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 851: if (__predict_false(v == NULL)) {
1.134 ad 852: mutex_exit(&pp->pr_lock);
1.97 yamt 853: panic("pool_get: %s: page empty", pp->pr_wchan);
854: }
1.206.2.1! pgoyette 855: KASSERTMSG((pp->pr_nitems > 0),
! 856: "pool_get: nitems inconsistent"
! 857: ": %s: items on itemlist, nitems %u",
! 858: pp->pr_wchan, pp->pr_nitems);
! 859: KASSERTMSG((pi->pi_magic == PI_MAGIC),
! 860: "pool_get(%s): free list modified: "
! 861: "magic=%x; page %p; item addr %p",
! 862: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1.3 pk 863:
1.97 yamt 864: /*
865: * Remove from item list.
866: */
1.102 chs 867: LIST_REMOVE(pi, pi_list);
1.97 yamt 868: }
1.20 thorpej 869: pp->pr_nitems--;
870: pp->pr_nout++;
1.6 thorpej 871: if (ph->ph_nmissing == 0) {
1.206.2.1! pgoyette 872: KASSERT(pp->pr_nidle > 0);
1.6 thorpej 873: pp->pr_nidle--;
1.88 chs 874:
875: /*
876: * This page was previously empty. Move it to the list of
877: * partially-full pages. This page is already curpage.
878: */
879: LIST_REMOVE(ph, ph_pagelist);
880: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 881: }
1.3 pk 882: ph->ph_nmissing++;
1.97 yamt 883: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.206.2.1! pgoyette 884: KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) ||
! 885: LIST_EMPTY(&ph->ph_itemlist)),
! 886: "pool_get: %s: nmissing inconsistent", pp->pr_wchan);
1.3 pk 887: /*
1.88 chs 888: * This page is now full. Move it to the full list
889: * and select a new current page.
1.3 pk 890: */
1.88 chs 891: LIST_REMOVE(ph, ph_pagelist);
892: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
893: pool_update_curpage(pp);
1.1 pk 894: }
1.3 pk 895:
896: pp->pr_nget++;
1.20 thorpej 897:
898: /*
899: * If we have a low water mark and we are now below that low
900: * water mark, add more items to the pool.
901: */
1.53 thorpej 902: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 903: /*
904: * XXX: Should we log a warning? Should we set up a timeout
905: * to try again in a second or so? The latter could break
906: * a caller's assumptions about interrupt protection, etc.
907: */
908: }
909:
1.134 ad 910: mutex_exit(&pp->pr_lock);
1.125 ad 911: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
912: FREECHECK_OUT(&pp->pr_freecheck, v);
1.204 maxv 913: pool_redzone_fill(pp, v);
1.1 pk 914: return (v);
915: }
916:
917: /*
1.43 thorpej 918: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 919: */
1.43 thorpej 920: static void
1.101 thorpej 921: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 922: {
923: struct pool_item *pi = v;
1.3 pk 924: struct pool_item_header *ph;
925:
1.134 ad 926: KASSERT(mutex_owned(&pp->pr_lock));
1.204 maxv 927: pool_redzone_check(pp, v);
1.125 ad 928: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 929: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 930:
1.206.2.1! pgoyette 931: KASSERTMSG((pp->pr_nout > 0),
! 932: "pool_put: pool %s: putting with none out", pp->pr_wchan);
1.3 pk 933:
1.121 yamt 934: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.3 pk 935: panic("pool_put: %s: page header missing", pp->pr_wchan);
936: }
1.28 thorpej 937:
1.3 pk 938: /*
939: * Return to item list.
940: */
1.97 yamt 941: if (pp->pr_roflags & PR_NOTOUCH) {
942: pr_item_notouch_put(pp, ph, v);
943: } else {
1.2 pk 944: #ifdef DIAGNOSTIC
1.97 yamt 945: pi->pi_magic = PI_MAGIC;
1.3 pk 946: #endif
1.32 chs 947: #ifdef DEBUG
1.97 yamt 948: {
949: int i, *ip = v;
1.32 chs 950:
1.97 yamt 951: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
952: *ip++ = PI_MAGIC;
953: }
1.32 chs 954: }
955: #endif
956:
1.102 chs 957: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 958: }
1.79 thorpej 959: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 960: ph->ph_nmissing--;
961: pp->pr_nput++;
1.20 thorpej 962: pp->pr_nitems++;
963: pp->pr_nout--;
1.3 pk 964:
965: /* Cancel "pool empty" condition if it exists */
966: if (pp->pr_curpage == NULL)
967: pp->pr_curpage = ph;
968:
969: if (pp->pr_flags & PR_WANTED) {
970: pp->pr_flags &= ~PR_WANTED;
1.134 ad 971: cv_broadcast(&pp->pr_cv);
1.3 pk 972: }
973:
974: /*
1.88 chs 975: * If this page is now empty, do one of two things:
1.21 thorpej 976: *
1.88 chs 977: * (1) If we have more pages than the page high water mark,
1.96 thorpej 978: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 979: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
980: * CLAIM.
1.21 thorpej 981: *
1.88 chs 982: * (2) Otherwise, move the page to the empty page list.
983: *
984: * Either way, select a new current page (so we use a partially-full
985: * page if one is available).
1.3 pk 986: */
987: if (ph->ph_nmissing == 0) {
1.6 thorpej 988: pp->pr_nidle++;
1.90 thorpej 989: if (pp->pr_npages > pp->pr_minpages &&
1.152 yamt 990: pp->pr_npages > pp->pr_maxpages) {
1.101 thorpej 991: pr_rmpage(pp, ph, pq);
1.3 pk 992: } else {
1.88 chs 993: LIST_REMOVE(ph, ph_pagelist);
994: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 995:
1.21 thorpej 996: /*
997: * Update the timestamp on the page. A page must
998: * be idle for some period of time before it can
999: * be reclaimed by the pagedaemon. This minimizes
1000: * ping-pong'ing for memory.
1.151 yamt 1001: *
1002: * note for 64-bit time_t: truncating to 32-bit is not
1003: * a problem for our usage.
1.21 thorpej 1004: */
1.151 yamt 1005: ph->ph_time = time_uptime;
1.1 pk 1006: }
1.88 chs 1007: pool_update_curpage(pp);
1.1 pk 1008: }
1.88 chs 1009:
1.21 thorpej 1010: /*
1.88 chs 1011: * If the page was previously completely full, move it to the
1012: * partially-full list and make it the current page. The next
1013: * allocation will get the item from this page, instead of
1014: * further fragmenting the pool.
1.21 thorpej 1015: */
1016: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1017: LIST_REMOVE(ph, ph_pagelist);
1018: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1019: pp->pr_curpage = ph;
1020: }
1.43 thorpej 1021: }
1022:
1.56 sommerfe 1023: void
1024: pool_put(struct pool *pp, void *v)
1025: {
1.101 thorpej 1026: struct pool_pagelist pq;
1027:
1028: LIST_INIT(&pq);
1.56 sommerfe 1029:
1.134 ad 1030: mutex_enter(&pp->pr_lock);
1.101 thorpej 1031: pool_do_put(pp, v, &pq);
1.134 ad 1032: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1033:
1.102 chs 1034: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1035: }
1.57 sommerfe 1036:
1.74 thorpej 1037: /*
1.113 yamt 1038: * pool_grow: grow a pool by a page.
1039: *
1040: * => called with pool locked.
1041: * => unlock and relock the pool.
1042: * => return with pool locked.
1043: */
1044:
1045: static int
1046: pool_grow(struct pool *pp, int flags)
1047: {
1048: struct pool_item_header *ph = NULL;
1049: char *cp;
1050:
1.134 ad 1051: mutex_exit(&pp->pr_lock);
1.113 yamt 1052: cp = pool_allocator_alloc(pp, flags);
1053: if (__predict_true(cp != NULL)) {
1054: ph = pool_alloc_item_header(pp, cp, flags);
1055: }
1056: if (__predict_false(cp == NULL || ph == NULL)) {
1057: if (cp != NULL) {
1058: pool_allocator_free(pp, cp);
1059: }
1.134 ad 1060: mutex_enter(&pp->pr_lock);
1.113 yamt 1061: return ENOMEM;
1062: }
1063:
1.134 ad 1064: mutex_enter(&pp->pr_lock);
1.113 yamt 1065: pool_prime_page(pp, cp, ph);
1066: pp->pr_npagealloc++;
1067: return 0;
1068: }
1069:
1070: /*
1.74 thorpej 1071: * Add N items to the pool.
1072: */
1073: int
1074: pool_prime(struct pool *pp, int n)
1075: {
1.75 simonb 1076: int newpages;
1.113 yamt 1077: int error = 0;
1.74 thorpej 1078:
1.134 ad 1079: mutex_enter(&pp->pr_lock);
1.74 thorpej 1080:
1081: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1082:
1083: while (newpages-- > 0) {
1.113 yamt 1084: error = pool_grow(pp, PR_NOWAIT);
1085: if (error) {
1.74 thorpej 1086: break;
1087: }
1088: pp->pr_minpages++;
1089: }
1090:
1091: if (pp->pr_minpages >= pp->pr_maxpages)
1092: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1093:
1.134 ad 1094: mutex_exit(&pp->pr_lock);
1.113 yamt 1095: return error;
1.74 thorpej 1096: }
1.55 thorpej 1097:
1098: /*
1.3 pk 1099: * Add a page worth of items to the pool.
1.21 thorpej 1100: *
1101: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1102: */
1.55 thorpej 1103: static void
1.128 christos 1104: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1105: {
1106: struct pool_item *pi;
1.128 christos 1107: void *cp = storage;
1.125 ad 1108: const unsigned int align = pp->pr_align;
1109: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1110: int n;
1.36 pk 1111:
1.134 ad 1112: KASSERT(mutex_owned(&pp->pr_lock));
1.206.2.1! pgoyette 1113: KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
! 1114: (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
! 1115: "pool_prime_page: %s: unaligned page: %p", pp->pr_wchan, cp);
1.3 pk 1116:
1117: /*
1118: * Insert page header.
1119: */
1.88 chs 1120: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1121: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1122: ph->ph_page = storage;
1123: ph->ph_nmissing = 0;
1.151 yamt 1124: ph->ph_time = time_uptime;
1.88 chs 1125: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1126: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1127:
1.6 thorpej 1128: pp->pr_nidle++;
1129:
1.3 pk 1130: /*
1131: * Color this page.
1132: */
1.141 yamt 1133: ph->ph_off = pp->pr_curcolor;
1134: cp = (char *)cp + ph->ph_off;
1.3 pk 1135: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1136: pp->pr_curcolor = 0;
1137:
1138: /*
1139: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1140: */
1141: if (ioff != 0)
1.128 christos 1142: cp = (char *)cp + align - ioff;
1.3 pk 1143:
1.125 ad 1144: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1145:
1.3 pk 1146: /*
1147: * Insert remaining chunks on the bucket list.
1148: */
1149: n = pp->pr_itemsperpage;
1.20 thorpej 1150: pp->pr_nitems += n;
1.3 pk 1151:
1.97 yamt 1152: if (pp->pr_roflags & PR_NOTOUCH) {
1.141 yamt 1153: pr_item_notouch_init(pp, ph);
1.97 yamt 1154: } else {
1155: while (n--) {
1156: pi = (struct pool_item *)cp;
1.78 thorpej 1157:
1.97 yamt 1158: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1159:
1.97 yamt 1160: /* Insert on page list */
1.102 chs 1161: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1162: #ifdef DIAGNOSTIC
1.97 yamt 1163: pi->pi_magic = PI_MAGIC;
1.3 pk 1164: #endif
1.128 christos 1165: cp = (char *)cp + pp->pr_size;
1.125 ad 1166:
1167: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1168: }
1.3 pk 1169: }
1170:
1171: /*
1172: * If the pool was depleted, point at the new page.
1173: */
1174: if (pp->pr_curpage == NULL)
1175: pp->pr_curpage = ph;
1176:
1177: if (++pp->pr_npages > pp->pr_hiwat)
1178: pp->pr_hiwat = pp->pr_npages;
1179: }
1180:
1.20 thorpej 1181: /*
1.52 thorpej 1182: * Used by pool_get() when nitems drops below the low water mark, to
1.88 chs 1183: * catch pr_nitems back up to the low water mark.
1.20 thorpej 1184: *
1.21 thorpej 1185: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1186: *
1.73 thorpej 1187: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1188: * with it locked.
1189: */
1190: static int
1.42 thorpej 1191: pool_catchup(struct pool *pp)
1.20 thorpej 1192: {
1193: int error = 0;
1194:
1.54 thorpej 1195: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1196: error = pool_grow(pp, PR_NOWAIT);
1197: if (error) {
1.20 thorpej 1198: break;
1199: }
1200: }
1.113 yamt 1201: return error;
1.20 thorpej 1202: }
1203:
1.88 chs 1204: static void
1205: pool_update_curpage(struct pool *pp)
1206: {
1207:
1208: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1209: if (pp->pr_curpage == NULL) {
1210: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1211: }
1.168 yamt 1212: KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1213: (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1.88 chs 1214: }
1215:
1.3 pk 1216: void
1.42 thorpej 1217: pool_setlowat(struct pool *pp, int n)
1.3 pk 1218: {
1.15 pk 1219:
1.134 ad 1220: mutex_enter(&pp->pr_lock);
1.21 thorpej 1221:
1.3 pk 1222: pp->pr_minitems = n;
1.15 pk 1223: pp->pr_minpages = (n == 0)
1224: ? 0
1.18 thorpej 1225: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1226:
1227: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1228: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1229: /*
1230: * XXX: Should we log a warning? Should we set up a timeout
1231: * to try again in a second or so? The latter could break
1232: * a caller's assumptions about interrupt protection, etc.
1233: */
1234: }
1.21 thorpej 1235:
1.134 ad 1236: mutex_exit(&pp->pr_lock);
1.3 pk 1237: }
1238:
1239: void
1.42 thorpej 1240: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1241: {
1.15 pk 1242:
1.134 ad 1243: mutex_enter(&pp->pr_lock);
1.21 thorpej 1244:
1.15 pk 1245: pp->pr_maxpages = (n == 0)
1246: ? 0
1.18 thorpej 1247: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1248:
1.134 ad 1249: mutex_exit(&pp->pr_lock);
1.3 pk 1250: }
1251:
1.20 thorpej 1252: void
1.42 thorpej 1253: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1254: {
1255:
1.134 ad 1256: mutex_enter(&pp->pr_lock);
1.20 thorpej 1257:
1258: pp->pr_hardlimit = n;
1259: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1260: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1261: pp->pr_hardlimit_warning_last.tv_sec = 0;
1262: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1263:
1264: /*
1.21 thorpej 1265: * In-line version of pool_sethiwat(), because we don't want to
1266: * release the lock.
1.20 thorpej 1267: */
1268: pp->pr_maxpages = (n == 0)
1269: ? 0
1270: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1271:
1.134 ad 1272: mutex_exit(&pp->pr_lock);
1.20 thorpej 1273: }
1.3 pk 1274:
1275: /*
1276: * Release all complete pages that have not been used recently.
1.184 rmind 1277: *
1.197 jym 1278: * Must not be called from interrupt context.
1.3 pk 1279: */
1.66 thorpej 1280: int
1.56 sommerfe 1281: pool_reclaim(struct pool *pp)
1.3 pk 1282: {
1283: struct pool_item_header *ph, *phnext;
1.61 chs 1284: struct pool_pagelist pq;
1.151 yamt 1285: uint32_t curtime;
1.134 ad 1286: bool klock;
1287: int rv;
1.3 pk 1288:
1.197 jym 1289: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.184 rmind 1290:
1.68 thorpej 1291: if (pp->pr_drain_hook != NULL) {
1292: /*
1293: * The drain hook must be called with the pool unlocked.
1294: */
1295: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1296: }
1297:
1.134 ad 1298: /*
1.157 ad 1299: * XXXSMP We take the kernel lock here because we do not
1300: * want to cause non-MPSAFE code to block.
1.134 ad 1301: */
1302: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1303: pp->pr_ipl == IPL_SOFTSERIAL) {
1304: KERNEL_LOCK(1, NULL);
1305: klock = true;
1306: } else
1307: klock = false;
1308:
1309: /* Reclaim items from the pool's cache (if any). */
1310: if (pp->pr_cache != NULL)
1311: pool_cache_invalidate(pp->pr_cache);
1312:
1313: if (mutex_tryenter(&pp->pr_lock) == 0) {
1314: if (klock) {
1315: KERNEL_UNLOCK_ONE(NULL);
1316: }
1.66 thorpej 1317: return (0);
1.134 ad 1318: }
1.68 thorpej 1319:
1.88 chs 1320: LIST_INIT(&pq);
1.43 thorpej 1321:
1.151 yamt 1322: curtime = time_uptime;
1.21 thorpej 1323:
1.88 chs 1324: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1325: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1326:
1327: /* Check our minimum page claim */
1328: if (pp->pr_npages <= pp->pr_minpages)
1329: break;
1330:
1.88 chs 1331: KASSERT(ph->ph_nmissing == 0);
1.191 para 1332: if (curtime - ph->ph_time < pool_inactive_time)
1.88 chs 1333: continue;
1.21 thorpej 1334:
1.88 chs 1335: /*
1336: * If freeing this page would put us below
1337: * the low water mark, stop now.
1338: */
1339: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1340: pp->pr_minitems)
1341: break;
1.21 thorpej 1342:
1.88 chs 1343: pr_rmpage(pp, ph, &pq);
1.3 pk 1344: }
1345:
1.134 ad 1346: mutex_exit(&pp->pr_lock);
1347:
1348: if (LIST_EMPTY(&pq))
1349: rv = 0;
1350: else {
1351: pr_pagelist_free(pp, &pq);
1352: rv = 1;
1353: }
1354:
1355: if (klock) {
1356: KERNEL_UNLOCK_ONE(NULL);
1357: }
1.66 thorpej 1358:
1.134 ad 1359: return (rv);
1.3 pk 1360: }
1361:
1362: /*
1.197 jym 1363: * Drain pools, one at a time. The drained pool is returned in *ppp.
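 * A minimal caller sketch:
 *
 *	struct pool *pp;
 *
 *	if (pool_drain(&pp))
 *		printf("released pages from pool %s\n", pp->pr_wchan);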
1.131 ad 1364: *
1.134 ad 1365: * Note, must never be called from interrupt context.
1.3 pk 1366: */
1.197 jym 1367: bool
1368: pool_drain(struct pool **ppp)
1.3 pk 1369: {
1.197 jym 1370: bool reclaimed;
1.3 pk 1371: struct pool *pp;
1.134 ad 1372:
1.145 ad 1373: KASSERT(!TAILQ_EMPTY(&pool_head));
1.3 pk 1374:
1.61 chs 1375: pp = NULL;
1.134 ad 1376:
1377: /* Find next pool to drain, and add a reference. */
1378: mutex_enter(&pool_head_lock);
1379: do {
1380: if (drainpp == NULL) {
1.145 ad 1381: drainpp = TAILQ_FIRST(&pool_head);
1.134 ad 1382: }
1383: if (drainpp != NULL) {
1384: pp = drainpp;
1.145 ad 1385: drainpp = TAILQ_NEXT(pp, pr_poollist);
1.134 ad 1386: }
1387: /*
1388: * Skip completely idle pools. We depend on at least
1389: * one pool in the system being active.
1390: */
1391: } while (pp == NULL || pp->pr_npages == 0);
1392: pp->pr_refcnt++;
1393: mutex_exit(&pool_head_lock);
1394:
1395: /* Drain the cache (if any) and pool. */
1.186 pooka 1396: reclaimed = pool_reclaim(pp);
1.134 ad 1397:
1398: /* Finally, unlock the pool. */
1399: mutex_enter(&pool_head_lock);
1400: pp->pr_refcnt--;
1401: cv_broadcast(&pool_busy);
1402: mutex_exit(&pool_head_lock);
1.186 pooka 1403:
1.197 jym 1404: if (ppp != NULL)
1405: *ppp = pp;
1406:
1.186 pooka 1407: return reclaimed;
1.3 pk 1408: }
1409:
1410: /*
1411: * Diagnostic helpers.
1412: */
1.21 thorpej 1413:
1.25 thorpej 1414: void
1.108 yamt 1415: pool_printall(const char *modif, void (*pr)(const char *, ...))
1416: {
1417: struct pool *pp;
1418:
1.145 ad 1419: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.108 yamt 1420: pool_printit(pp, modif, pr);
1421: }
1422: }
1423:
1424: void
1.42 thorpej 1425: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1426: {
1427:
1428: if (pp == NULL) {
1429: (*pr)("Must specify a pool to print.\n");
1430: return;
1431: }
1432:
1433: pool_print1(pp, modif, pr);
1434: }
1435:
1.21 thorpej 1436: static void
1.124 yamt 1437: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1438: void (*pr)(const char *, ...))
1.88 chs 1439: {
1440: struct pool_item_header *ph;
1.206.2.1! pgoyette 1441: struct pool_item *pi __diagused;
1.88 chs 1442:
1443: LIST_FOREACH(ph, pl, ph_pagelist) {
1.151 yamt 1444: (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1445: ph->ph_page, ph->ph_nmissing, ph->ph_time);
1.88 chs 1446: #ifdef DIAGNOSTIC
1.97 yamt 1447: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1448: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1449: if (pi->pi_magic != PI_MAGIC) {
1450: (*pr)("\t\t\titem %p, magic 0x%x\n",
1451: pi, pi->pi_magic);
1452: }
1.88 chs 1453: }
1454: }
1455: #endif
1456: }
1457: }
1458:
1459: static void
1.42 thorpej 1460: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1461: {
1.25 thorpej 1462: struct pool_item_header *ph;
1.134 ad 1463: pool_cache_t pc;
1464: pcg_t *pcg;
1465: pool_cache_cpu_t *cc;
1466: uint64_t cpuhit, cpumiss;
1.44 thorpej 1467: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1468: char c;
1469:
1470: while ((c = *modif++) != '\0') {
1471: if (c == 'l')
1472: print_log = 1;
1473: if (c == 'p')
1474: print_pagelist = 1;
1.44 thorpej 1475: if (c == 'c')
1476: print_cache = 1;
1.25 thorpej 1477: }
1478:
1.134 ad 1479: if ((pc = pp->pr_cache) != NULL) {
1480: (*pr)("POOL CACHE");
1481: } else {
1482: (*pr)("POOL");
1483: }
1484:
1485: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1486: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1487: pp->pr_roflags);
1.66 thorpej 1488: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1489: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1490: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1491: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1492: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1493:
1.134 ad 1494: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1495: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1496: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1497: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1498:
1499: if (print_pagelist == 0)
1500: goto skip_pagelist;
1501:
1.88 chs 1502: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1503: (*pr)("\n\tempty page list:\n");
1.97 yamt 1504: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1505: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1506: (*pr)("\n\tfull page list:\n");
1.97 yamt 1507: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1508: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1509: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1510: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1511:
1.25 thorpej 1512: if (pp->pr_curpage == NULL)
1513: (*pr)("\tno current page\n");
1514: else
1515: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1516:
1517: skip_pagelist:
1518: if (print_log == 0)
1519: goto skip_log;
1520:
1521: (*pr)("\n");
1.3 pk 1522:
1.25 thorpej 1523: skip_log:
1.44 thorpej 1524:
1.102 chs 1525: #define PR_GROUPLIST(pcg) \
1526: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1.142 ad 1527: for (i = 0; i < pcg->pcg_size; i++) { \
1.102 chs 1528: if (pcg->pcg_objects[i].pcgo_pa != \
1529: POOL_PADDR_INVALID) { \
1530: (*pr)("\t\t\t%p, 0x%llx\n", \
1531: pcg->pcg_objects[i].pcgo_va, \
1532: (unsigned long long) \
1533: pcg->pcg_objects[i].pcgo_pa); \
1534: } else { \
1535: (*pr)("\t\t\t%p\n", \
1536: pcg->pcg_objects[i].pcgo_va); \
1537: } \
1538: }
1539:
1.134 ad 1540: if (pc != NULL) {
1541: cpuhit = 0;
1542: cpumiss = 0;
1.183 ad 1543: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.134 ad 1544: if ((cc = pc->pc_cpus[i]) == NULL)
1545: continue;
1546: cpuhit += cc->cc_hits;
1547: cpumiss += cc->cc_misses;
1548: }
1549: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1550: (*pr)("\tcache layer hits %llu misses %llu\n",
1551: pc->pc_hits, pc->pc_misses);
1552: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1553: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1554: pc->pc_contended);
1555: (*pr)("\tcache layer empty groups %u full groups %u\n",
1556: pc->pc_nempty, pc->pc_nfull);
1557: if (print_cache) {
1558: (*pr)("\tfull cache groups:\n");
1559: for (pcg = pc->pc_fullgroups; pcg != NULL;
1560: pcg = pcg->pcg_next) {
1561: PR_GROUPLIST(pcg);
1562: }
1563: (*pr)("\tempty cache groups:\n");
1564: for (pcg = pc->pc_emptygroups; pcg != NULL;
1565: pcg = pcg->pcg_next) {
1566: PR_GROUPLIST(pcg);
1567: }
1.103 chs 1568: }
1.44 thorpej 1569: }
1.102 chs 1570: #undef PR_GROUPLIST
1.88 chs 1571: }
1572:
1573: static int
1574: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1575: {
1576: struct pool_item *pi;
1.128 christos 1577: void *page;
1.88 chs 1578: int n;
1579:
1.121 yamt 1580: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1581: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1582: if (page != ph->ph_page &&
1583: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1584: if (label != NULL)
1585: printf("%s: ", label);
1586: printf("pool(%p:%s): page inconsistency: page %p;"
1587: " at page head addr %p (p %p)\n", pp,
1588: pp->pr_wchan, ph->ph_page,
1589: ph, page);
1590: return 1;
1591: }
1.88 chs 1592: }
1.3 pk 1593:
1.97 yamt 1594: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1595: return 0;
1596:
1.102 chs 1597: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1598: pi != NULL;
1.102 chs 1599: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1600:
1601: #ifdef DIAGNOSTIC
1602: if (pi->pi_magic != PI_MAGIC) {
1603: if (label != NULL)
1604: printf("%s: ", label);
1605: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1606: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1607: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1608: n, pi);
1.88 chs 1609: panic("pool");
1610: }
1611: #endif
1.121 yamt 1612: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1613: continue;
1614: }
1.128 christos 1615: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1616: if (page == ph->ph_page)
1617: continue;
1618:
1619: if (label != NULL)
1620: printf("%s: ", label);
1621: printf("pool(%p:%s): page inconsistency: page %p;"
1622: " item ordinal %d; addr %p (p %p)\n", pp,
1623: pp->pr_wchan, ph->ph_page,
1624: n, pi, page);
1625: return 1;
1626: }
1627: return 0;
1.3 pk 1628: }
1629:
1.88 chs 1630:
1.3 pk 1631: int
1.42 thorpej 1632: pool_chk(struct pool *pp, const char *label)
1.3 pk 1633: {
1634: struct pool_item_header *ph;
1635: int r = 0;
1636:
1.134 ad 1637: mutex_enter(&pp->pr_lock);
1.88 chs 1638: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1639: r = pool_chk_page(pp, label, ph);
1640: if (r) {
1641: goto out;
1642: }
1643: }
1644: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1645: r = pool_chk_page(pp, label, ph);
1646: if (r) {
1.3 pk 1647: goto out;
1648: }
1.88 chs 1649: }
1650: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1651: r = pool_chk_page(pp, label, ph);
1652: if (r) {
1.3 pk 1653: goto out;
1654: }
1655: }
1.88 chs 1656:
1.3 pk 1657: out:
1.134 ad 1658: mutex_exit(&pp->pr_lock);
1.3 pk 1659: return (r);
1.43 thorpej 1660: }
1661:
1662: /*
1663: * pool_cache_init:
1664: *
1665: * Initialize a pool cache.
1.134 ad 1666: */
1667: pool_cache_t
1668: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1669: const char *wchan, struct pool_allocator *palloc, int ipl,
1670: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1671: {
1672: pool_cache_t pc;
1673:
1674: pc = pool_get(&cache_pool, PR_WAITOK);
1675: if (pc == NULL)
1676: return NULL;
1677:
1678: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1679: palloc, ipl, ctor, dtor, arg);
1680:
1681: return pc;
1682: }
1683:
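/*
 * Illustrative usage (a sketch only; the "foo" names are hypothetical
 * and not part of this file):
 *
 *	static pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
 *	    0, 0, "foopl", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *	...
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(foo_cache, f);
 *	...
 *	pool_cache_destroy(foo_cache);
 */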
1684: /*
1685: * pool_cache_bootstrap:
1.43 thorpej 1686: *
1.134 ad 1687: * Kernel-private version of pool_cache_init(). The caller
1688: * provides initial storage.
1.43 thorpej 1689: */
1690: void
1.134 ad 1691: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1692: u_int align_offset, u_int flags, const char *wchan,
1693: struct pool_allocator *palloc, int ipl,
1694: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 1695: void *arg)
1696: {
1.134 ad 1697: CPU_INFO_ITERATOR cii;
1.145 ad 1698: pool_cache_t pc1;
1.134 ad 1699: struct cpu_info *ci;
1700: struct pool *pp;
1701:
1702: pp = &pc->pc_pool;
1703: if (palloc == NULL && ipl == IPL_NONE)
1704: palloc = &pool_allocator_nointr;
1705: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.157 ad 1706: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1.43 thorpej 1707:
1.134 ad 1708: if (ctor == NULL) {
1709: ctor = (int (*)(void *, void *, int))nullop;
1710: }
1711: if (dtor == NULL) {
1712: dtor = (void (*)(void *, void *))nullop;
1713: }
1.43 thorpej 1714:
1.134 ad 1715: pc->pc_emptygroups = NULL;
1716: pc->pc_fullgroups = NULL;
1717: pc->pc_partgroups = NULL;
1.43 thorpej 1718: pc->pc_ctor = ctor;
1719: pc->pc_dtor = dtor;
1720: pc->pc_arg = arg;
1.134 ad 1721: pc->pc_hits = 0;
1.48 thorpej 1722: pc->pc_misses = 0;
1.134 ad 1723: pc->pc_nempty = 0;
1724: pc->pc_npart = 0;
1725: pc->pc_nfull = 0;
1726: pc->pc_contended = 0;
1727: pc->pc_refcnt = 0;
1.136 yamt 1728: pc->pc_freecheck = NULL;
1.134 ad 1729:
1.142 ad 1730: if ((flags & PR_LARGECACHE) != 0) {
1731: pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1.163 ad 1732: pc->pc_pcgpool = &pcg_large_pool;
1.142 ad 1733: } else {
1734: pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1.163 ad 1735: pc->pc_pcgpool = &pcg_normal_pool;
1.142 ad 1736: }
1737:
1.134 ad 1738: /* Allocate per-CPU caches. */
1739: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1740: pc->pc_ncpu = 0;
1.139 ad 1741: if (ncpu < 2) {
1.137 ad 1742: /* XXX For sparc: boot CPU is not attached yet. */
1743: pool_cache_cpu_init1(curcpu(), pc);
1744: } else {
1745: for (CPU_INFO_FOREACH(cii, ci)) {
1746: pool_cache_cpu_init1(ci, pc);
1747: }
1.134 ad 1748: }
1.145 ad 1749:
1750: /* Add to list of all pools. */
1751: if (__predict_true(!cold))
1.134 ad 1752: mutex_enter(&pool_head_lock);
1.145 ad 1753: TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1754: if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1755: break;
1756: }
1757: if (pc1 == NULL)
1758: TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1759: else
1760: TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1761: if (__predict_true(!cold))
1.134 ad 1762: mutex_exit(&pool_head_lock);
1.145 ad 1763:
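	/*
	 * Make sure the cache is fully constructed before publishing it
	 * via pp->pr_cache, which may be read without holding pc_lock.
	 */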
1764: membar_sync();
1765: pp->pr_cache = pc;
1.43 thorpej 1766: }
1767:
1768: /*
1769: * pool_cache_destroy:
1770: *
1771: * Destroy a pool cache.
1772: */
1773: void
1.134 ad 1774: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 1775: {
1.191 para 1776:
1777: pool_cache_bootstrap_destroy(pc);
1778: pool_put(&cache_pool, pc);
1779: }
1780:
1781: /*
1782: * pool_cache_bootstrap_destroy:
1783: *
1784: * Destroy a pool cache.
1785: */
1786: void
1787: pool_cache_bootstrap_destroy(pool_cache_t pc)
1788: {
1.134 ad 1789: struct pool *pp = &pc->pc_pool;
1.175 jym 1790: u_int i;
1.134 ad 1791:
1792: /* Remove it from the global list. */
1793: mutex_enter(&pool_head_lock);
1794: while (pc->pc_refcnt != 0)
1795: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 1796: TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1.134 ad 1797: mutex_exit(&pool_head_lock);
1.43 thorpej 1798:
1799: /* First, invalidate the entire cache. */
1800: pool_cache_invalidate(pc);
1801:
1.134 ad 1802: /* Disassociate it from the pool. */
1803: mutex_enter(&pp->pr_lock);
1804: pp->pr_cache = NULL;
1805: mutex_exit(&pp->pr_lock);
1806:
1807: /* Destroy per-CPU data */
1.183 ad 1808: for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1.175 jym 1809: pool_cache_invalidate_cpu(pc, i);
1.134 ad 1810:
1811: /* Finally, destroy it. */
1812: mutex_destroy(&pc->pc_lock);
1813: pool_destroy(pp);
1814: }
1815:
1816: /*
1817: * pool_cache_cpu_init1:
1818: *
1819: * Called for each pool_cache whenever a new CPU is attached.
1820: */
1821: static void
1822: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1823: {
1824: pool_cache_cpu_t *cc;
1.137 ad 1825: int index;
1.134 ad 1826:
1.137 ad 1827: index = ci->ci_index;
1828:
1.183 ad 1829: KASSERT(index < __arraycount(pc->pc_cpus));
1.134 ad 1830:
1.137 ad 1831: if ((cc = pc->pc_cpus[index]) != NULL) {
1832: KASSERT(cc->cc_cpuindex == index);
1.134 ad 1833: return;
1834: }
1835:
1836: /*
1837: * The first CPU is 'free'. This needs to be the case for
1838: * bootstrap - we may not be able to allocate yet.
1839: */
1840: if (pc->pc_ncpu == 0) {
1841: cc = &pc->pc_cpu0;
1842: pc->pc_ncpu = 1;
1843: } else {
1844: mutex_enter(&pc->pc_lock);
1845: pc->pc_ncpu++;
1846: mutex_exit(&pc->pc_lock);
1847: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1848: }
1849:
1850: cc->cc_ipl = pc->pc_pool.pr_ipl;
1851: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1852: cc->cc_cache = pc;
1.137 ad 1853: cc->cc_cpuindex = index;
1.134 ad 1854: cc->cc_hits = 0;
1855: cc->cc_misses = 0;
1.169 yamt 1856: cc->cc_current = __UNCONST(&pcg_dummy);
1857: cc->cc_previous = __UNCONST(&pcg_dummy);
1.134 ad 1858:
1.137 ad 1859: pc->pc_cpus[index] = cc;
1.43 thorpej 1860: }
1861:
1.134 ad 1862: /*
1863: * pool_cache_cpu_init:
1864: *
1865: * Called whenever a new CPU is attached.
1866: */
1867: void
1868: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 1869: {
1.134 ad 1870: pool_cache_t pc;
1871:
1872: mutex_enter(&pool_head_lock);
1.145 ad 1873: TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1.134 ad 1874: pc->pc_refcnt++;
1875: mutex_exit(&pool_head_lock);
1.43 thorpej 1876:
1.134 ad 1877: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 1878:
1.134 ad 1879: mutex_enter(&pool_head_lock);
1880: pc->pc_refcnt--;
1881: cv_broadcast(&pool_busy);
1882: }
1883: mutex_exit(&pool_head_lock);
1.43 thorpej 1884: }
1885:
1.134 ad 1886: /*
1887: * pool_cache_reclaim:
1888: *
1889: * Reclaim memory from a pool cache.
1890: */
1891: bool
1892: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 1893: {
1894:
1.134 ad 1895: return pool_reclaim(&pc->pc_pool);
1896: }
1.43 thorpej 1897:
1.136 yamt 1898: static void
1899: pool_cache_destruct_object1(pool_cache_t pc, void *object)
1900: {
1901:
1902: (*pc->pc_dtor)(pc->pc_arg, object);
1903: pool_put(&pc->pc_pool, object);
1904: }
1905:
1.134 ad 1906: /*
1907: * pool_cache_destruct_object:
1908: *
1909: * Force destruction of an object and its release back into
1910: * the pool.
1911: */
1912: void
1913: pool_cache_destruct_object(pool_cache_t pc, void *object)
1914: {
1915:
1.136 yamt 1916: FREECHECK_IN(&pc->pc_freecheck, object);
1917:
1918: pool_cache_destruct_object1(pc, object);
1.43 thorpej 1919: }
1920:
1.134 ad 1921: /*
1922: * pool_cache_invalidate_groups:
1923: *
1924: * Invalidate a chain of groups and destruct all objects.
1925: */
1.102 chs 1926: static void
1.134 ad 1927: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 1928: {
1.134 ad 1929: void *object;
1930: pcg_t *next;
1931: int i;
1932:
1933: for (; pcg != NULL; pcg = next) {
1934: next = pcg->pcg_next;
1935:
1936: for (i = 0; i < pcg->pcg_avail; i++) {
1937: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 1938: pool_cache_destruct_object1(pc, object);
1.134 ad 1939: }
1.102 chs 1940:
1.142 ad 1941: if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
1942: pool_put(&pcg_large_pool, pcg);
1943: } else {
1944: KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
1945: pool_put(&pcg_normal_pool, pcg);
1946: }
1.102 chs 1947: }
1948: }
1949:
1.43 thorpej 1950: /*
1.134 ad 1951: * pool_cache_invalidate:
1.43 thorpej 1952: *
1.134 ad 1953: * Invalidate a pool cache (destruct and release all of the
1954: * cached objects). Does not reclaim objects from the pool.
1.176 thorpej 1955: *
1956: * Note: For pool caches that provide constructed objects, there
1957: * is an assumption that another level of synchronization is occurring
1958: * between the input to the constructor and the cache invalidation.
1.196 jym 1959: *
1960: * Invalidation is a costly process and should not be called from
1961: * interrupt context.
1.43 thorpej 1962: */
1.134 ad 1963: void
1964: pool_cache_invalidate(pool_cache_t pc)
1965: {
1.196 jym 1966: uint64_t where;
1.134 ad 1967: pcg_t *full, *empty, *part;
1.196 jym 1968:
1969: KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1.176 thorpej 1970:
1.177 jym 1971: if (ncpu < 2 || !mp_online) {
1.176 thorpej 1972: /*
1973: * We might be called early enough in the boot process
1974: * for the CPU data structures to not be fully initialized.
1.196 jym 1975: * In this case, transfer the content of the local CPU's
1976: * cache back into the global cache, as only this CPU is currently
1977: * running.
1.176 thorpej 1978: */
1.196 jym 1979: pool_cache_transfer(pc);
1.176 thorpej 1980: } else {
1981: /*
1.196 jym 1982: * Signal all CPUs that they must transfer their local
1983: * cache back to the global pool, then wait for the xcall to
1984: * complete.
1.176 thorpej 1985: */
1.196 jym 1986: where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
1987: pc, NULL);
1.176 thorpej 1988: xc_wait(where);
1989: }
1.196 jym 1990:
1991: /* Empty pool caches, then invalidate objects */
1.134 ad 1992: mutex_enter(&pc->pc_lock);
1993: full = pc->pc_fullgroups;
1994: empty = pc->pc_emptygroups;
1995: part = pc->pc_partgroups;
1996: pc->pc_fullgroups = NULL;
1997: pc->pc_emptygroups = NULL;
1998: pc->pc_partgroups = NULL;
1999: pc->pc_nfull = 0;
2000: pc->pc_nempty = 0;
2001: pc->pc_npart = 0;
2002: mutex_exit(&pc->pc_lock);
2003:
2004: pool_cache_invalidate_groups(pc, full);
2005: pool_cache_invalidate_groups(pc, empty);
2006: pool_cache_invalidate_groups(pc, part);
2007: }
2008:
1.175 jym 2009: /*
2010: * pool_cache_invalidate_cpu:
2011: *
2012: * Invalidate all cached objects in a pool cache that are bound
2013: * to the CPU identified by the given index.
2014: * It is the caller's responsibility to ensure that no operation is
2015: * taking place on this pool cache while doing this invalidation.
2016: * WARNING: as no inter-CPU locking is enforced, trying to invalidate
2017: * pool cached objects from a CPU other than the one currently
2018: * running may result in undefined behaviour.
2019: */
2020: static void
2021: pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2022: {
2023: pool_cache_cpu_t *cc;
2024: pcg_t *pcg;
2025:
2026: if ((cc = pc->pc_cpus[index]) == NULL)
2027: return;
2028:
2029: if ((pcg = cc->cc_current) != &pcg_dummy) {
2030: pcg->pcg_next = NULL;
2031: pool_cache_invalidate_groups(pc, pcg);
2032: }
2033: if ((pcg = cc->cc_previous) != &pcg_dummy) {
2034: pcg->pcg_next = NULL;
2035: pool_cache_invalidate_groups(pc, pcg);
2036: }
2037: if (cc != &pc->pc_cpu0)
2038: pool_put(&cache_cpu_pool, cc);
2039:
2040: }
2041:
1.134 ad 2042: void
2043: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2044: {
2045:
2046: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2047: }
2048:
2049: void
2050: pool_cache_setlowat(pool_cache_t pc, int n)
2051: {
2052:
2053: pool_setlowat(&pc->pc_pool, n);
2054: }
2055:
2056: void
2057: pool_cache_sethiwat(pool_cache_t pc, int n)
2058: {
2059:
2060: pool_sethiwat(&pc->pc_pool, n);
2061: }
2062:
2063: void
2064: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2065: {
2066:
2067: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2068: }
2069:
1.162 ad 2070: static bool __noinline
2071: pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
1.134 ad 2072: paddr_t *pap, int flags)
1.43 thorpej 2073: {
1.134 ad 2074: pcg_t *pcg, *cur;
2075: uint64_t ncsw;
2076: pool_cache_t pc;
1.43 thorpej 2077: void *object;
1.58 thorpej 2078:
1.168 yamt 2079: KASSERT(cc->cc_current->pcg_avail == 0);
2080: KASSERT(cc->cc_previous->pcg_avail == 0);
2081:
1.134 ad 2082: pc = cc->cc_cache;
2083: cc->cc_misses++;
1.43 thorpej 2084:
1.134 ad 2085: /*
2086: * Nothing was available locally. Try to grab a group
2087: * from the cache.
2088: */
1.162 ad 2089: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2090: ncsw = curlwp->l_ncsw;
2091: mutex_enter(&pc->pc_lock);
2092: pc->pc_contended++;
1.43 thorpej 2093:
1.134 ad 2094: /*
2095: * If we context switched while locking, then
2096: * our view of the per-CPU data is invalid:
2097: * retry.
2098: */
2099: if (curlwp->l_ncsw != ncsw) {
2100: mutex_exit(&pc->pc_lock);
1.162 ad 2101: return true;
1.43 thorpej 2102: }
1.102 chs 2103: }
1.43 thorpej 2104:
1.162 ad 2105: if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
1.43 thorpej 2106: /*
1.134 ad 2107: * If there's a full group, release our empty
2108: * group back to the cache. Install the full
2109: * group as cc_current and return.
1.43 thorpej 2110: */
1.162 ad 2111: if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
1.134 ad 2112: KASSERT(cur->pcg_avail == 0);
2113: cur->pcg_next = pc->pc_emptygroups;
2114: pc->pc_emptygroups = cur;
2115: pc->pc_nempty++;
1.87 thorpej 2116: }
1.142 ad 2117: KASSERT(pcg->pcg_avail == pcg->pcg_size);
1.134 ad 2118: cc->cc_current = pcg;
2119: pc->pc_fullgroups = pcg->pcg_next;
2120: pc->pc_hits++;
2121: pc->pc_nfull--;
2122: mutex_exit(&pc->pc_lock);
1.162 ad 2123: return true;
1.134 ad 2124: }
2125:
2126: /*
2127: * Nothing available locally or in cache. Take the slow
2128: * path: fetch a new object from the pool and construct
2129: * it.
2130: */
2131: pc->pc_misses++;
2132: mutex_exit(&pc->pc_lock);
1.162 ad 2133: splx(s);
1.134 ad 2134:
2135: object = pool_get(&pc->pc_pool, flags);
2136: *objectp = object;
1.162 ad 2137: if (__predict_false(object == NULL))
2138: return false;
1.125 ad 2139:
1.162 ad 2140: if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
1.134 ad 2141: pool_put(&pc->pc_pool, object);
2142: *objectp = NULL;
1.162 ad 2143: return false;
1.43 thorpej 2144: }
2145:
1.134 ad 2146: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2147: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2148:
1.134 ad 2149: if (pap != NULL) {
2150: #ifdef POOL_VTOPHYS
2151: *pap = POOL_VTOPHYS(object);
2152: #else
2153: *pap = POOL_PADDR_INVALID;
2154: #endif
1.102 chs 2155: }
1.43 thorpej 2156:
1.125 ad 2157: FREECHECK_OUT(&pc->pc_freecheck, object);
1.204 maxv 2158: pool_redzone_fill(&pc->pc_pool, object);
1.162 ad 2159: return false;
1.43 thorpej 2160: }
2161:
2162: /*
1.134 ad 2163: * pool_cache_get{,_paddr}:
1.43 thorpej 2164: *
1.134 ad 2165: * Get an object from a pool cache (optionally returning
2166: * the physical address of the object).
1.43 thorpej 2167: */
1.134 ad 2168: void *
2169: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2170: {
1.134 ad 2171: pool_cache_cpu_t *cc;
2172: pcg_t *pcg;
2173: void *object;
1.60 thorpej 2174: int s;
1.43 thorpej 2175:
1.184 rmind 2176: KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
1.185 rmind 2177: (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
1.190 jym 2178: "pool '%s' is IPL_NONE, but called from interrupt context\n",
2179: pc->pc_pool.pr_wchan);
1.184 rmind 2180:
1.155 ad 2181: if (flags & PR_WAITOK) {
1.154 yamt 2182: ASSERT_SLEEPABLE();
1.155 ad 2183: }
1.125 ad 2184:
1.162 ad 2185: /* Lock out interrupts and disable preemption. */
2186: s = splvm();
1.165 yamt 2187: while (/* CONSTCOND */ true) {
1.134 ad 2188: /* Try and allocate an object from the current group. */
1.162 ad 2189: cc = pc->pc_cpus[curcpu()->ci_index];
2190: KASSERT(cc->cc_cache == pc);
1.134 ad 2191: pcg = cc->cc_current;
1.162 ad 2192: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2193: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
1.162 ad 2194: if (__predict_false(pap != NULL))
1.134 ad 2195: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
1.148 yamt 2196: #if defined(DIAGNOSTIC)
1.134 ad 2197: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
1.163 ad 2198: KASSERT(pcg->pcg_avail < pcg->pcg_size);
1.134 ad 2199: KASSERT(object != NULL);
1.163 ad 2200: #endif
1.134 ad 2201: cc->cc_hits++;
1.162 ad 2202: splx(s);
1.134 ad 2203: FREECHECK_OUT(&pc->pc_freecheck, object);
1.204 maxv 2204: pool_redzone_fill(&pc->pc_pool, object);
1.134 ad 2205: return object;
1.43 thorpej 2206: }
2207:
2208: /*
1.134 ad 2209: * That failed. If the previous group isn't empty, swap
2210: * it with the current group and allocate from there.
1.43 thorpej 2211: */
1.134 ad 2212: pcg = cc->cc_previous;
1.162 ad 2213: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2214: cc->cc_previous = cc->cc_current;
2215: cc->cc_current = pcg;
2216: continue;
1.43 thorpej 2217: }
2218:
1.134 ad 2219: /*
2220: * Can't allocate from either group: try the slow path.
2221: * If get_slow() allocated an object for us, or if
1.162 ad 2222: * no more objects are available, it will return false.
1.134 ad 2223: * Otherwise, we need to retry.
2224: */
1.165 yamt 2225: if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2226: break;
2227: }
1.43 thorpej 2228:
1.134 ad 2229: return object;
1.51 thorpej 2230: }
2231:
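/*
 * Illustrative sketch of the _paddr variants (hypothetical "foo"
 * names): callers that also need the physical address, e.g. for DMA,
 * can do:
 *
 *	paddr_t pa;
 *	void *va = pool_cache_get_paddr(foo_cache, PR_NOWAIT, &pa);
 *	if (va != NULL) {
 *		...
 *		pool_cache_put_paddr(foo_cache, va, pa);
 *	}
 */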
1.162 ad 2232: static bool __noinline
2233: pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
1.51 thorpej 2234: {
1.200 pooka 2235: struct lwp *l = curlwp;
1.163 ad 2236: pcg_t *pcg, *cur;
1.134 ad 2237: uint64_t ncsw;
2238: pool_cache_t pc;
1.51 thorpej 2239:
1.168 yamt 2240: KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2241: KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2242:
1.134 ad 2243: pc = cc->cc_cache;
1.171 ad 2244: pcg = NULL;
1.134 ad 2245: cc->cc_misses++;
1.200 pooka 2246: ncsw = l->l_ncsw;
1.43 thorpej 2247:
1.171 ad 2248: /*
2249: * If there are no empty groups in the cache then allocate one
2250: * while still unlocked.
2251: */
2252: if (__predict_false(pc->pc_emptygroups == NULL)) {
2253: if (__predict_true(!pool_cache_disable)) {
2254: pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2255: }
1.200 pooka 2256: /*
2257: * If pool_get() blocked, then our view of
2258: * the per-CPU data is invalid: retry.
2259: */
2260: if (__predict_false(l->l_ncsw != ncsw)) {
2261: if (pcg != NULL) {
2262: pool_put(pc->pc_pcgpool, pcg);
2263: }
2264: return true;
2265: }
1.171 ad 2266: if (__predict_true(pcg != NULL)) {
2267: pcg->pcg_avail = 0;
2268: pcg->pcg_size = pc->pc_pcgsize;
2269: }
2270: }
2271:
1.162 ad 2272: /* Lock the cache. */
2273: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2274: mutex_enter(&pc->pc_lock);
2275: pc->pc_contended++;
1.162 ad 2276:
1.163 ad 2277: /*
2278: * If we context switched while locking, then our view of
2279: * the per-CPU data is invalid: retry.
2280: */
1.200 pooka 2281: if (__predict_false(l->l_ncsw != ncsw)) {
1.163 ad 2282: mutex_exit(&pc->pc_lock);
1.171 ad 2283: if (pcg != NULL) {
2284: pool_put(pc->pc_pcgpool, pcg);
2285: }
1.163 ad 2286: return true;
2287: }
1.162 ad 2288: }
1.102 chs 2289:
1.163 ad 2290: /* If we didn't allocate an empty group above, take one from the cache. */
1.171 ad 2291: if (pcg == NULL && pc->pc_emptygroups != NULL) {
2292: pcg = pc->pc_emptygroups;
1.163 ad 2293: pc->pc_emptygroups = pcg->pcg_next;
2294: pc->pc_nempty--;
1.134 ad 2295: }
1.130 ad 2296:
1.162 ad 2297: /*
2298: * If there's an empty group, release our full group back
2299: * to the cache. Install the empty group on the local CPU
2300: * and return.
2301: */
1.163 ad 2302: if (pcg != NULL) {
1.134 ad 2303: KASSERT(pcg->pcg_avail == 0);
1.162 ad 2304: if (__predict_false(cc->cc_previous == &pcg_dummy)) {
1.146 ad 2305: cc->cc_previous = pcg;
2306: } else {
1.162 ad 2307: cur = cc->cc_current;
2308: if (__predict_true(cur != &pcg_dummy)) {
1.163 ad 2309: KASSERT(cur->pcg_avail == cur->pcg_size);
1.146 ad 2310: cur->pcg_next = pc->pc_fullgroups;
2311: pc->pc_fullgroups = cur;
2312: pc->pc_nfull++;
2313: }
2314: cc->cc_current = pcg;
2315: }
1.163 ad 2316: pc->pc_hits++;
1.134 ad 2317: mutex_exit(&pc->pc_lock);
1.162 ad 2318: return true;
1.102 chs 2319: }
1.105 christos 2320:
1.134 ad 2321: /*
1.162 ad 2322: * Nothing available locally or in cache, and we didn't
2323: * allocate an empty group. Take the slow path and destroy
2324: * the object here and now.
1.134 ad 2325: */
2326: pc->pc_misses++;
2327: mutex_exit(&pc->pc_lock);
1.162 ad 2328: splx(s);
2329: pool_cache_destruct_object(pc, object);
1.105 christos 2330:
1.162 ad 2331: return false;
1.134 ad 2332: }
1.102 chs 2333:
1.43 thorpej 2334: /*
1.134 ad 2335: * pool_cache_put{,_paddr}:
1.43 thorpej 2336: *
1.134 ad 2337: * Put an object back to the pool cache (optionally caching the
2338: * physical address of the object).
1.43 thorpej 2339: */
1.101 thorpej 2340: void
1.134 ad 2341: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2342: {
1.134 ad 2343: pool_cache_cpu_t *cc;
2344: pcg_t *pcg;
2345: int s;
1.101 thorpej 2346:
1.172 yamt 2347: KASSERT(object != NULL);
1.204 maxv 2348: pool_redzone_check(&pc->pc_pool, object);
1.134 ad 2349: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2350:
1.162 ad 2351: /* Lock out interrupts and disable preemption. */
2352: s = splvm();
1.165 yamt 2353: while (/* CONSTCOND */ true) {
1.134 ad 2354: /* If the current group isn't full, release it there. */
1.162 ad 2355: cc = pc->pc_cpus[curcpu()->ci_index];
2356: KASSERT(cc->cc_cache == pc);
1.134 ad 2357: pcg = cc->cc_current;
1.162 ad 2358: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2359: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2360: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2361: pcg->pcg_avail++;
2362: cc->cc_hits++;
1.162 ad 2363: splx(s);
1.134 ad 2364: return;
2365: }
1.43 thorpej 2366:
1.134 ad 2367: /*
1.162 ad 2368: * That failed. If the previous group isn't full, swap
1.134 ad 2369: * it with the current group and try again.
2370: */
2371: pcg = cc->cc_previous;
1.162 ad 2372: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2373: cc->cc_previous = cc->cc_current;
2374: cc->cc_current = pcg;
2375: continue;
2376: }
1.43 thorpej 2377:
1.134 ad 2378: /*
2379: * Can't free to either group: try the slow path.
2380: * If put_slow() releases the object for us, it
1.162 ad 2381: * will return false. Otherwise we need to retry.
1.134 ad 2382: */
1.165 yamt 2383: if (!pool_cache_put_slow(cc, s, object))
2384: break;
2385: }
1.43 thorpej 2386: }
2387:
2388: /*
1.196 jym 2389: * pool_cache_transfer:
1.43 thorpej 2390: *
1.134 ad 2391: * Transfer objects from the per-CPU cache to the global cache.
2392: * Run within a cross-call thread.
1.43 thorpej 2393: */
2394: static void
1.196 jym 2395: pool_cache_transfer(pool_cache_t pc)
1.43 thorpej 2396: {
1.134 ad 2397: pool_cache_cpu_t *cc;
2398: pcg_t *prev, *cur, **list;
1.162 ad 2399: int s;
1.134 ad 2400:
1.162 ad 2401: s = splvm();
2402: mutex_enter(&pc->pc_lock);
2403: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2404: cur = cc->cc_current;
1.169 yamt 2405: cc->cc_current = __UNCONST(&pcg_dummy);
1.134 ad 2406: prev = cc->cc_previous;
1.169 yamt 2407: cc->cc_previous = __UNCONST(&pcg_dummy);
1.162 ad 2408: if (cur != &pcg_dummy) {
1.142 ad 2409: if (cur->pcg_avail == cur->pcg_size) {
1.134 ad 2410: list = &pc->pc_fullgroups;
2411: pc->pc_nfull++;
2412: } else if (cur->pcg_avail == 0) {
2413: list = &pc->pc_emptygroups;
2414: pc->pc_nempty++;
2415: } else {
2416: list = &pc->pc_partgroups;
2417: pc->pc_npart++;
2418: }
2419: cur->pcg_next = *list;
2420: *list = cur;
2421: }
1.162 ad 2422: if (prev != &pcg_dummy) {
1.142 ad 2423: if (prev->pcg_avail == prev->pcg_size) {
1.134 ad 2424: list = &pc->pc_fullgroups;
2425: pc->pc_nfull++;
2426: } else if (prev->pcg_avail == 0) {
2427: list = &pc->pc_emptygroups;
2428: pc->pc_nempty++;
2429: } else {
2430: list = &pc->pc_partgroups;
2431: pc->pc_npart++;
2432: }
2433: prev->pcg_next = *list;
2434: *list = prev;
2435: }
2436: mutex_exit(&pc->pc_lock);
2437: splx(s);
1.3 pk 2438: }
1.66 thorpej 2439:
2440: /*
2441: * Pool backend allocators.
2442: *
2443: * Each pool has a backend allocator that handles allocation, deallocation,
2444: * and any additional draining that might be needed.
2445: *
2446: * We provide two standard allocators:
2447: *
2448: * pool_allocator_kmem - the default when no allocator is specified
2449: *
2450: * pool_allocator_nointr - used for pools that will not be accessed
2451: * in interrupt context.
2452: */
2453: void *pool_page_alloc(struct pool *, int);
2454: void pool_page_free(struct pool *, void *);
2455:
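/*
 * A pool may also supply its own backend, e.g. (hypothetical sketch;
 * the "foo" names are not part of this file):
 *
 *	static struct pool_allocator foo_allocator = {
 *		.pa_alloc = foo_page_alloc,
 *		.pa_free = foo_page_free,
 *		.pa_pagesz = 0,		(0 selects the default page size)
 *	};
 */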
1.112 bjh21 2456: #ifdef POOL_SUBPAGE
2457: struct pool_allocator pool_allocator_kmem_fullpage = {
1.192 rmind 2458: .pa_alloc = pool_page_alloc,
2459: .pa_free = pool_page_free,
2460: .pa_pagesz = 0
1.112 bjh21 2461: };
2462: #else
1.66 thorpej 2463: struct pool_allocator pool_allocator_kmem = {
1.191 para 2464: .pa_alloc = pool_page_alloc,
2465: .pa_free = pool_page_free,
2466: .pa_pagesz = 0
1.66 thorpej 2467: };
1.112 bjh21 2468: #endif
1.66 thorpej 2469:
1.112 bjh21 2470: #ifdef POOL_SUBPAGE
2471: struct pool_allocator pool_allocator_nointr_fullpage = {
1.194 para 2472: .pa_alloc = pool_page_alloc,
2473: .pa_free = pool_page_free,
1.192 rmind 2474: .pa_pagesz = 0
1.112 bjh21 2475: };
2476: #else
1.66 thorpej 2477: struct pool_allocator pool_allocator_nointr = {
1.191 para 2478: .pa_alloc = pool_page_alloc,
2479: .pa_free = pool_page_free,
2480: .pa_pagesz = 0
1.66 thorpej 2481: };
1.112 bjh21 2482: #endif
1.66 thorpej 2483:
2484: #ifdef POOL_SUBPAGE
2485: void *pool_subpage_alloc(struct pool *, int);
2486: void pool_subpage_free(struct pool *, void *);
2487:
1.112 bjh21 2488: struct pool_allocator pool_allocator_kmem = {
1.193 he 2489: .pa_alloc = pool_subpage_alloc,
2490: .pa_free = pool_subpage_free,
2491: .pa_pagesz = POOL_SUBPAGE
1.112 bjh21 2492: };
2493:
2494: struct pool_allocator pool_allocator_nointr = {
1.192 rmind 2495: .pa_alloc = pool_subpage_alloc,
2496: .pa_free = pool_subpage_free,
2497: .pa_pagesz = POOL_SUBPAGE
1.66 thorpej 2498: };
2499: #endif /* POOL_SUBPAGE */
2500:
1.117 yamt 2501: static void *
2502: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2503: {
1.117 yamt 2504: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2505: void *res;
2506:
1.117 yamt 2507: res = (*pa->pa_alloc)(pp, flags);
2508: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2509: /*
1.117 yamt 2510: * We only run the drain hook here if PR_NOWAIT.
2511: * In other cases, the hook will be run in
2512: * pool_reclaim().
1.66 thorpej 2513: */
1.117 yamt 2514: if (pp->pr_drain_hook != NULL) {
2515: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2516: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2517: }
1.117 yamt 2518: }
2519: return res;
1.66 thorpej 2520: }
2521:
1.117 yamt 2522: static void
1.66 thorpej 2523: pool_allocator_free(struct pool *pp, void *v)
2524: {
2525: struct pool_allocator *pa = pp->pr_alloc;
2526:
2527: (*pa->pa_free)(pp, v);
2528: }
2529:
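/*
 * pool_page_alloc:
 *
 *	Default backend: allocate pa_pagesz bytes of mapped kernel
 *	memory from kmem_va_arena, sleeping only if the caller passed
 *	PR_WAITOK.
 */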
2530: void *
1.124 yamt 2531: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2532: {
1.192 rmind 2533: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
1.191 para 2534: vmem_addr_t va;
1.192 rmind 2535: int ret;
1.191 para 2536:
1.192 rmind 2537: ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2538: vflags | VM_INSTANTFIT, &va);
1.66 thorpej 2539:
1.192 rmind 2540: return ret ? NULL : (void *)va;
1.66 thorpej 2541: }
2542:
2543: void
1.124 yamt 2544: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2545: {
2546:
1.191 para 2547: uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
1.98 yamt 2548: }
2549:
2550: static void *
1.124 yamt 2551: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2552: {
1.192 rmind 2553: const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2554: vmem_addr_t va;
2555: int ret;
1.191 para 2556:
1.192 rmind 2557: ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2558: vflags | VM_INSTANTFIT, &va);
1.98 yamt 2559:
1.192 rmind 2560: return ret ? NULL : (void *)va;
1.98 yamt 2561: }
2562:
2563: static void
1.124 yamt 2564: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2565: {
2566:
1.192 rmind 2567: vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
1.66 thorpej 2568: }
2569:
1.204 maxv 2570: #ifdef POOL_REDZONE
2571: #if defined(_LP64)
2572: # define PRIME 0x9e37fffffffc0000UL
2573: #else /* defined(_LP64) */
2574: # define PRIME 0x9e3779b1
2575: #endif /* defined(_LP64) */
2576: #define STATIC_BYTE 0xFE
2577: CTASSERT(POOL_REDZONE_SIZE > 1);
2578:
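/*
 * Derive a cheap, reproducible fill byte from an address: multiply by
 * a large prime and keep the most significant byte, so that nearby
 * addresses yield distinct patterns.
 */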
2579: static inline uint8_t
2580: pool_pattern_generate(const void *p)
2581: {
2582: 	return (uint8_t)((((uintptr_t)p) * PRIME) >>
2583: 	    ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
2584: }
2585:
2586: static void
2587: pool_redzone_init(struct pool *pp, size_t requested_size)
2588: {
2589: size_t nsz;
2590:
2591: if (pp->pr_roflags & PR_NOTOUCH) {
2592: pp->pr_reqsize = 0;
2593: pp->pr_redzone = false;
2594: return;
2595: }
2596:
2597: /*
2598: * We may have extended the requested size earlier; check if
2599: * there's naturally space in the padding for a red zone.
2600: */
2601: if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) {
2602: pp->pr_reqsize = requested_size;
2603: pp->pr_redzone = true;
2604: return;
2605: }
2606:
2607: /*
2608: * No space in the natural padding; check if we can extend the
2609: * item size of the pool a bit.
2610: */
2611: nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align);
2612: if (nsz <= pp->pr_alloc->pa_pagesz) {
2613: /* Ok, we can */
2614: pp->pr_size = nsz;
2615: pp->pr_reqsize = requested_size;
2616: pp->pr_redzone = true;
2617: } else {
2618: /* No space for a red zone... snif :'( */
2619: pp->pr_reqsize = 0;
2620: pp->pr_redzone = false;
2621: printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
2622: }
2623: }
2624:
2625: static void
2626: pool_redzone_fill(struct pool *pp, void *p)
2627: {
2628: uint8_t *cp, pat;
2629: const uint8_t *ep;
2630:
2631: if (!pp->pr_redzone)
2632: return;
2633:
2634: cp = (uint8_t *)p + pp->pr_reqsize;
2635: ep = cp + POOL_REDZONE_SIZE;
2636:
2637: /*
2638: * We really don't want the first byte of the red zone to be '\0';
2639: * an off-by-one in a string may not be properly detected.
2640: */
2641: pat = pool_pattern_generate(cp);
2642: *cp = (pat == '\0') ? STATIC_BYTE: pat;
2643: cp++;
2644:
2645: while (cp < ep) {
2646: *cp = pool_pattern_generate(cp);
2647: cp++;
2648: }
2649: }
2650:
2651: static void
2652: pool_redzone_check(struct pool *pp, void *p)
2653: {
2654: uint8_t *cp, pat, expected;
2655: const uint8_t *ep;
2656:
2657: if (!pp->pr_redzone)
2658: return;
2659:
2660: cp = (uint8_t *)p + pp->pr_reqsize;
2661: ep = cp + POOL_REDZONE_SIZE;
2662:
2663: pat = pool_pattern_generate(cp);
2664: expected = (pat == '\0') ? STATIC_BYTE: pat;
2665: if (expected != *cp) {
2666: panic("%s: %p: 0x%02x != 0x%02x\n",
2667: __func__, cp, *cp, expected);
2668: }
2669: cp++;
2670:
2671: while (cp < ep) {
2672: expected = pool_pattern_generate(cp);
2673: if (*cp != expected) {
2674: panic("%s: %p: 0x%02x != 0x%02x\n",
2675: __func__, cp, *cp, expected);
2676: }
2677: cp++;
2678: }
2679: }
2680:
2681: #endif /* POOL_REDZONE */
2682:
2683:
1.66 thorpej 2684: #ifdef POOL_SUBPAGE
2685: /* Sub-page allocator, for machines with large hardware pages. */
2686: void *
2687: pool_subpage_alloc(struct pool *pp, int flags)
2688: {
1.134 ad 2689: return pool_get(&psppool, flags);
1.66 thorpej 2690: }
2691:
2692: void
2693: pool_subpage_free(struct pool *pp, void *v)
2694: {
2695: pool_put(&psppool, v);
2696: }
2697:
1.112 bjh21 2698: #endif /* POOL_SUBPAGE */
1.141 yamt 2699:
2700: #if defined(DDB)
2701: static bool
2702: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2703: {
2704:
2705: return (uintptr_t)ph->ph_page <= addr &&
2706: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2707: }
2708:
1.143 yamt 2709: static bool
2710: pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2711: {
2712:
2713: return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2714: }
2715:
2716: static bool
2717: pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2718: {
2719: int i;
2720:
2721: if (pcg == NULL) {
2722: return false;
2723: }
1.144 yamt 2724: for (i = 0; i < pcg->pcg_avail; i++) {
1.143 yamt 2725: if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2726: return true;
2727: }
2728: }
2729: return false;
2730: }
2731:
2732: static bool
2733: pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2734: {
2735:
2736: if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2737: unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2738: pool_item_bitmap_t *bitmap =
2739: ph->ph_bitmap + (idx / BITMAP_SIZE);
2740: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2741:
2742: return (*bitmap & mask) == 0;
2743: } else {
2744: struct pool_item *pi;
2745:
2746: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2747: if (pool_in_item(pp, pi, addr)) {
2748: return false;
2749: }
2750: }
2751: return true;
2752: }
2753: }
2754:
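/*
 * pool_whatis:
 *
 *	ddb(4) helper: report which pool, if any, the given address
 *	falls within, and whether the containing item is free,
 *	allocated, in the global cache or cached by a CPU.
 */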
1.141 yamt 2755: void
2756: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2757: {
2758: struct pool *pp;
2759:
1.145 ad 2760: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.141 yamt 2761: struct pool_item_header *ph;
2762: uintptr_t item;
1.143 yamt 2763: bool allocated = true;
2764: bool incache = false;
2765: bool incpucache = false;
2766: char cpucachestr[32];
1.141 yamt 2767:
2768: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2769: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2770: if (pool_in_page(pp, ph, addr)) {
2771: goto found;
2772: }
2773: }
2774: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2775: if (pool_in_page(pp, ph, addr)) {
1.143 yamt 2776: allocated =
2777: pool_allocated(pp, ph, addr);
2778: goto found;
2779: }
2780: }
2781: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2782: if (pool_in_page(pp, ph, addr)) {
2783: allocated = false;
1.141 yamt 2784: goto found;
2785: }
2786: }
2787: continue;
2788: } else {
2789: ph = pr_find_pagehead_noalign(pp, (void *)addr);
2790: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2791: continue;
2792: }
1.143 yamt 2793: allocated = pool_allocated(pp, ph, addr);
1.141 yamt 2794: }
2795: found:
1.143 yamt 2796: if (allocated && pp->pr_cache) {
2797: pool_cache_t pc = pp->pr_cache;
2798: struct pool_cache_group *pcg;
2799: int i;
2800:
2801: for (pcg = pc->pc_fullgroups; pcg != NULL;
2802: pcg = pcg->pcg_next) {
2803: if (pool_in_cg(pp, pcg, addr)) {
2804: incache = true;
2805: goto print;
2806: }
2807: }
1.183 ad 2808: for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1.143 yamt 2809: pool_cache_cpu_t *cc;
2810:
2811: if ((cc = pc->pc_cpus[i]) == NULL) {
2812: continue;
2813: }
2814: if (pool_in_cg(pp, cc->cc_current, addr) ||
2815: pool_in_cg(pp, cc->cc_previous, addr)) {
2816: struct cpu_info *ci =
1.170 ad 2817: cpu_lookup(i);
1.143 yamt 2818:
2819: incpucache = true;
2820: snprintf(cpucachestr,
2821: sizeof(cpucachestr),
2822: "cached by CPU %u",
1.153 martin 2823: ci->ci_index);
1.143 yamt 2824: goto print;
2825: }
2826: }
2827: }
2828: print:
1.141 yamt 2829: item = (uintptr_t)ph->ph_page + ph->ph_off;
2830: item = item + rounddown(addr - item, pp->pr_size);
1.143 yamt 2831: (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
1.141 yamt 2832: (void *)addr, item, (size_t)(addr - item),
1.143 yamt 2833: pp->pr_wchan,
2834: incpucache ? cpucachestr :
2835: incache ? "cached" : allocated ? "allocated" : "free");
1.141 yamt 2836: }
2837: }
2838: #endif /* defined(DDB) */
1.203 joerg 2839:
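/*
 * pool_sysctl:
 *
 *	Export one struct pool_sysctl record per pool. Per the usual
 *	sysctl(9) protocol, a NULL oldp merely reports the buffer size
 *	needed for the full array.
 */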
2840: static int
2841: pool_sysctl(SYSCTLFN_ARGS)
2842: {
2843: struct pool_sysctl data;
2844: struct pool *pp;
2845: struct pool_cache *pc;
2846: pool_cache_cpu_t *cc;
2847: int error;
2848: size_t i, written;
2849:
2850: if (oldp == NULL) {
2851: *oldlenp = 0;
2852: TAILQ_FOREACH(pp, &pool_head, pr_poollist)
2853: *oldlenp += sizeof(data);
2854: return 0;
2855: }
2856:
2857: memset(&data, 0, sizeof(data));
2858: error = 0;
2859: written = 0;
2860: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2861: if (written + sizeof(data) > *oldlenp)
2862: break;
2863: strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
2864: data.pr_pagesize = pp->pr_alloc->pa_pagesz;
2865: data.pr_flags = pp->pr_roflags | pp->pr_flags;
2866: #define COPY(field) data.field = pp->field
2867: COPY(pr_size);
2868:
2869: COPY(pr_itemsperpage);
2870: COPY(pr_nitems);
2871: COPY(pr_nout);
2872: COPY(pr_hardlimit);
2873: COPY(pr_npages);
2874: COPY(pr_minpages);
2875: COPY(pr_maxpages);
2876:
2877: COPY(pr_nget);
2878: COPY(pr_nfail);
2879: COPY(pr_nput);
2880: COPY(pr_npagealloc);
2881: COPY(pr_npagefree);
2882: COPY(pr_hiwat);
2883: COPY(pr_nidle);
2884: #undef COPY
2885:
2886: data.pr_cache_nmiss_pcpu = 0;
2887: data.pr_cache_nhit_pcpu = 0;
2888: if (pp->pr_cache) {
2889: pc = pp->pr_cache;
2890: data.pr_cache_meta_size = pc->pc_pcgsize;
2891: data.pr_cache_nfull = pc->pc_nfull;
2892: data.pr_cache_npartial = pc->pc_npart;
2893: data.pr_cache_nempty = pc->pc_nempty;
2894: data.pr_cache_ncontended = pc->pc_contended;
2895: data.pr_cache_nmiss_global = pc->pc_misses;
2896: data.pr_cache_nhit_global = pc->pc_hits;
2897: for (i = 0; i < pc->pc_ncpu; ++i) {
2898: cc = pc->pc_cpus[i];
2899: if (cc == NULL)
2900: continue;
1.206 knakahar 2901: data.pr_cache_nmiss_pcpu += cc->cc_misses;
2902: data.pr_cache_nhit_pcpu += cc->cc_hits;
1.203 joerg 2903: }
2904: } else {
2905: data.pr_cache_meta_size = 0;
2906: data.pr_cache_nfull = 0;
2907: data.pr_cache_npartial = 0;
2908: data.pr_cache_nempty = 0;
2909: data.pr_cache_ncontended = 0;
2910: data.pr_cache_nmiss_global = 0;
2911: data.pr_cache_nhit_global = 0;
2912: }
2913:
2914: error = sysctl_copyout(l, &data, oldp, sizeof(data));
2915: if (error)
2916: break;
2917: written += sizeof(data);
2918: oldp = (char *)oldp + sizeof(data);
2919: }
2920:
2921: *oldlenp = written;
2922: return error;
2923: }
2924:
2925: SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
2926: {
2927: const struct sysctlnode *rnode = NULL;
2928:
2929: sysctl_createv(clog, 0, NULL, &rnode,
2930: CTLFLAG_PERMANENT,
2931: CTLTYPE_STRUCT, "pool",
2932: SYSCTL_DESCR("Get pool statistics"),
2933: pool_sysctl, 0, NULL, 0,
2934: CTL_KERN, CTL_CREATE, CTL_EOL);
2935: }