Annotation of src/sys/kern/subr_pool.c, Revision 1.175
1.175 ! jym 1: /* $NetBSD: subr_pool.c,v 1.174 2009/09/13 18:45:11 pooka Exp $ */
1.1 pk 2:
3: /*-
1.161 ad 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.134 ad 9: * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: *
20: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30: * POSSIBILITY OF SUCH DAMAGE.
31: */
1.64 lukem 32:
33: #include <sys/cdefs.h>
1.175 ! jym 34: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.174 2009/09/13 18:45:11 pooka Exp $");
1.24 scottr 35:
1.141 yamt 36: #include "opt_ddb.h"
1.25 thorpej 37: #include "opt_pool.h"
1.24 scottr 38: #include "opt_poollog.h"
1.28 thorpej 39: #include "opt_lockdebug.h"
1.1 pk 40:
41: #include <sys/param.h>
42: #include <sys/systm.h>
1.135 yamt 43: #include <sys/bitops.h>
1.1 pk 44: #include <sys/proc.h>
45: #include <sys/errno.h>
46: #include <sys/kernel.h>
47: #include <sys/malloc.h>
48: #include <sys/pool.h>
1.20 thorpej 49: #include <sys/syslog.h>
1.125 ad 50: #include <sys/debug.h>
1.134 ad 51: #include <sys/lockdebug.h>
52: #include <sys/xcall.h>
53: #include <sys/cpu.h>
1.145 ad 54: #include <sys/atomic.h>
1.3 pk 55:
56: #include <uvm/uvm.h>
57:
1.1 pk 58: /*
59: * Pool resource management utility.
1.3 pk 60: *
1.88 chs 61: * Memory is allocated in pages which are split into pieces according to
62: * the pool item size. Each page is kept on one of three lists in the
63: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
64: * for empty, full and partially-full pages respectively. The individual
65: * pool items are on a linked list headed by `ph_itemlist' in each page
66: * header. The memory for building the page list is either taken from
67: * the allocated pages themselves (for small pool items) or taken from
68: * an internal pool of page headers (`phpool').
1.1 pk 69: */
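/*
 * A minimal usage sketch (the pool "frobpool" and the item type
 * "struct frob" are illustrative only, not part of this file):
 *
 *	static struct pool frobpool;
 *
 *	pool_init(&frobpool, sizeof(struct frob), 0, 0, 0,
 *	    "frobpl", &pool_allocator_nointr, IPL_NONE);
 *
 *	struct frob *f = pool_get(&frobpool, PR_WAITOK);
 *	...
 *	pool_put(&frobpool, f);
 *	...
 *	pool_destroy(&frobpool);
 */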
70:
1.3 pk 71: /* List of all pools */
1.173 rmind 72: static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.134 ad 73:
1.3 pk 74: /* Private pool for page header structures */
1.97 yamt 75: #define PHPOOL_MAX 8
76: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 77: #define PHPOOL_FREELIST_NELEM(idx) \
78: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
1.3 pk 79:
1.62 bjh21 80: #ifdef POOL_SUBPAGE
81: /* Pool of subpages for use by normal pools. */
82: static struct pool psppool;
83: #endif
84:
1.117 yamt 85: static SLIST_HEAD(, pool_allocator) pa_deferinitq =
86: SLIST_HEAD_INITIALIZER(pa_deferinitq);
87:
1.98 yamt 88: static void *pool_page_alloc_meta(struct pool *, int);
89: static void pool_page_free_meta(struct pool *, void *);
90:
91: /* allocator for pool metadata */
1.134 ad 92: struct pool_allocator pool_allocator_meta = {
1.117 yamt 93: pool_page_alloc_meta, pool_page_free_meta,
94: .pa_backingmapptr = &kmem_map,
1.98 yamt 95: };
96:
1.3 pk 97: /* # of seconds to retain page after last use */
98: int pool_inactive_time = 10;
99:
100: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 101: static struct pool *drainpp;
102:
1.134 ad 103: /* This lock protects both pool_head and drainpp. */
104: static kmutex_t pool_head_lock;
105: static kcondvar_t pool_busy;
1.3 pk 106:
1.135 yamt 107: typedef uint32_t pool_item_bitmap_t;
108: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
109: #define BITMAP_MASK (BITMAP_SIZE - 1)
1.99 yamt 110:
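/*
 * For PR_NOTOUCH pools the page header (below) carries a bitmap of free
 * items instead of a free-item list: item index idx lives in word
 * idx / BITMAP_SIZE at bit idx & BITMAP_MASK (e.g. with 32-bit words,
 * item 37 is bit 5 of word 1), and a set bit means the item is free.
 */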
1.3 pk 111: struct pool_item_header {
112: /* Page headers */
1.88 chs 113: LIST_ENTRY(pool_item_header)
1.3 pk 114: ph_pagelist; /* pool page list */
1.88 chs 115: SPLAY_ENTRY(pool_item_header)
116: ph_node; /* Off-page page headers */
1.128 christos 117: void * ph_page; /* this page's address */
1.151 yamt 118: uint32_t ph_time; /* last referenced */
1.135 yamt 119: uint16_t ph_nmissing; /* # of chunks in use */
1.141 yamt 120: uint16_t ph_off; /* start offset in page */
1.97 yamt 121: union {
122: /* !PR_NOTOUCH */
123: struct {
1.102 chs 124: LIST_HEAD(, pool_item)
1.97 yamt 125: phu_itemlist; /* chunk list for this page */
126: } phu_normal;
127: /* PR_NOTOUCH */
128: struct {
1.141 yamt 129: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 130: } phu_notouch;
131: } ph_u;
1.3 pk 132: };
1.97 yamt 133: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.135 yamt 134: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 135:
1.1 pk 136: struct pool_item {
1.3 pk 137: #ifdef DIAGNOSTIC
1.82 thorpej 138: u_int pi_magic;
1.33 chs 139: #endif
1.134 ad 140: #define PI_MAGIC 0xdeaddeadU
1.3 pk 141: /* Other entries use only this list entry */
1.102 chs 142: LIST_ENTRY(pool_item) pi_list;
1.3 pk 143: };
144:
1.53 thorpej 145: #define POOL_NEEDS_CATCHUP(pp) \
146: ((pp)->pr_nitems < (pp)->pr_minitems)
147:
1.43 thorpej 148: /*
149: * Pool cache management.
150: *
151: * Pool caches provide a way for constructed objects to be cached by the
152: * pool subsystem. This can lead to performance improvements by avoiding
153: * needless object construction/destruction; it is deferred until absolutely
154: * necessary.
155: *
1.134 ad 156: * Caches are grouped into cache groups. Each cache group references up
157: * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
158: * object from the pool, it calls the object's constructor and places it
159: * into a cache group. When a cache group frees an object back to the
160: * pool, it first calls the object's destructor. This allows the object
161: * to persist in constructed form while freed to the cache.
162: *
163: * The pool references each cache, so that when a pool is drained by the
164: * pagedaemon, it can drain each individual cache as well. Each time a
165: * cache is drained, the most idle cache group is freed to the pool in
166: * its entirety.
1.43 thorpej 167: *
 168: 	 * Pool caches are layered on top of pools. By layering them, we can avoid
169: * the complexity of cache management for pools which would not benefit
170: * from it.
171: */
172:
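/*
 * A minimal pool_cache usage sketch, assuming the pool_cache_init()
 * interface documented in pool_cache(9); "frob_cache", "struct frob",
 * "frob_ctor" and "frob_dtor" are illustrative names only:
 *
 *	pool_cache_t frob_cache;
 *
 *	frob_cache = pool_cache_init(sizeof(struct frob), coherency_unit,
 *	    0, 0, "frobcache", NULL, IPL_NONE, frob_ctor, frob_dtor, NULL);
 *
 *	struct frob *f = pool_cache_get(frob_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(frob_cache, f);
 */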
1.142 ad 173: static struct pool pcg_normal_pool;
174: static struct pool pcg_large_pool;
1.134 ad 175: static struct pool cache_pool;
176: static struct pool cache_cpu_pool;
1.3 pk 177:
1.145 ad 178: /* List of all caches. */
179: TAILQ_HEAD(,pool_cache) pool_cache_head =
180: TAILQ_HEAD_INITIALIZER(pool_cache_head);
181:
1.162 ad 182: int pool_cache_disable; /* global disable for caching */
1.169 yamt 183: static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */
1.145 ad 184:
1.162 ad 185: static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
186: void *);
187: static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
188: void **, paddr_t *, int);
1.134 ad 189: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
190: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
1.175 ! jym 191: static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
1.134 ad 192: static void pool_cache_xcall(pool_cache_t);
1.3 pk 193:
1.42 thorpej 194: static int pool_catchup(struct pool *);
1.128 christos 195: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 196: struct pool_item_header *);
1.88 chs 197: static void pool_update_curpage(struct pool *);
1.66 thorpej 198:
1.113 yamt 199: static int pool_grow(struct pool *, int);
1.117 yamt 200: static void *pool_allocator_alloc(struct pool *, int);
201: static void pool_allocator_free(struct pool *, void *);
1.3 pk 202:
1.97 yamt 203: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 204: void (*)(const char *, ...));
1.42 thorpej 205: static void pool_print1(struct pool *, const char *,
206: void (*)(const char *, ...));
1.3 pk 207:
1.88 chs 208: static int pool_chk_page(struct pool *, const char *,
209: struct pool_item_header *);
210:
1.3 pk 211: /*
1.52 thorpej 212: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 213: */
214: struct pool_log {
215: const char *pl_file;
216: long pl_line;
217: int pl_action;
1.25 thorpej 218: #define PRLOG_GET 1
219: #define PRLOG_PUT 2
1.3 pk 220: void *pl_addr;
1.1 pk 221: };
222:
1.86 matt 223: #ifdef POOL_DIAGNOSTIC
1.3 pk 224: /* Number of entries in pool log buffers */
1.17 thorpej 225: #ifndef POOL_LOGSIZE
226: #define POOL_LOGSIZE 10
227: #endif
228:
229: int pool_logsize = POOL_LOGSIZE;
1.1 pk 230:
1.110 perry 231: static inline void
1.42 thorpej 232: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 233: {
234: int n = pp->pr_curlogentry;
235: struct pool_log *pl;
236:
1.20 thorpej 237: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 238: return;
239:
240: /*
241: * Fill in the current entry. Wrap around and overwrite
242: * the oldest entry if necessary.
243: */
244: pl = &pp->pr_log[n];
245: pl->pl_file = file;
246: pl->pl_line = line;
247: pl->pl_action = action;
248: pl->pl_addr = v;
249: if (++n >= pp->pr_logsize)
250: n = 0;
251: pp->pr_curlogentry = n;
252: }
253:
254: static void
1.42 thorpej 255: pr_printlog(struct pool *pp, struct pool_item *pi,
256: void (*pr)(const char *, ...))
1.3 pk 257: {
258: int i = pp->pr_logsize;
259: int n = pp->pr_curlogentry;
260:
1.20 thorpej 261: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 262: return;
263:
264: /*
265: * Print all entries in this pool's log.
266: */
267: while (i-- > 0) {
268: struct pool_log *pl = &pp->pr_log[n];
269: if (pl->pl_action != 0) {
1.25 thorpej 270: if (pi == NULL || pi == pl->pl_addr) {
271: (*pr)("\tlog entry %d:\n", i);
272: (*pr)("\t\taction = %s, addr = %p\n",
273: pl->pl_action == PRLOG_GET ? "get" : "put",
274: pl->pl_addr);
275: (*pr)("\t\tfile: %s at line %lu\n",
276: pl->pl_file, pl->pl_line);
277: }
1.3 pk 278: }
279: if (++n >= pp->pr_logsize)
280: n = 0;
281: }
282: }
1.25 thorpej 283:
1.110 perry 284: static inline void
1.42 thorpej 285: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 286: {
287:
1.34 thorpej 288: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 289: printf("pool %s: reentrancy at file %s line %ld\n",
290: pp->pr_wchan, file, line);
291: printf(" previous entry at file %s line %ld\n",
292: pp->pr_entered_file, pp->pr_entered_line);
293: panic("pr_enter");
294: }
295:
296: pp->pr_entered_file = file;
297: pp->pr_entered_line = line;
298: }
299:
1.110 perry 300: static inline void
1.42 thorpej 301: pr_leave(struct pool *pp)
1.25 thorpej 302: {
303:
1.34 thorpej 304: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 305: printf("pool %s not entered?\n", pp->pr_wchan);
306: panic("pr_leave");
307: }
308:
309: pp->pr_entered_file = NULL;
310: pp->pr_entered_line = 0;
311: }
312:
1.110 perry 313: static inline void
1.42 thorpej 314: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 315: {
316:
317: if (pp->pr_entered_file != NULL)
318: (*pr)("\n\tcurrently entered from file %s line %ld\n",
319: pp->pr_entered_file, pp->pr_entered_line);
320: }
1.3 pk 321: #else
1.25 thorpej 322: #define pr_log(pp, v, action, file, line)
323: #define pr_printlog(pp, pi, pr)
324: #define pr_enter(pp, file, line)
325: #define pr_leave(pp)
326: #define pr_enter_check(pp, pr)
1.59 thorpej 327: #endif /* POOL_DIAGNOSTIC */
1.3 pk 328:
1.135 yamt 329: static inline unsigned int
1.97 yamt 330: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
331: const void *v)
332: {
333: const char *cp = v;
1.135 yamt 334: unsigned int idx;
1.97 yamt 335:
336: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 337: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 338: KASSERT(idx < pp->pr_itemsperpage);
339: return idx;
340: }
341:
1.110 perry 342: static inline void
1.97 yamt 343: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
344: void *obj)
345: {
1.135 yamt 346: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
347: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
348: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 349:
1.135 yamt 350: KASSERT((*bitmap & mask) == 0);
351: *bitmap |= mask;
1.97 yamt 352: }
353:
1.110 perry 354: static inline void *
1.97 yamt 355: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
356: {
1.135 yamt 357: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
358: unsigned int idx;
359: int i;
1.97 yamt 360:
1.135 yamt 361: for (i = 0; ; i++) {
362: int bit;
1.97 yamt 363:
1.135 yamt 364: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
365: bit = ffs32(bitmap[i]);
366: if (bit) {
367: pool_item_bitmap_t mask;
368:
369: bit--;
370: idx = (i * BITMAP_SIZE) + bit;
371: mask = 1 << bit;
372: KASSERT((bitmap[i] & mask) != 0);
373: bitmap[i] &= ~mask;
374: break;
375: }
376: }
377: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 378: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 379: }
380:
1.135 yamt 381: static inline void
1.141 yamt 382: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
1.135 yamt 383: {
384: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
385: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
386: int i;
387:
388: for (i = 0; i < n; i++) {
389: bitmap[i] = (pool_item_bitmap_t)-1;
390: }
391: }
392:
1.110 perry 393: static inline int
1.88 chs 394: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
395: {
1.121 yamt 396:
397: /*
 398: 	 * we consider a pool_item_header with a smaller ph_page to be bigger.
399: * (this unnatural ordering is for the benefit of pr_find_pagehead.)
400: */
401:
1.88 chs 402: if (a->ph_page < b->ph_page)
1.121 yamt 403: return (1);
404: else if (a->ph_page > b->ph_page)
1.88 chs 405: return (-1);
406: else
407: return (0);
408: }
409:
410: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
411: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
412:
1.141 yamt 413: static inline struct pool_item_header *
414: pr_find_pagehead_noalign(struct pool *pp, void *v)
415: {
416: struct pool_item_header *ph, tmp;
417:
418: tmp.ph_page = (void *)(uintptr_t)v;
419: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
420: if (ph == NULL) {
421: ph = SPLAY_ROOT(&pp->pr_phtree);
422: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
423: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
424: }
425: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
426: }
427:
428: return ph;
429: }
430:
1.3 pk 431: /*
1.121 yamt 432: * Return the pool page header based on item address.
1.3 pk 433: */
1.110 perry 434: static inline struct pool_item_header *
1.121 yamt 435: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 436: {
1.88 chs 437: struct pool_item_header *ph, tmp;
1.3 pk 438:
1.121 yamt 439: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.141 yamt 440: ph = pr_find_pagehead_noalign(pp, v);
1.121 yamt 441: } else {
1.128 christos 442: void *page =
443: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 444:
445: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 446: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 447: } else {
448: tmp.ph_page = page;
449: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
450: }
451: }
1.3 pk 452:
1.121 yamt 453: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 454: ((char *)ph->ph_page <= (char *)v &&
455: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 456: return ph;
1.3 pk 457: }
458:
1.101 thorpej 459: static void
460: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
461: {
462: struct pool_item_header *ph;
463:
464: while ((ph = LIST_FIRST(pq)) != NULL) {
465: LIST_REMOVE(ph, ph_pagelist);
466: pool_allocator_free(pp, ph->ph_page);
1.134 ad 467: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 468: pool_put(pp->pr_phpool, ph);
469: }
470: }
471:
1.3 pk 472: /*
473: * Remove a page from the pool.
474: */
1.110 perry 475: static inline void
1.61 chs 476: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
477: struct pool_pagelist *pq)
1.3 pk 478: {
479:
1.134 ad 480: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 481:
1.3 pk 482: /*
1.7 thorpej 483: * If the page was idle, decrement the idle page count.
1.3 pk 484: */
1.6 thorpej 485: if (ph->ph_nmissing == 0) {
486: #ifdef DIAGNOSTIC
487: if (pp->pr_nidle == 0)
488: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 489: if (pp->pr_nitems < pp->pr_itemsperpage)
490: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 491: #endif
492: pp->pr_nidle--;
493: }
1.7 thorpej 494:
1.20 thorpej 495: pp->pr_nitems -= pp->pr_itemsperpage;
496:
1.7 thorpej 497: /*
1.101 thorpej 498: * Unlink the page from the pool and queue it for release.
1.7 thorpej 499: */
1.88 chs 500: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 501: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
502: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 503: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
504:
1.7 thorpej 505: pp->pr_npages--;
506: pp->pr_npagefree++;
1.6 thorpej 507:
1.88 chs 508: pool_update_curpage(pp);
1.3 pk 509: }
510:
1.126 thorpej 511: static bool
1.117 yamt 512: pa_starved_p(struct pool_allocator *pa)
513: {
514:
515: if (pa->pa_backingmap != NULL) {
516: return vm_map_starved_p(pa->pa_backingmap);
517: }
1.127 thorpej 518: return false;
1.117 yamt 519: }
520:
521: static int
1.124 yamt 522: pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
1.117 yamt 523: {
524: struct pool *pp = obj;
525: struct pool_allocator *pa = pp->pr_alloc;
526:
527: KASSERT(&pp->pr_reclaimerentry == ce);
528: pool_reclaim(pp);
529: if (!pa_starved_p(pa)) {
530: return CALLBACK_CHAIN_ABORT;
531: }
532: return CALLBACK_CHAIN_CONTINUE;
533: }
534:
535: static void
536: pool_reclaim_register(struct pool *pp)
537: {
538: struct vm_map *map = pp->pr_alloc->pa_backingmap;
539: int s;
540:
541: if (map == NULL) {
542: return;
543: }
544:
545: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
546: callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
547: &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
548: splx(s);
549: }
550:
551: static void
552: pool_reclaim_unregister(struct pool *pp)
553: {
554: struct vm_map *map = pp->pr_alloc->pa_backingmap;
555: int s;
556:
557: if (map == NULL) {
558: return;
559: }
560:
561: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
562: callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
563: &pp->pr_reclaimerentry);
564: splx(s);
565: }
566:
567: static void
568: pa_reclaim_register(struct pool_allocator *pa)
569: {
570: struct vm_map *map = *pa->pa_backingmapptr;
571: struct pool *pp;
572:
573: KASSERT(pa->pa_backingmap == NULL);
574: if (map == NULL) {
575: SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
576: return;
577: }
578: pa->pa_backingmap = map;
579: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
580: pool_reclaim_register(pp);
581: }
582: }
583:
1.3 pk 584: /*
1.94 simonb 585:  * Initialize the pool subsystem.
586: */
587: void
1.117 yamt 588: pool_subsystem_init(void)
1.94 simonb 589: {
1.117 yamt 590: struct pool_allocator *pa;
1.94 simonb 591:
1.134 ad 592: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
593: cv_init(&pool_busy, "poolbusy");
594:
1.117 yamt 595: while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
596: KASSERT(pa->pa_backingmapptr != NULL);
597: KASSERT(*pa->pa_backingmapptr != NULL);
598: SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
599: pa_reclaim_register(pa);
600: }
1.134 ad 601:
1.156 ad 602: pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
1.134 ad 603: 0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
604:
1.156 ad 605: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
1.134 ad 606: 0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
1.94 simonb 607: }
608:
609: /*
1.3 pk 610: * Initialize the given pool resource structure.
611: *
612: * We export this routine to allow other kernel parts to declare
613: * static pools that must be initialized before malloc() is available.
614: */
615: void
1.42 thorpej 616: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 617: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 618: {
1.116 simonb 619: struct pool *pp1;
1.92 enami 620: size_t trysize, phsize;
1.134 ad 621: int off, slack;
1.3 pk 622:
1.116 simonb 623: #ifdef DEBUG
624: /*
625: * Check that the pool hasn't already been initialised and
626: * added to the list of all pools.
627: */
1.145 ad 628: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
1.116 simonb 629: if (pp == pp1)
630: panic("pool_init: pool %s already initialised",
631: wchan);
632: }
633: #endif
634:
1.25 thorpej 635: #ifdef POOL_DIAGNOSTIC
636: /*
637: * Always log if POOL_DIAGNOSTIC is defined.
638: */
639: if (pool_logsize != 0)
640: flags |= PR_LOGGING;
641: #endif
642:
1.66 thorpej 643: if (palloc == NULL)
644: palloc = &pool_allocator_kmem;
1.112 bjh21 645: #ifdef POOL_SUBPAGE
646: if (size > palloc->pa_pagesz) {
647: if (palloc == &pool_allocator_kmem)
648: palloc = &pool_allocator_kmem_fullpage;
649: else if (palloc == &pool_allocator_nointr)
650: palloc = &pool_allocator_nointr_fullpage;
651: }
1.66 thorpej 652: #endif /* POOL_SUBPAGE */
653: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.112 bjh21 654: if (palloc->pa_pagesz == 0)
1.66 thorpej 655: palloc->pa_pagesz = PAGE_SIZE;
656:
657: TAILQ_INIT(&palloc->pa_list);
658:
1.134 ad 659: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 660: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
661: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.117 yamt 662:
663: if (palloc->pa_backingmapptr != NULL) {
664: pa_reclaim_register(palloc);
665: }
1.66 thorpej 666: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 667: }
1.3 pk 668:
669: if (align == 0)
670: align = ALIGN(1);
1.14 thorpej 671:
1.120 yamt 672: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 673: size = sizeof(struct pool_item);
1.3 pk 674:
1.78 thorpej 675: size = roundup(size, align);
1.66 thorpej 676: #ifdef DIAGNOSTIC
677: if (size > palloc->pa_pagesz)
1.121 yamt 678: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 679: #endif
1.35 pk 680:
1.3 pk 681: /*
682: * Initialize the pool structure.
683: */
1.88 chs 684: LIST_INIT(&pp->pr_emptypages);
685: LIST_INIT(&pp->pr_fullpages);
686: LIST_INIT(&pp->pr_partpages);
1.134 ad 687: pp->pr_cache = NULL;
1.3 pk 688: pp->pr_curpage = NULL;
689: pp->pr_npages = 0;
690: pp->pr_minitems = 0;
691: pp->pr_minpages = 0;
692: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 693: pp->pr_roflags = flags;
694: pp->pr_flags = 0;
1.35 pk 695: pp->pr_size = size;
1.3 pk 696: pp->pr_align = align;
697: pp->pr_wchan = wchan;
1.66 thorpej 698: pp->pr_alloc = palloc;
1.20 thorpej 699: pp->pr_nitems = 0;
700: pp->pr_nout = 0;
701: pp->pr_hardlimit = UINT_MAX;
702: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 703: pp->pr_hardlimit_ratecap.tv_sec = 0;
704: pp->pr_hardlimit_ratecap.tv_usec = 0;
705: pp->pr_hardlimit_warning_last.tv_sec = 0;
706: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 707: pp->pr_drain_hook = NULL;
708: pp->pr_drain_hook_arg = NULL;
1.125 ad 709: pp->pr_freecheck = NULL;
1.3 pk 710:
711: /*
 712: 	 * Decide whether to put the page header off-page, to avoid
1.92 enami 713: 	 * wasting too large a part of the page or too big an item.
 714: 	 * Off-page page headers go in a splay tree, so we can match
 715: 	 * a returned item with its header based on the page address.
 716: 	 * We use 1/16 of the page size and about 8 times the size of
 717: 	 * the page header as the threshold (XXX: tune)
718: *
719: * However, we'll put the header into the page if we can put
720: * it without wasting any items.
721: *
722: * Silently enforce `0 <= ioff < align'.
1.3 pk 723: */
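	/*
	 * For example, with 4 KiB pages the size test below puts the
	 * header on-page for items smaller than
	 * MIN(4096 / 16, 8 * phsize) bytes, which is never more than
	 * 256 bytes.
	 */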
1.92 enami 724: pp->pr_itemoffset = ioff %= align;
725: /* See the comment below about reserved bytes. */
726: trysize = palloc->pa_pagesz - ((align - ioff) % align);
727: phsize = ALIGN(sizeof(struct pool_item_header));
1.121 yamt 728: if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 729: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
730: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 731: /* Use the end of the page for the page header */
1.20 thorpej 732: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 733: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 734: } else {
1.3 pk 735: /* The page header will be taken from our page header pool */
736: pp->pr_phoffset = 0;
1.66 thorpej 737: off = palloc->pa_pagesz;
1.88 chs 738: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 739: }
1.1 pk 740:
1.3 pk 741: /*
742: * Alignment is to take place at `ioff' within the item. This means
743: * we must reserve up to `align - 1' bytes on the page to allow
744: * appropriate positioning of each item.
745: */
746: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 747: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 748: if ((pp->pr_roflags & PR_NOTOUCH)) {
749: int idx;
750:
751: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
752: idx++) {
753: /* nothing */
754: }
755: if (idx >= PHPOOL_MAX) {
756: /*
 757: 			 * if you see this panic, consider tweaking
758: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
759: */
760: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
761: pp->pr_wchan, pp->pr_itemsperpage);
762: }
763: pp->pr_phpool = &phpool[idx];
764: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
765: pp->pr_phpool = &phpool[0];
766: }
767: #if defined(DIAGNOSTIC)
768: else {
769: pp->pr_phpool = NULL;
770: }
771: #endif
1.3 pk 772:
773: /*
774: * Use the slack between the chunks and the page header
775: * for "cache coloring".
776: */
777: slack = off - pp->pr_itemsperpage * pp->pr_size;
778: pp->pr_maxcolor = (slack / align) * align;
779: pp->pr_curcolor = 0;
780:
781: pp->pr_nget = 0;
782: pp->pr_nfail = 0;
783: pp->pr_nput = 0;
784: pp->pr_npagealloc = 0;
785: pp->pr_npagefree = 0;
1.1 pk 786: pp->pr_hiwat = 0;
1.8 thorpej 787: pp->pr_nidle = 0;
1.134 ad 788: pp->pr_refcnt = 0;
1.3 pk 789:
1.59 thorpej 790: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 791: if (flags & PR_LOGGING) {
792: if (kmem_map == NULL ||
793: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
794: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 795: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 796: pp->pr_curlogentry = 0;
797: pp->pr_logsize = pool_logsize;
798: }
1.59 thorpej 799: #endif
1.25 thorpej 800:
801: pp->pr_entered_file = NULL;
802: pp->pr_entered_line = 0;
1.3 pk 803:
1.157 ad 804: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
1.134 ad 805: cv_init(&pp->pr_cv, wchan);
806: pp->pr_ipl = ipl;
1.1 pk 807:
1.3 pk 808: /*
1.43 thorpej 809: * Initialize private page header pool and cache magazine pool if we
810: * haven't done so yet.
1.23 thorpej 811: * XXX LOCKING.
1.3 pk 812: */
1.97 yamt 813: if (phpool[0].pr_size == 0) {
814: int idx;
815: for (idx = 0; idx < PHPOOL_MAX; idx++) {
816: static char phpool_names[PHPOOL_MAX][6+1+6+1];
817: int nelem;
818: size_t sz;
819:
820: nelem = PHPOOL_FREELIST_NELEM(idx);
821: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
822: "phpool-%d", nelem);
823: sz = sizeof(struct pool_item_header);
824: if (nelem) {
1.135 yamt 825: sz = offsetof(struct pool_item_header,
826: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
1.97 yamt 827: }
828: pool_init(&phpool[idx], sz, 0, 0, 0,
1.129 ad 829: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.97 yamt 830: }
1.62 bjh21 831: #ifdef POOL_SUBPAGE
832: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.129 ad 833: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
1.62 bjh21 834: #endif
1.142 ad 835:
836: size = sizeof(pcg_t) +
837: (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
1.156 ad 838: pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
1.142 ad 839: "pcgnormal", &pool_allocator_meta, IPL_VM);
840:
841: size = sizeof(pcg_t) +
842: (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
1.156 ad 843: pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
1.142 ad 844: "pcglarge", &pool_allocator_meta, IPL_VM);
1.1 pk 845: }
846:
1.145 ad 847: /* Insert into the list of all pools. */
848: if (__predict_true(!cold))
1.134 ad 849: mutex_enter(&pool_head_lock);
1.145 ad 850: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
851: if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
852: break;
853: }
854: if (pp1 == NULL)
855: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
856: else
857: TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
858: if (__predict_true(!cold))
1.134 ad 859: mutex_exit(&pool_head_lock);
860:
1.167 skrll 861: /* Insert this into the list of pools using this allocator. */
1.145 ad 862: if (__predict_true(!cold))
1.134 ad 863: mutex_enter(&palloc->pa_lock);
1.145 ad 864: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
865: if (__predict_true(!cold))
1.134 ad 866: mutex_exit(&palloc->pa_lock);
1.66 thorpej 867:
1.117 yamt 868: pool_reclaim_register(pp);
1.1 pk 869: }
870:
871: /*
 872:  * De-commission a pool resource.
873: */
874: void
1.42 thorpej 875: pool_destroy(struct pool *pp)
1.1 pk 876: {
1.101 thorpej 877: struct pool_pagelist pq;
1.3 pk 878: struct pool_item_header *ph;
1.43 thorpej 879:
1.101 thorpej 880: /* Remove from global pool list */
1.134 ad 881: mutex_enter(&pool_head_lock);
882: while (pp->pr_refcnt != 0)
883: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 884: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.101 thorpej 885: if (drainpp == pp)
886: drainpp = NULL;
1.134 ad 887: mutex_exit(&pool_head_lock);
1.101 thorpej 888:
889: /* Remove this pool from its allocator's list of pools. */
1.117 yamt 890: pool_reclaim_unregister(pp);
1.134 ad 891: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 892: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 893: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 894:
1.134 ad 895: mutex_enter(&pp->pr_lock);
1.101 thorpej 896:
1.134 ad 897: KASSERT(pp->pr_cache == NULL);
1.3 pk 898:
899: #ifdef DIAGNOSTIC
1.20 thorpej 900: if (pp->pr_nout != 0) {
1.25 thorpej 901: pr_printlog(pp, NULL, printf);
1.80 provos 902: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 903: pp->pr_nout);
1.3 pk 904: }
905: #endif
1.1 pk 906:
1.101 thorpej 907: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
908: KASSERT(LIST_EMPTY(&pp->pr_partpages));
909:
1.3 pk 910: /* Remove all pages */
1.101 thorpej 911: LIST_INIT(&pq);
1.88 chs 912: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 913: pr_rmpage(pp, ph, &pq);
914:
1.134 ad 915: mutex_exit(&pp->pr_lock);
1.3 pk 916:
1.101 thorpej 917: pr_pagelist_free(pp, &pq);
1.3 pk 918:
1.59 thorpej 919: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 920: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 921: free(pp->pr_log, M_TEMP);
1.59 thorpej 922: #endif
1.134 ad 923:
924: cv_destroy(&pp->pr_cv);
925: mutex_destroy(&pp->pr_lock);
1.1 pk 926: }
927:
1.68 thorpej 928: void
929: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
930: {
931:
932: /* XXX no locking -- must be used just after pool_init() */
933: #ifdef DIAGNOSTIC
934: if (pp->pr_drain_hook != NULL)
935: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
936: #endif
937: pp->pr_drain_hook = fn;
938: pp->pr_drain_hook_arg = arg;
939: }
940:
1.88 chs 941: static struct pool_item_header *
1.128 christos 942: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 943: {
944: struct pool_item_header *ph;
945:
946: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 947: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.134 ad 948: else
1.97 yamt 949: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 950:
951: return (ph);
952: }
1.1 pk 953:
954: /*
1.134 ad 955: * Grab an item from the pool.
1.1 pk 956: */
1.3 pk 957: void *
1.59 thorpej 958: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 959: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 960: #else
961: pool_get(struct pool *pp, int flags)
962: #endif
1.1 pk 963: {
964: struct pool_item *pi;
1.3 pk 965: struct pool_item_header *ph;
1.55 thorpej 966: void *v;
1.1 pk 967:
1.2 pk 968: #ifdef DIAGNOSTIC
1.95 atatat 969: if (__predict_false(pp->pr_itemsperpage == 0))
970: panic("pool_get: pool %p: pr_itemsperpage is zero, "
971: "pool not initialized?", pp);
1.84 thorpej 972: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 973: (flags & PR_WAITOK) != 0))
1.77 matt 974: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 975:
1.102 chs 976: #endif /* DIAGNOSTIC */
1.58 thorpej 977: #ifdef LOCKDEBUG
1.155 ad 978: if (flags & PR_WAITOK) {
1.154 yamt 979: ASSERT_SLEEPABLE();
1.155 ad 980: }
1.56 sommerfe 981: #endif
1.1 pk 982:
1.134 ad 983: mutex_enter(&pp->pr_lock);
1.25 thorpej 984: pr_enter(pp, file, line);
1.20 thorpej 985:
986: startover:
987: /*
988: * Check to see if we've reached the hard limit. If we have,
989: * and we can wait, then wait until an item has been returned to
990: * the pool.
991: */
992: #ifdef DIAGNOSTIC
1.34 thorpej 993: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 994: pr_leave(pp);
1.134 ad 995: mutex_exit(&pp->pr_lock);
1.20 thorpej 996: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
997: }
998: #endif
1.34 thorpej 999: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 1000: if (pp->pr_drain_hook != NULL) {
1001: /*
1002: * Since the drain hook is going to free things
1003: * back to the pool, unlock, call the hook, re-lock,
1004: * and check the hardlimit condition again.
1005: */
1006: pr_leave(pp);
1.134 ad 1007: mutex_exit(&pp->pr_lock);
1.68 thorpej 1008: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 1009: mutex_enter(&pp->pr_lock);
1.68 thorpej 1010: pr_enter(pp, file, line);
1011: if (pp->pr_nout < pp->pr_hardlimit)
1012: goto startover;
1013: }
1014:
1.29 sommerfe 1015: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 1016: /*
1017: * XXX: A warning isn't logged in this case. Should
1018: * it be?
1019: */
1020: pp->pr_flags |= PR_WANTED;
1.25 thorpej 1021: pr_leave(pp);
1.134 ad 1022: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.25 thorpej 1023: pr_enter(pp, file, line);
1.20 thorpej 1024: goto startover;
1025: }
1.31 thorpej 1026:
1027: /*
1028: * Log a message that the hard limit has been hit.
1029: */
1030: if (pp->pr_hardlimit_warning != NULL &&
1031: ratecheck(&pp->pr_hardlimit_warning_last,
1032: &pp->pr_hardlimit_ratecap))
1033: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 1034:
1035: pp->pr_nfail++;
1036:
1.25 thorpej 1037: pr_leave(pp);
1.134 ad 1038: mutex_exit(&pp->pr_lock);
1.20 thorpej 1039: return (NULL);
1040: }
1041:
1.3 pk 1042: /*
1043: * The convention we use is that if `curpage' is not NULL, then
1044: * it points at a non-empty bucket. In particular, `curpage'
1045: * never points at a page header which has PR_PHINPAGE set and
1046: * has no items in its bucket.
1047: */
1.20 thorpej 1048: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 1049: int error;
1050:
1.20 thorpej 1051: #ifdef DIAGNOSTIC
1052: if (pp->pr_nitems != 0) {
1.134 ad 1053: mutex_exit(&pp->pr_lock);
1.20 thorpej 1054: printf("pool_get: %s: curpage NULL, nitems %u\n",
1055: pp->pr_wchan, pp->pr_nitems);
1.80 provos 1056: panic("pool_get: nitems inconsistent");
1.20 thorpej 1057: }
1058: #endif
1059:
1.21 thorpej 1060: /*
1061: * Call the back-end page allocator for more memory.
1062: * Release the pool lock, as the back-end page allocator
1063: * may block.
1064: */
1.25 thorpej 1065: pr_leave(pp);
1.113 yamt 1066: error = pool_grow(pp, flags);
1067: pr_enter(pp, file, line);
1068: if (error != 0) {
1.21 thorpej 1069: /*
1.55 thorpej 1070: * We were unable to allocate a page or item
1071: * header, but we released the lock during
1072: * allocation, so perhaps items were freed
1073: * back to the pool. Check for this case.
1.21 thorpej 1074: */
1075: if (pp->pr_curpage != NULL)
1076: goto startover;
1.15 pk 1077:
1.117 yamt 1078: pp->pr_nfail++;
1.25 thorpej 1079: pr_leave(pp);
1.134 ad 1080: mutex_exit(&pp->pr_lock);
1.117 yamt 1081: return (NULL);
1.1 pk 1082: }
1.3 pk 1083:
1.20 thorpej 1084: /* Start the allocation process over. */
1085: goto startover;
1.3 pk 1086: }
1.97 yamt 1087: if (pp->pr_roflags & PR_NOTOUCH) {
1088: #ifdef DIAGNOSTIC
1089: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1090: pr_leave(pp);
1.134 ad 1091: mutex_exit(&pp->pr_lock);
1.97 yamt 1092: panic("pool_get: %s: page empty", pp->pr_wchan);
1093: }
1094: #endif
1095: v = pr_item_notouch_get(pp, ph);
1096: #ifdef POOL_DIAGNOSTIC
1097: pr_log(pp, v, PRLOG_GET, file, line);
1098: #endif
1099: } else {
1.102 chs 1100: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 1101: if (__predict_false(v == NULL)) {
1102: pr_leave(pp);
1.134 ad 1103: mutex_exit(&pp->pr_lock);
1.97 yamt 1104: panic("pool_get: %s: page empty", pp->pr_wchan);
1105: }
1.20 thorpej 1106: #ifdef DIAGNOSTIC
1.97 yamt 1107: if (__predict_false(pp->pr_nitems == 0)) {
1108: pr_leave(pp);
1.134 ad 1109: mutex_exit(&pp->pr_lock);
1.97 yamt 1110: printf("pool_get: %s: items on itemlist, nitems %u\n",
1111: pp->pr_wchan, pp->pr_nitems);
1112: panic("pool_get: nitems inconsistent");
1113: }
1.65 enami 1114: #endif
1.56 sommerfe 1115:
1.65 enami 1116: #ifdef POOL_DIAGNOSTIC
1.97 yamt 1117: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 1118: #endif
1.3 pk 1119:
1.65 enami 1120: #ifdef DIAGNOSTIC
1.97 yamt 1121: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1122: pr_printlog(pp, pi, printf);
1123: panic("pool_get(%s): free list modified: "
1124: "magic=%x; page %p; item addr %p\n",
1125: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1126: }
1.3 pk 1127: #endif
1128:
1.97 yamt 1129: /*
1130: * Remove from item list.
1131: */
1.102 chs 1132: LIST_REMOVE(pi, pi_list);
1.97 yamt 1133: }
1.20 thorpej 1134: pp->pr_nitems--;
1135: pp->pr_nout++;
1.6 thorpej 1136: if (ph->ph_nmissing == 0) {
1137: #ifdef DIAGNOSTIC
1.34 thorpej 1138: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 1139: panic("pool_get: nidle inconsistent");
1140: #endif
1141: pp->pr_nidle--;
1.88 chs 1142:
1143: /*
1144: * This page was previously empty. Move it to the list of
1145: * partially-full pages. This page is already curpage.
1146: */
1147: LIST_REMOVE(ph, ph_pagelist);
1148: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 1149: }
1.3 pk 1150: ph->ph_nmissing++;
1.97 yamt 1151: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1152: #ifdef DIAGNOSTIC
1.97 yamt 1153: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 1154: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1155: pr_leave(pp);
1.134 ad 1156: mutex_exit(&pp->pr_lock);
1.21 thorpej 1157: panic("pool_get: %s: nmissing inconsistent",
1158: pp->pr_wchan);
1159: }
1160: #endif
1.3 pk 1161: /*
1.88 chs 1162: * This page is now full. Move it to the full list
1163: * and select a new current page.
1.3 pk 1164: */
1.88 chs 1165: LIST_REMOVE(ph, ph_pagelist);
1166: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1167: pool_update_curpage(pp);
1.1 pk 1168: }
1.3 pk 1169:
1170: pp->pr_nget++;
1.111 christos 1171: pr_leave(pp);
1.20 thorpej 1172:
1173: /*
1174: * If we have a low water mark and we are now below that low
1175: * water mark, add more items to the pool.
1176: */
1.53 thorpej 1177: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1178: /*
1179: * XXX: Should we log a warning? Should we set up a timeout
1180: * to try again in a second or so? The latter could break
1181: * a caller's assumptions about interrupt protection, etc.
1182: */
1183: }
1184:
1.134 ad 1185: mutex_exit(&pp->pr_lock);
1.125 ad 1186: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
1187: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 1188: return (v);
1189: }
1190:
1191: /*
1.43 thorpej 1192: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1193: */
1.43 thorpej 1194: static void
1.101 thorpej 1195: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1196: {
1197: struct pool_item *pi = v;
1.3 pk 1198: struct pool_item_header *ph;
1199:
1.134 ad 1200: KASSERT(mutex_owned(&pp->pr_lock));
1.125 ad 1201: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 1202: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 1203:
1.30 thorpej 1204: #ifdef DIAGNOSTIC
1.34 thorpej 1205: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1206: printf("pool %s: putting with none out\n",
1207: pp->pr_wchan);
1208: panic("pool_put");
1209: }
1210: #endif
1.3 pk 1211:
1.121 yamt 1212: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.25 thorpej 1213: pr_printlog(pp, NULL, printf);
1.3 pk 1214: panic("pool_put: %s: page header missing", pp->pr_wchan);
1215: }
1.28 thorpej 1216:
1.3 pk 1217: /*
1218: * Return to item list.
1219: */
1.97 yamt 1220: if (pp->pr_roflags & PR_NOTOUCH) {
1221: pr_item_notouch_put(pp, ph, v);
1222: } else {
1.2 pk 1223: #ifdef DIAGNOSTIC
1.97 yamt 1224: pi->pi_magic = PI_MAGIC;
1.3 pk 1225: #endif
1.32 chs 1226: #ifdef DEBUG
1.97 yamt 1227: {
1228: int i, *ip = v;
1.32 chs 1229:
1.97 yamt 1230: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1231: *ip++ = PI_MAGIC;
1232: }
1.32 chs 1233: }
1234: #endif
1235:
1.102 chs 1236: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1237: }
1.79 thorpej 1238: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1239: ph->ph_nmissing--;
1240: pp->pr_nput++;
1.20 thorpej 1241: pp->pr_nitems++;
1242: pp->pr_nout--;
1.3 pk 1243:
1244: /* Cancel "pool empty" condition if it exists */
1245: if (pp->pr_curpage == NULL)
1246: pp->pr_curpage = ph;
1247:
1248: if (pp->pr_flags & PR_WANTED) {
1249: pp->pr_flags &= ~PR_WANTED;
1.134 ad 1250: cv_broadcast(&pp->pr_cv);
1.3 pk 1251: }
1252:
1253: /*
1.88 chs 1254: * If this page is now empty, do one of two things:
1.21 thorpej 1255: *
1.88 chs 1256: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1257: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1258: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1259: * CLAIM.
1.21 thorpej 1260: *
1.88 chs 1261: * (2) Otherwise, move the page to the empty page list.
1262: *
1263: * Either way, select a new current page (so we use a partially-full
1264: * page if one is available).
1.3 pk 1265: */
1266: if (ph->ph_nmissing == 0) {
1.6 thorpej 1267: pp->pr_nidle++;
1.90 thorpej 1268: if (pp->pr_npages > pp->pr_minpages &&
1.152 yamt 1269: pp->pr_npages > pp->pr_maxpages) {
1.101 thorpej 1270: pr_rmpage(pp, ph, pq);
1.3 pk 1271: } else {
1.88 chs 1272: LIST_REMOVE(ph, ph_pagelist);
1273: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1274:
1.21 thorpej 1275: /*
1276: * Update the timestamp on the page. A page must
1277: * be idle for some period of time before it can
1278: * be reclaimed by the pagedaemon. This minimizes
1279: * ping-pong'ing for memory.
1.151 yamt 1280: *
1281: * note for 64-bit time_t: truncating to 32-bit is not
1282: * a problem for our usage.
1.21 thorpej 1283: */
1.151 yamt 1284: ph->ph_time = time_uptime;
1.1 pk 1285: }
1.88 chs 1286: pool_update_curpage(pp);
1.1 pk 1287: }
1.88 chs 1288:
1.21 thorpej 1289: /*
1.88 chs 1290: * If the page was previously completely full, move it to the
1291: * partially-full list and make it the current page. The next
1292: * allocation will get the item from this page, instead of
1293: * further fragmenting the pool.
1.21 thorpej 1294: */
1295: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1296: LIST_REMOVE(ph, ph_pagelist);
1297: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1298: pp->pr_curpage = ph;
1299: }
1.43 thorpej 1300: }
1301:
1302: /*
1.134 ad 1303: * Return resource to the pool.
1.43 thorpej 1304: */
1.59 thorpej 1305: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1306: void
1307: _pool_put(struct pool *pp, void *v, const char *file, long line)
1308: {
1.101 thorpej 1309: struct pool_pagelist pq;
1310:
1311: LIST_INIT(&pq);
1.43 thorpej 1312:
1.134 ad 1313: mutex_enter(&pp->pr_lock);
1.43 thorpej 1314: pr_enter(pp, file, line);
1315:
1.56 sommerfe 1316: pr_log(pp, v, PRLOG_PUT, file, line);
1317:
1.101 thorpej 1318: pool_do_put(pp, v, &pq);
1.21 thorpej 1319:
1.25 thorpej 1320: pr_leave(pp);
1.134 ad 1321: mutex_exit(&pp->pr_lock);
1.101 thorpej 1322:
1.102 chs 1323: pr_pagelist_free(pp, &pq);
1.1 pk 1324: }
1.57 sommerfe 1325: #undef pool_put
1.59 thorpej 1326: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1327:
1.56 sommerfe 1328: void
1329: pool_put(struct pool *pp, void *v)
1330: {
1.101 thorpej 1331: struct pool_pagelist pq;
1332:
1333: LIST_INIT(&pq);
1.56 sommerfe 1334:
1.134 ad 1335: mutex_enter(&pp->pr_lock);
1.101 thorpej 1336: pool_do_put(pp, v, &pq);
1.134 ad 1337: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1338:
1.102 chs 1339: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1340: }
1.57 sommerfe 1341:
1.59 thorpej 1342: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1343: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1344: #endif
1.74 thorpej 1345:
1346: /*
1.113 yamt 1347: * pool_grow: grow a pool by a page.
1348: *
1349: * => called with pool locked.
1350: * => unlock and relock the pool.
1351: * => return with pool locked.
1352: */
1353:
1354: static int
1355: pool_grow(struct pool *pp, int flags)
1356: {
1357: struct pool_item_header *ph = NULL;
1358: char *cp;
1359:
1.134 ad 1360: mutex_exit(&pp->pr_lock);
1.113 yamt 1361: cp = pool_allocator_alloc(pp, flags);
1362: if (__predict_true(cp != NULL)) {
1363: ph = pool_alloc_item_header(pp, cp, flags);
1364: }
1365: if (__predict_false(cp == NULL || ph == NULL)) {
1366: if (cp != NULL) {
1367: pool_allocator_free(pp, cp);
1368: }
1.134 ad 1369: mutex_enter(&pp->pr_lock);
1.113 yamt 1370: return ENOMEM;
1371: }
1372:
1.134 ad 1373: mutex_enter(&pp->pr_lock);
1.113 yamt 1374: pool_prime_page(pp, cp, ph);
1375: pp->pr_npagealloc++;
1376: return 0;
1377: }
1378:
1379: /*
1.74 thorpej 1380: * Add N items to the pool.
1381: */
1382: int
1383: pool_prime(struct pool *pp, int n)
1384: {
1.75 simonb 1385: int newpages;
1.113 yamt 1386: int error = 0;
1.74 thorpej 1387:
1.134 ad 1388: mutex_enter(&pp->pr_lock);
1.74 thorpej 1389:
1390: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1391:
1392: while (newpages-- > 0) {
1.113 yamt 1393: error = pool_grow(pp, PR_NOWAIT);
1394: if (error) {
1.74 thorpej 1395: break;
1396: }
1397: pp->pr_minpages++;
1398: }
1399:
1400: if (pp->pr_minpages >= pp->pr_maxpages)
1401: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1402:
1.134 ad 1403: mutex_exit(&pp->pr_lock);
1.113 yamt 1404: return error;
1.74 thorpej 1405: }
1.55 thorpej 1406:
1407: /*
1.3 pk 1408: * Add a page worth of items to the pool.
1.21 thorpej 1409: *
1410: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1411: */
1.55 thorpej 1412: static void
1.128 christos 1413: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1414: {
1415: struct pool_item *pi;
1.128 christos 1416: void *cp = storage;
1.125 ad 1417: const unsigned int align = pp->pr_align;
1418: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1419: int n;
1.36 pk 1420:
1.134 ad 1421: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1422:
1.66 thorpej 1423: #ifdef DIAGNOSTIC
1.121 yamt 1424: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1.150 skrll 1425: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1426: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1427: #endif
1.3 pk 1428:
1429: /*
1430: * Insert page header.
1431: */
1.88 chs 1432: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1433: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1434: ph->ph_page = storage;
1435: ph->ph_nmissing = 0;
1.151 yamt 1436: ph->ph_time = time_uptime;
1.88 chs 1437: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1438: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1439:
1.6 thorpej 1440: pp->pr_nidle++;
1441:
1.3 pk 1442: /*
1443: * Color this page.
1444: */
1.141 yamt 1445: ph->ph_off = pp->pr_curcolor;
1446: cp = (char *)cp + ph->ph_off;
1.3 pk 1447: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1448: pp->pr_curcolor = 0;
1449:
1450: /*
 1451: 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1452: */
1453: if (ioff != 0)
1.128 christos 1454: cp = (char *)cp + align - ioff;
1.3 pk 1455:
1.125 ad 1456: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1457:
1.3 pk 1458: /*
1459: * Insert remaining chunks on the bucket list.
1460: */
1461: n = pp->pr_itemsperpage;
1.20 thorpej 1462: pp->pr_nitems += n;
1.3 pk 1463:
1.97 yamt 1464: if (pp->pr_roflags & PR_NOTOUCH) {
1.141 yamt 1465: pr_item_notouch_init(pp, ph);
1.97 yamt 1466: } else {
1467: while (n--) {
1468: pi = (struct pool_item *)cp;
1.78 thorpej 1469:
1.97 yamt 1470: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1471:
1.97 yamt 1472: /* Insert on page list */
1.102 chs 1473: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1474: #ifdef DIAGNOSTIC
1.97 yamt 1475: pi->pi_magic = PI_MAGIC;
1.3 pk 1476: #endif
1.128 christos 1477: cp = (char *)cp + pp->pr_size;
1.125 ad 1478:
1479: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1480: }
1.3 pk 1481: }
1482:
1483: /*
1484: * If the pool was depleted, point at the new page.
1485: */
1486: if (pp->pr_curpage == NULL)
1487: pp->pr_curpage = ph;
1488:
1489: if (++pp->pr_npages > pp->pr_hiwat)
1490: pp->pr_hiwat = pp->pr_npages;
1491: }
1492:
1.20 thorpej 1493: /*
1.52 thorpej 1494: * Used by pool_get() when nitems drops below the low water mark. This
1.88 chs 1495: * is used to catch up pr_nitems with the low water mark.
1.20 thorpej 1496: *
1.21 thorpej 1497: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1498: *
1.73 thorpej 1499: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1500: * with it locked.
1501: */
1502: static int
1.42 thorpej 1503: pool_catchup(struct pool *pp)
1.20 thorpej 1504: {
1505: int error = 0;
1506:
1.54 thorpej 1507: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1508: error = pool_grow(pp, PR_NOWAIT);
1509: if (error) {
1.20 thorpej 1510: break;
1511: }
1512: }
1.113 yamt 1513: return error;
1.20 thorpej 1514: }
1515:
1.88 chs 1516: static void
1517: pool_update_curpage(struct pool *pp)
1518: {
1519:
1520: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1521: if (pp->pr_curpage == NULL) {
1522: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1523: }
1.168 yamt 1524: KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1525: (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1.88 chs 1526: }
1527:
1.3 pk 1528: void
1.42 thorpej 1529: pool_setlowat(struct pool *pp, int n)
1.3 pk 1530: {
1.15 pk 1531:
1.134 ad 1532: mutex_enter(&pp->pr_lock);
1.21 thorpej 1533:
1.3 pk 1534: pp->pr_minitems = n;
1.15 pk 1535: pp->pr_minpages = (n == 0)
1536: ? 0
1.18 thorpej 1537: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1538:
1539: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1540: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1541: /*
1542: * XXX: Should we log a warning? Should we set up a timeout
1543: * to try again in a second or so? The latter could break
1544: * a caller's assumptions about interrupt protection, etc.
1545: */
1546: }
1.21 thorpej 1547:
1.134 ad 1548: mutex_exit(&pp->pr_lock);
1.3 pk 1549: }
1550:
1551: void
1.42 thorpej 1552: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1553: {
1.15 pk 1554:
1.134 ad 1555: mutex_enter(&pp->pr_lock);
1.21 thorpej 1556:
1.15 pk 1557: pp->pr_maxpages = (n == 0)
1558: ? 0
1.18 thorpej 1559: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1560:
1.134 ad 1561: mutex_exit(&pp->pr_lock);
1.3 pk 1562: }
1563:
1.20 thorpej 1564: void
1.42 thorpej 1565: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1566: {
1567:
1.134 ad 1568: mutex_enter(&pp->pr_lock);
1.20 thorpej 1569:
1570: pp->pr_hardlimit = n;
1571: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1572: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1573: pp->pr_hardlimit_warning_last.tv_sec = 0;
1574: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1575:
1576: /*
1.21 thorpej 1577: * In-line version of pool_sethiwat(), because we don't want to
1578: * release the lock.
1.20 thorpej 1579: */
1580: pp->pr_maxpages = (n == 0)
1581: ? 0
1582: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1583:
1.134 ad 1584: mutex_exit(&pp->pr_lock);
1.20 thorpej 1585: }
1.3 pk 1586:
1587: /*
1588: * Release all complete pages that have not been used recently.
1589: */
1.66 thorpej 1590: int
1.59 thorpej 1591: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1592: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1593: #else
1594: pool_reclaim(struct pool *pp)
1595: #endif
1.3 pk 1596: {
1597: struct pool_item_header *ph, *phnext;
1.61 chs 1598: struct pool_pagelist pq;
1.151 yamt 1599: uint32_t curtime;
1.134 ad 1600: bool klock;
1601: int rv;
1.3 pk 1602:
1.68 thorpej 1603: if (pp->pr_drain_hook != NULL) {
1604: /*
1605: * The drain hook must be called with the pool unlocked.
1606: */
1607: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1608: }
1609:
1.134 ad 1610: /*
1.157 ad 1611: * XXXSMP Because we do not want to cause non-MPSAFE code
1612: * to block.
1.134 ad 1613: */
1614: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1615: pp->pr_ipl == IPL_SOFTSERIAL) {
1616: KERNEL_LOCK(1, NULL);
1617: klock = true;
1618: } else
1619: klock = false;
1620:
1621: /* Reclaim items from the pool's cache (if any). */
1622: if (pp->pr_cache != NULL)
1623: pool_cache_invalidate(pp->pr_cache);
1624:
1625: if (mutex_tryenter(&pp->pr_lock) == 0) {
1626: if (klock) {
1627: KERNEL_UNLOCK_ONE(NULL);
1628: }
1.66 thorpej 1629: return (0);
1.134 ad 1630: }
1.25 thorpej 1631: pr_enter(pp, file, line);
1.68 thorpej 1632:
1.88 chs 1633: LIST_INIT(&pq);
1.43 thorpej 1634:
1.151 yamt 1635: curtime = time_uptime;
1.21 thorpej 1636:
1.88 chs 1637: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1638: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1639:
1640: /* Check our minimum page claim */
1641: if (pp->pr_npages <= pp->pr_minpages)
1642: break;
1643:
1.88 chs 1644: KASSERT(ph->ph_nmissing == 0);
1.151 yamt 1645: if (curtime - ph->ph_time < pool_inactive_time
1.117 yamt 1646: && !pa_starved_p(pp->pr_alloc))
1.88 chs 1647: continue;
1.21 thorpej 1648:
1.88 chs 1649: /*
1650: * If freeing this page would put us below
1651: * the low water mark, stop now.
1652: */
1653: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1654: pp->pr_minitems)
1655: break;
1.21 thorpej 1656:
1.88 chs 1657: pr_rmpage(pp, ph, &pq);
1.3 pk 1658: }
1659:
1.25 thorpej 1660: pr_leave(pp);
1.134 ad 1661: mutex_exit(&pp->pr_lock);
1662:
1663: if (LIST_EMPTY(&pq))
1664: rv = 0;
1665: else {
1666: pr_pagelist_free(pp, &pq);
1667: rv = 1;
1668: }
1669:
1670: if (klock) {
1671: KERNEL_UNLOCK_ONE(NULL);
1672: }
1.66 thorpej 1673:
1.134 ad 1674: return (rv);
1.3 pk 1675: }
1676:
1677: /*
1.134 ad 1678: * Drain pools, one at a time. This is a two stage process;
1679: * drain_start kicks off a cross call to drain CPU-level caches
1680: * if the pool has an associated pool_cache. drain_end waits
1681: * for those cross calls to finish, and then drains the cache
1682: * (if any) and pool.
1.131 ad 1683: *
1.134 ad 1684: * Note, must never be called from interrupt context.
1.3 pk 1685: */
1686: void
1.134 ad 1687: pool_drain_start(struct pool **ppp, uint64_t *wp)
1.3 pk 1688: {
1689: struct pool *pp;
1.134 ad 1690:
1.145 ad 1691: KASSERT(!TAILQ_EMPTY(&pool_head));
1.3 pk 1692:
1.61 chs 1693: pp = NULL;
1.134 ad 1694:
1695: /* Find next pool to drain, and add a reference. */
1696: mutex_enter(&pool_head_lock);
1697: do {
1698: if (drainpp == NULL) {
1.145 ad 1699: drainpp = TAILQ_FIRST(&pool_head);
1.134 ad 1700: }
1701: if (drainpp != NULL) {
1702: pp = drainpp;
1.145 ad 1703: drainpp = TAILQ_NEXT(pp, pr_poollist);
1.134 ad 1704: }
1705: /*
1706: * Skip completely idle pools. We depend on at least
1707: * one pool in the system being active.
1708: */
1709: } while (pp == NULL || pp->pr_npages == 0);
1710: pp->pr_refcnt++;
1711: mutex_exit(&pool_head_lock);
1712:
1713: /* If there is a pool_cache, drain CPU level caches. */
1714: *ppp = pp;
1715: if (pp->pr_cache != NULL) {
1716: *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
1717: pp->pr_cache, NULL);
1718: }
1719: }
1720:
1721: void
1722: pool_drain_end(struct pool *pp, uint64_t where)
1723: {
1724:
1725: if (pp == NULL)
1726: return;
1727:
1728: KASSERT(pp->pr_refcnt > 0);
1729:
1730: /* Wait for remote draining to complete. */
1731: if (pp->pr_cache != NULL)
1732: xc_wait(where);
1733:
                 1734: 	/* Drain the cache (if any) and pool. */
1735: pool_reclaim(pp);
1736:
1737: /* Finally, unlock the pool. */
1738: mutex_enter(&pool_head_lock);
1739: pp->pr_refcnt--;
1740: cv_broadcast(&pool_busy);
1741: mutex_exit(&pool_head_lock);
1.3 pk 1742: }
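
A minimal sketch, not part of subr_pool.c, of how a pagedaemon-style caller
(never in interrupt context) might drive the two-stage drain described above:

        struct pool *pp;
        uint64_t where;

        pool_drain_start(&pp, &where);  /* pick a pool, start the cross calls */
        /* ... reclaim other resources while the CPU-level caches drain ... */
        pool_drain_end(pp, where);      /* wait for the cross calls, then reclaim */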
1743:
1744: /*
1745: * Diagnostic helpers.
1746: */
1747: void
1.42 thorpej 1748: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1749: {
1750:
1.25 thorpej 1751: pool_print1(pp, modif, printf);
1.21 thorpej 1752: }
1753:
1.25 thorpej 1754: void
1.108 yamt 1755: pool_printall(const char *modif, void (*pr)(const char *, ...))
1756: {
1757: struct pool *pp;
1758:
1.145 ad 1759: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.108 yamt 1760: pool_printit(pp, modif, pr);
1761: }
1762: }
1763:
1764: void
1.42 thorpej 1765: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1766: {
1767:
1768: if (pp == NULL) {
1769: (*pr)("Must specify a pool to print.\n");
1770: return;
1771: }
1772:
1773: pool_print1(pp, modif, pr);
1774: }
1775:
1.21 thorpej 1776: static void
1.124 yamt 1777: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1778: void (*pr)(const char *, ...))
1.88 chs 1779: {
1780: struct pool_item_header *ph;
1781: #ifdef DIAGNOSTIC
1782: struct pool_item *pi;
1783: #endif
1784:
1785: LIST_FOREACH(ph, pl, ph_pagelist) {
1.151 yamt 1786: (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1787: ph->ph_page, ph->ph_nmissing, ph->ph_time);
1.88 chs 1788: #ifdef DIAGNOSTIC
1.97 yamt 1789: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1790: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1791: if (pi->pi_magic != PI_MAGIC) {
1792: (*pr)("\t\t\titem %p, magic 0x%x\n",
1793: pi, pi->pi_magic);
1794: }
1.88 chs 1795: }
1796: }
1797: #endif
1798: }
1799: }
1800:
1801: static void
1.42 thorpej 1802: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1803: {
1.25 thorpej 1804: struct pool_item_header *ph;
1.134 ad 1805: pool_cache_t pc;
1806: pcg_t *pcg;
1807: pool_cache_cpu_t *cc;
1808: uint64_t cpuhit, cpumiss;
1.44 thorpej 1809: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1810: char c;
1811:
1812: while ((c = *modif++) != '\0') {
1813: if (c == 'l')
1814: print_log = 1;
1815: if (c == 'p')
1816: print_pagelist = 1;
1.44 thorpej 1817: if (c == 'c')
1818: print_cache = 1;
1.25 thorpej 1819: }
1820:
1.134 ad 1821: if ((pc = pp->pr_cache) != NULL) {
1822: (*pr)("POOL CACHE");
1823: } else {
1824: (*pr)("POOL");
1825: }
1826:
1827: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1828: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1829: pp->pr_roflags);
1.66 thorpej 1830: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1831: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1832: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1833: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1834: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1835:
1.134 ad 1836: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1837: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1838: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1839: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1840:
1841: if (print_pagelist == 0)
1842: goto skip_pagelist;
1843:
1.88 chs 1844: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1845: (*pr)("\n\tempty page list:\n");
1.97 yamt 1846: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1847: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1848: (*pr)("\n\tfull page list:\n");
1.97 yamt 1849: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1850: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1851: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1852: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1853:
1.25 thorpej 1854: if (pp->pr_curpage == NULL)
1855: (*pr)("\tno current page\n");
1856: else
1857: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1858:
1859: skip_pagelist:
1860: if (print_log == 0)
1861: goto skip_log;
1862:
1863: (*pr)("\n");
1864: if ((pp->pr_roflags & PR_LOGGING) == 0)
1865: (*pr)("\tno log\n");
1.122 christos 1866: else {
1.25 thorpej 1867: pr_printlog(pp, NULL, pr);
1.122 christos 1868: }
1.3 pk 1869:
1.25 thorpej 1870: skip_log:
1.44 thorpej 1871:
1.102 chs 1872: #define PR_GROUPLIST(pcg) \
1873: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1.142 ad 1874: for (i = 0; i < pcg->pcg_size; i++) { \
1.102 chs 1875: if (pcg->pcg_objects[i].pcgo_pa != \
1876: POOL_PADDR_INVALID) { \
1877: (*pr)("\t\t\t%p, 0x%llx\n", \
1878: pcg->pcg_objects[i].pcgo_va, \
1879: (unsigned long long) \
1880: pcg->pcg_objects[i].pcgo_pa); \
1881: } else { \
1882: (*pr)("\t\t\t%p\n", \
1883: pcg->pcg_objects[i].pcgo_va); \
1884: } \
1885: }
1886:
1.134 ad 1887: if (pc != NULL) {
1888: cpuhit = 0;
1889: cpumiss = 0;
1890: for (i = 0; i < MAXCPUS; i++) {
1891: if ((cc = pc->pc_cpus[i]) == NULL)
1892: continue;
1893: cpuhit += cc->cc_hits;
1894: cpumiss += cc->cc_misses;
1895: }
1896: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1897: (*pr)("\tcache layer hits %llu misses %llu\n",
1898: pc->pc_hits, pc->pc_misses);
1899: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1900: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1901: pc->pc_contended);
1902: (*pr)("\tcache layer empty groups %u full groups %u\n",
1903: pc->pc_nempty, pc->pc_nfull);
1904: if (print_cache) {
1905: (*pr)("\tfull cache groups:\n");
1906: for (pcg = pc->pc_fullgroups; pcg != NULL;
1907: pcg = pcg->pcg_next) {
1908: PR_GROUPLIST(pcg);
1909: }
1910: (*pr)("\tempty cache groups:\n");
1911: for (pcg = pc->pc_emptygroups; pcg != NULL;
1912: pcg = pcg->pcg_next) {
1913: PR_GROUPLIST(pcg);
1914: }
1.103 chs 1915: }
1.44 thorpej 1916: }
1.102 chs 1917: #undef PR_GROUPLIST
1.44 thorpej 1918:
1.88 chs 1919: pr_enter_check(pp, pr);
1920: }
1921:
1922: static int
1923: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1924: {
1925: struct pool_item *pi;
1.128 christos 1926: void *page;
1.88 chs 1927: int n;
1928:
1.121 yamt 1929: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1930: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1931: if (page != ph->ph_page &&
1932: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1933: if (label != NULL)
1934: printf("%s: ", label);
1935: printf("pool(%p:%s): page inconsistency: page %p;"
1936: " at page head addr %p (p %p)\n", pp,
1937: pp->pr_wchan, ph->ph_page,
1938: ph, page);
1939: return 1;
1940: }
1.88 chs 1941: }
1.3 pk 1942:
1.97 yamt 1943: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1944: return 0;
1945:
1.102 chs 1946: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1947: pi != NULL;
1.102 chs 1948: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1949:
1950: #ifdef DIAGNOSTIC
1951: if (pi->pi_magic != PI_MAGIC) {
1952: if (label != NULL)
1953: printf("%s: ", label);
1954: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1955: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1956: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1957: n, pi);
1.88 chs 1958: panic("pool");
1959: }
1960: #endif
1.121 yamt 1961: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1962: continue;
1963: }
1.128 christos 1964: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1965: if (page == ph->ph_page)
1966: continue;
1967:
1968: if (label != NULL)
1969: printf("%s: ", label);
1970: printf("pool(%p:%s): page inconsistency: page %p;"
1971: " item ordinal %d; addr %p (p %p)\n", pp,
1972: pp->pr_wchan, ph->ph_page,
1973: n, pi, page);
1974: return 1;
1975: }
1976: return 0;
1.3 pk 1977: }
1978:
1.88 chs 1979:
1.3 pk 1980: int
1.42 thorpej 1981: pool_chk(struct pool *pp, const char *label)
1.3 pk 1982: {
1983: struct pool_item_header *ph;
1984: int r = 0;
1985:
1.134 ad 1986: mutex_enter(&pp->pr_lock);
1.88 chs 1987: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1988: r = pool_chk_page(pp, label, ph);
1989: if (r) {
1990: goto out;
1991: }
1992: }
1993: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1994: r = pool_chk_page(pp, label, ph);
1995: if (r) {
1.3 pk 1996: goto out;
1997: }
1.88 chs 1998: }
1999: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2000: r = pool_chk_page(pp, label, ph);
2001: if (r) {
1.3 pk 2002: goto out;
2003: }
2004: }
1.88 chs 2005:
1.3 pk 2006: out:
1.134 ad 2007: mutex_exit(&pp->pr_lock);
1.3 pk 2008: return (r);
1.43 thorpej 2009: }
2010:
2011: /*
2012: * pool_cache_init:
2013: *
2014: * Initialize a pool cache.
1.134 ad 2015: */
2016: pool_cache_t
2017: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
2018: const char *wchan, struct pool_allocator *palloc, int ipl,
2019: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
2020: {
2021: pool_cache_t pc;
2022:
2023: pc = pool_get(&cache_pool, PR_WAITOK);
2024: if (pc == NULL)
2025: return NULL;
2026:
2027: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
2028: palloc, ipl, ctor, dtor, arg);
2029:
2030: return pc;
2031: }
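
A minimal sketch, not part of subr_pool.c, of setting up a pool cache for a
hypothetical "struct foo"; foo_cache, foo_ctor, foo_dtor and foo_cache_setup are
illustrative names only. Passing a NULL allocator with IPL_NONE selects
pool_allocator_nointr, as pool_cache_bootstrap() above shows:

        static pool_cache_t foo_cache;

        static int
        foo_ctor(void *arg, void *obj, int flags)
        {
                struct foo *f = obj;

                memset(f, 0, sizeof(*f));       /* put object in constructed state */
                return 0;                       /* non-zero would fail the allocation */
        }

        static void
        foo_dtor(void *arg, void *obj)
        {
                /* undo whatever foo_ctor acquired */
        }

        static void
        foo_cache_setup(void)
        {
                foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
                    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
        }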
2032:
2033: /*
2034: * pool_cache_bootstrap:
1.43 thorpej 2035: *
1.134 ad 2036: * Kernel-private version of pool_cache_init(). The caller
2037: * provides initial storage.
1.43 thorpej 2038: */
2039: void
1.134 ad 2040: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
2041: u_int align_offset, u_int flags, const char *wchan,
2042: struct pool_allocator *palloc, int ipl,
2043: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 2044: void *arg)
2045: {
1.134 ad 2046: CPU_INFO_ITERATOR cii;
1.145 ad 2047: pool_cache_t pc1;
1.134 ad 2048: struct cpu_info *ci;
2049: struct pool *pp;
2050:
2051: pp = &pc->pc_pool;
2052: if (palloc == NULL && ipl == IPL_NONE)
2053: palloc = &pool_allocator_nointr;
2054: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.157 ad 2055: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1.43 thorpej 2056:
1.134 ad 2057: if (ctor == NULL) {
2058: ctor = (int (*)(void *, void *, int))nullop;
2059: }
2060: if (dtor == NULL) {
2061: dtor = (void (*)(void *, void *))nullop;
2062: }
1.43 thorpej 2063:
1.134 ad 2064: pc->pc_emptygroups = NULL;
2065: pc->pc_fullgroups = NULL;
2066: pc->pc_partgroups = NULL;
1.43 thorpej 2067: pc->pc_ctor = ctor;
2068: pc->pc_dtor = dtor;
2069: pc->pc_arg = arg;
1.134 ad 2070: pc->pc_hits = 0;
1.48 thorpej 2071: pc->pc_misses = 0;
1.134 ad 2072: pc->pc_nempty = 0;
2073: pc->pc_npart = 0;
2074: pc->pc_nfull = 0;
2075: pc->pc_contended = 0;
2076: pc->pc_refcnt = 0;
1.136 yamt 2077: pc->pc_freecheck = NULL;
1.134 ad 2078:
1.142 ad 2079: if ((flags & PR_LARGECACHE) != 0) {
2080: pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1.163 ad 2081: pc->pc_pcgpool = &pcg_large_pool;
1.142 ad 2082: } else {
2083: pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1.163 ad 2084: pc->pc_pcgpool = &pcg_normal_pool;
1.142 ad 2085: }
2086:
1.134 ad 2087: /* Allocate per-CPU caches. */
2088: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
2089: pc->pc_ncpu = 0;
1.139 ad 2090: if (ncpu < 2) {
1.137 ad 2091: /* XXX For sparc: boot CPU is not attached yet. */
2092: pool_cache_cpu_init1(curcpu(), pc);
2093: } else {
2094: for (CPU_INFO_FOREACH(cii, ci)) {
2095: pool_cache_cpu_init1(ci, pc);
2096: }
1.134 ad 2097: }
1.145 ad 2098:
2099: /* Add to list of all pools. */
2100: if (__predict_true(!cold))
1.134 ad 2101: mutex_enter(&pool_head_lock);
1.145 ad 2102: TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
2103: if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
2104: break;
2105: }
2106: if (pc1 == NULL)
2107: TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
2108: else
2109: TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
2110: if (__predict_true(!cold))
1.134 ad 2111: mutex_exit(&pool_head_lock);
1.145 ad 2112:
2113: membar_sync();
2114: pp->pr_cache = pc;
1.43 thorpej 2115: }
2116:
2117: /*
2118: * pool_cache_destroy:
2119: *
2120: * Destroy a pool cache.
2121: */
2122: void
1.134 ad 2123: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 2124: {
1.134 ad 2125: struct pool *pp = &pc->pc_pool;
1.175 ! jym 2126: u_int i;
1.134 ad 2127:
2128: /* Remove it from the global list. */
2129: mutex_enter(&pool_head_lock);
2130: while (pc->pc_refcnt != 0)
2131: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 2132: TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1.134 ad 2133: mutex_exit(&pool_head_lock);
1.43 thorpej 2134:
2135: /* First, invalidate the entire cache. */
2136: pool_cache_invalidate(pc);
2137:
1.134 ad 2138: /* Disassociate it from the pool. */
2139: mutex_enter(&pp->pr_lock);
2140: pp->pr_cache = NULL;
2141: mutex_exit(&pp->pr_lock);
2142:
2143: /* Destroy per-CPU data */
1.175 ! jym 2144: for (i = 0; i < MAXCPUS; i++)
! 2145: pool_cache_invalidate_cpu(pc, i);
1.134 ad 2146:
2147: /* Finally, destroy it. */
2148: mutex_destroy(&pc->pc_lock);
2149: pool_destroy(pp);
2150: pool_put(&cache_pool, pc);
2151: }
2152:
2153: /*
2154: * pool_cache_cpu_init1:
2155: *
2156: * Called for each pool_cache whenever a new CPU is attached.
2157: */
2158: static void
2159: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
2160: {
2161: pool_cache_cpu_t *cc;
1.137 ad 2162: int index;
1.134 ad 2163:
1.137 ad 2164: index = ci->ci_index;
2165:
2166: KASSERT(index < MAXCPUS);
1.134 ad 2167:
1.137 ad 2168: if ((cc = pc->pc_cpus[index]) != NULL) {
2169: KASSERT(cc->cc_cpuindex == index);
1.134 ad 2170: return;
2171: }
2172:
2173: /*
2174: * The first CPU is 'free'. This needs to be the case for
2175: * bootstrap - we may not be able to allocate yet.
2176: */
2177: if (pc->pc_ncpu == 0) {
2178: cc = &pc->pc_cpu0;
2179: pc->pc_ncpu = 1;
2180: } else {
2181: mutex_enter(&pc->pc_lock);
2182: pc->pc_ncpu++;
2183: mutex_exit(&pc->pc_lock);
2184: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
2185: }
2186:
2187: cc->cc_ipl = pc->pc_pool.pr_ipl;
2188: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
2189: cc->cc_cache = pc;
1.137 ad 2190: cc->cc_cpuindex = index;
1.134 ad 2191: cc->cc_hits = 0;
2192: cc->cc_misses = 0;
1.169 yamt 2193: cc->cc_current = __UNCONST(&pcg_dummy);
2194: cc->cc_previous = __UNCONST(&pcg_dummy);
1.134 ad 2195:
1.137 ad 2196: pc->pc_cpus[index] = cc;
1.43 thorpej 2197: }
2198:
1.134 ad 2199: /*
2200: * pool_cache_cpu_init:
2201: *
2202: * Called whenever a new CPU is attached.
2203: */
2204: void
2205: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 2206: {
1.134 ad 2207: pool_cache_t pc;
2208:
2209: mutex_enter(&pool_head_lock);
1.145 ad 2210: TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1.134 ad 2211: pc->pc_refcnt++;
2212: mutex_exit(&pool_head_lock);
1.43 thorpej 2213:
1.134 ad 2214: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 2215:
1.134 ad 2216: mutex_enter(&pool_head_lock);
2217: pc->pc_refcnt--;
2218: cv_broadcast(&pool_busy);
2219: }
2220: mutex_exit(&pool_head_lock);
1.43 thorpej 2221: }
2222:
1.134 ad 2223: /*
2224: * pool_cache_reclaim:
2225: *
2226: * Reclaim memory from a pool cache.
2227: */
2228: bool
2229: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 2230: {
2231:
1.134 ad 2232: return pool_reclaim(&pc->pc_pool);
2233: }
1.43 thorpej 2234:
1.136 yamt 2235: static void
2236: pool_cache_destruct_object1(pool_cache_t pc, void *object)
2237: {
2238:
2239: (*pc->pc_dtor)(pc->pc_arg, object);
2240: pool_put(&pc->pc_pool, object);
2241: }
2242:
1.134 ad 2243: /*
2244: * pool_cache_destruct_object:
2245: *
2246: * Force destruction of an object and its release back into
2247: * the pool.
2248: */
2249: void
2250: pool_cache_destruct_object(pool_cache_t pc, void *object)
2251: {
2252:
1.136 yamt 2253: FREECHECK_IN(&pc->pc_freecheck, object);
2254:
2255: pool_cache_destruct_object1(pc, object);
1.43 thorpej 2256: }
2257:
1.134 ad 2258: /*
2259: * pool_cache_invalidate_groups:
2260: *
2261: * Invalidate a chain of groups and destruct all objects.
2262: */
1.102 chs 2263: static void
1.134 ad 2264: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 2265: {
1.134 ad 2266: void *object;
2267: pcg_t *next;
2268: int i;
2269:
2270: for (; pcg != NULL; pcg = next) {
2271: next = pcg->pcg_next;
2272:
2273: for (i = 0; i < pcg->pcg_avail; i++) {
2274: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 2275: pool_cache_destruct_object1(pc, object);
1.134 ad 2276: }
1.102 chs 2277:
1.142 ad 2278: if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
2279: pool_put(&pcg_large_pool, pcg);
2280: } else {
2281: KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
2282: pool_put(&pcg_normal_pool, pcg);
2283: }
1.102 chs 2284: }
2285: }
2286:
1.43 thorpej 2287: /*
1.134 ad 2288: * pool_cache_invalidate:
1.43 thorpej 2289: *
1.134 ad 2290: * Invalidate a pool cache (destruct and release all of the
2291: * cached objects). Does not reclaim objects from the pool.
1.43 thorpej 2292: */
1.134 ad 2293: void
2294: pool_cache_invalidate(pool_cache_t pc)
2295: {
2296: pcg_t *full, *empty, *part;
2297:
2298: mutex_enter(&pc->pc_lock);
2299: full = pc->pc_fullgroups;
2300: empty = pc->pc_emptygroups;
2301: part = pc->pc_partgroups;
2302: pc->pc_fullgroups = NULL;
2303: pc->pc_emptygroups = NULL;
2304: pc->pc_partgroups = NULL;
2305: pc->pc_nfull = 0;
2306: pc->pc_nempty = 0;
2307: pc->pc_npart = 0;
2308: mutex_exit(&pc->pc_lock);
2309:
2310: pool_cache_invalidate_groups(pc, full);
2311: pool_cache_invalidate_groups(pc, empty);
2312: pool_cache_invalidate_groups(pc, part);
2313: }
2314:
1.175 ! jym 2315: /*
! 2316: * pool_cache_invalidate_local:
! 2317: *
         !       2318: 	 *	Invalidate all local ('current CPU') cached objects in
         !       2319: 	 *	the pool cache.
         !       2320: 	 *	It is the caller's responsibility to ensure that no operation
         !       2321: 	 *	is taking place on this pool cache while doing the local invalidation.
! 2322: */
! 2323: void
! 2324: pool_cache_invalidate_local(pool_cache_t pc)
! 2325: {
! 2326: pool_cache_invalidate_cpu(pc, curcpu()->ci_index);
! 2327: }
! 2328:
! 2329: /*
! 2330: * pool_cache_invalidate_cpu:
! 2331: *
         !       2332: 	 *	Invalidate all cached objects in the pool cache that are
         !       2333: 	 *	bound to the CPU identified by the given index.
         !       2334: 	 *	It is the caller's responsibility to ensure that no operation
         !       2335: 	 *	is taking place on this pool cache while doing this invalidation.
         !       2336: 	 *	WARNING: because no inter-CPU locking is enforced, trying to
         !       2337: 	 *	invalidate cached objects for a CPU other than the one
         !       2338: 	 *	currently running may result in undefined behaviour.
! 2339: */
! 2340: static void
! 2341: pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
! 2342: {
! 2343:
! 2344: pool_cache_cpu_t *cc;
! 2345: pcg_t *pcg;
! 2346:
! 2347: if ((cc = pc->pc_cpus[index]) == NULL)
! 2348: return;
! 2349:
! 2350: if ((pcg = cc->cc_current) != &pcg_dummy) {
! 2351: pcg->pcg_next = NULL;
! 2352: pool_cache_invalidate_groups(pc, pcg);
! 2353: }
! 2354: if ((pcg = cc->cc_previous) != &pcg_dummy) {
! 2355: pcg->pcg_next = NULL;
! 2356: pool_cache_invalidate_groups(pc, pcg);
! 2357: }
! 2358: if (cc != &pc->pc_cpu0)
! 2359: pool_put(&cache_cpu_pool, cc);
! 2360:
! 2361: }
! 2362:
1.134 ad 2363: void
2364: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2365: {
2366:
2367: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2368: }
2369:
2370: void
2371: pool_cache_setlowat(pool_cache_t pc, int n)
2372: {
2373:
2374: pool_setlowat(&pc->pc_pool, n);
2375: }
2376:
2377: void
2378: pool_cache_sethiwat(pool_cache_t pc, int n)
2379: {
2380:
2381: pool_sethiwat(&pc->pc_pool, n);
2382: }
2383:
2384: void
2385: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2386: {
2387:
2388: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2389: }
2390:
1.162 ad 2391: static bool __noinline
2392: pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
1.134 ad 2393: paddr_t *pap, int flags)
1.43 thorpej 2394: {
1.134 ad 2395: pcg_t *pcg, *cur;
2396: uint64_t ncsw;
2397: pool_cache_t pc;
1.43 thorpej 2398: void *object;
1.58 thorpej 2399:
1.168 yamt 2400: KASSERT(cc->cc_current->pcg_avail == 0);
2401: KASSERT(cc->cc_previous->pcg_avail == 0);
2402:
1.134 ad 2403: pc = cc->cc_cache;
2404: cc->cc_misses++;
1.43 thorpej 2405:
1.134 ad 2406: /*
2407: * Nothing was available locally. Try and grab a group
2408: * from the cache.
2409: */
1.162 ad 2410: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.134 ad 2411: ncsw = curlwp->l_ncsw;
2412: mutex_enter(&pc->pc_lock);
2413: pc->pc_contended++;
1.43 thorpej 2414:
1.134 ad 2415: /*
2416: * If we context switched while locking, then
2417: * our view of the per-CPU data is invalid:
2418: * retry.
2419: */
2420: if (curlwp->l_ncsw != ncsw) {
2421: mutex_exit(&pc->pc_lock);
1.162 ad 2422: return true;
1.43 thorpej 2423: }
1.102 chs 2424: }
1.43 thorpej 2425:
1.162 ad 2426: if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
1.43 thorpej 2427: /*
1.134 ad 2428: * If there's a full group, release our empty
2429: * group back to the cache. Install the full
2430: * group as cc_current and return.
1.43 thorpej 2431: */
1.162 ad 2432: if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
1.134 ad 2433: KASSERT(cur->pcg_avail == 0);
2434: cur->pcg_next = pc->pc_emptygroups;
2435: pc->pc_emptygroups = cur;
2436: pc->pc_nempty++;
1.87 thorpej 2437: }
1.142 ad 2438: KASSERT(pcg->pcg_avail == pcg->pcg_size);
1.134 ad 2439: cc->cc_current = pcg;
2440: pc->pc_fullgroups = pcg->pcg_next;
2441: pc->pc_hits++;
2442: pc->pc_nfull--;
2443: mutex_exit(&pc->pc_lock);
1.162 ad 2444: return true;
1.134 ad 2445: }
2446:
2447: /*
2448: * Nothing available locally or in cache. Take the slow
2449: * path: fetch a new object from the pool and construct
2450: * it.
2451: */
2452: pc->pc_misses++;
2453: mutex_exit(&pc->pc_lock);
1.162 ad 2454: splx(s);
1.134 ad 2455:
2456: object = pool_get(&pc->pc_pool, flags);
2457: *objectp = object;
1.162 ad 2458: if (__predict_false(object == NULL))
2459: return false;
1.125 ad 2460:
1.162 ad 2461: if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
1.134 ad 2462: pool_put(&pc->pc_pool, object);
2463: *objectp = NULL;
1.162 ad 2464: return false;
1.43 thorpej 2465: }
2466:
1.134 ad 2467: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2468: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2469:
1.134 ad 2470: if (pap != NULL) {
2471: #ifdef POOL_VTOPHYS
2472: *pap = POOL_VTOPHYS(object);
2473: #else
2474: *pap = POOL_PADDR_INVALID;
2475: #endif
1.102 chs 2476: }
1.43 thorpej 2477:
1.125 ad 2478: FREECHECK_OUT(&pc->pc_freecheck, object);
1.162 ad 2479: return false;
1.43 thorpej 2480: }
2481:
2482: /*
1.134 ad 2483: * pool_cache_get{,_paddr}:
1.43 thorpej 2484: *
1.134 ad 2485: * Get an object from a pool cache (optionally returning
2486: * the physical address of the object).
1.43 thorpej 2487: */
1.134 ad 2488: void *
2489: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2490: {
1.134 ad 2491: pool_cache_cpu_t *cc;
2492: pcg_t *pcg;
2493: void *object;
1.60 thorpej 2494: int s;
1.43 thorpej 2495:
1.134 ad 2496: #ifdef LOCKDEBUG
1.155 ad 2497: if (flags & PR_WAITOK) {
1.154 yamt 2498: ASSERT_SLEEPABLE();
1.155 ad 2499: }
1.134 ad 2500: #endif
1.125 ad 2501:
1.162 ad 2502: /* Lock out interrupts and disable preemption. */
2503: s = splvm();
1.165 yamt 2504: while (/* CONSTCOND */ true) {
1.134 ad 2505: /* Try and allocate an object from the current group. */
1.162 ad 2506: cc = pc->pc_cpus[curcpu()->ci_index];
2507: KASSERT(cc->cc_cache == pc);
1.134 ad 2508: pcg = cc->cc_current;
1.162 ad 2509: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2510: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
1.162 ad 2511: if (__predict_false(pap != NULL))
1.134 ad 2512: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
1.148 yamt 2513: #if defined(DIAGNOSTIC)
1.134 ad 2514: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
1.163 ad 2515: KASSERT(pcg->pcg_avail < pcg->pcg_size);
1.134 ad 2516: KASSERT(object != NULL);
1.163 ad 2517: #endif
1.134 ad 2518: cc->cc_hits++;
1.162 ad 2519: splx(s);
1.134 ad 2520: FREECHECK_OUT(&pc->pc_freecheck, object);
2521: return object;
1.43 thorpej 2522: }
2523:
2524: /*
1.134 ad 2525: * That failed. If the previous group isn't empty, swap
2526: * it with the current group and allocate from there.
1.43 thorpej 2527: */
1.134 ad 2528: pcg = cc->cc_previous;
1.162 ad 2529: if (__predict_true(pcg->pcg_avail > 0)) {
1.134 ad 2530: cc->cc_previous = cc->cc_current;
2531: cc->cc_current = pcg;
2532: continue;
1.43 thorpej 2533: }
2534:
1.134 ad 2535: /*
2536: * Can't allocate from either group: try the slow path.
2537: * If get_slow() allocated an object for us, or if
1.162 ad 2538: * no more objects are available, it will return false.
1.134 ad 2539: * Otherwise, we need to retry.
2540: */
1.165 yamt 2541: if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2542: break;
2543: }
1.43 thorpej 2544:
1.134 ad 2545: return object;
1.51 thorpej 2546: }
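
A minimal sketch, not part of subr_pool.c, of the usual allocate/free pairing
against the hypothetical foo_cache from the earlier sketch; the object comes
back constructed and is returned to the per-CPU layer still constructed:

        struct foo *f;

        /* PR_WAITOK may sleep; the get can still fail if the ctor fails. */
        f = pool_cache_get(foo_cache, PR_WAITOK);
        if (f != NULL) {
                /* ... use the object ... */
                pool_cache_put(foo_cache, f);
        }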
2547:
1.162 ad 2548: static bool __noinline
2549: pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
1.51 thorpej 2550: {
1.163 ad 2551: pcg_t *pcg, *cur;
1.134 ad 2552: uint64_t ncsw;
2553: pool_cache_t pc;
1.51 thorpej 2554:
1.168 yamt 2555: KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2556: KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2557:
1.134 ad 2558: pc = cc->cc_cache;
1.171 ad 2559: pcg = NULL;
1.134 ad 2560: cc->cc_misses++;
1.43 thorpej 2561:
1.171 ad 2562: /*
2563: * If there are no empty groups in the cache then allocate one
2564: * while still unlocked.
2565: */
2566: if (__predict_false(pc->pc_emptygroups == NULL)) {
2567: if (__predict_true(!pool_cache_disable)) {
2568: pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2569: }
2570: if (__predict_true(pcg != NULL)) {
2571: pcg->pcg_avail = 0;
2572: pcg->pcg_size = pc->pc_pcgsize;
2573: }
2574: }
2575:
1.162 ad 2576: /* Lock the cache. */
2577: if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
1.164 ad 2578: ncsw = curlwp->l_ncsw;
1.134 ad 2579: mutex_enter(&pc->pc_lock);
2580: pc->pc_contended++;
1.162 ad 2581:
1.163 ad 2582: /*
2583: * If we context switched while locking, then our view of
2584: * the per-CPU data is invalid: retry.
2585: */
2586: if (__predict_false(curlwp->l_ncsw != ncsw)) {
2587: mutex_exit(&pc->pc_lock);
1.171 ad 2588: if (pcg != NULL) {
2589: pool_put(pc->pc_pcgpool, pcg);
2590: }
1.163 ad 2591: return true;
2592: }
1.162 ad 2593: }
1.102 chs 2594:
1.163    ad      2595: 	/* If we didn't allocate an empty group, grab one from the cache. */
1.171 ad 2596: if (pcg == NULL && pc->pc_emptygroups != NULL) {
2597: pcg = pc->pc_emptygroups;
1.163 ad 2598: pc->pc_emptygroups = pcg->pcg_next;
2599: pc->pc_nempty--;
1.134 ad 2600: }
1.130 ad 2601:
1.162 ad 2602: /*
                 2603: 	 * If there's an empty group, release our full group back
2604: * to the cache. Install the empty group to the local CPU
2605: * and return.
2606: */
1.163 ad 2607: if (pcg != NULL) {
1.134 ad 2608: KASSERT(pcg->pcg_avail == 0);
1.162 ad 2609: if (__predict_false(cc->cc_previous == &pcg_dummy)) {
1.146 ad 2610: cc->cc_previous = pcg;
2611: } else {
1.162 ad 2612: cur = cc->cc_current;
2613: if (__predict_true(cur != &pcg_dummy)) {
1.163 ad 2614: KASSERT(cur->pcg_avail == cur->pcg_size);
1.146 ad 2615: cur->pcg_next = pc->pc_fullgroups;
2616: pc->pc_fullgroups = cur;
2617: pc->pc_nfull++;
2618: }
2619: cc->cc_current = pcg;
2620: }
1.163 ad 2621: pc->pc_hits++;
1.134 ad 2622: mutex_exit(&pc->pc_lock);
1.162 ad 2623: return true;
1.102 chs 2624: }
1.105 christos 2625:
1.134 ad 2626: /*
1.162 ad 2627: * Nothing available locally or in cache, and we didn't
2628: * allocate an empty group. Take the slow path and destroy
2629: * the object here and now.
1.134 ad 2630: */
2631: pc->pc_misses++;
2632: mutex_exit(&pc->pc_lock);
1.162 ad 2633: splx(s);
2634: pool_cache_destruct_object(pc, object);
1.105 christos 2635:
1.162 ad 2636: return false;
1.134 ad 2637: }
1.102 chs 2638:
1.43 thorpej 2639: /*
1.134 ad 2640: * pool_cache_put{,_paddr}:
1.43 thorpej 2641: *
1.134 ad 2642: * Put an object back to the pool cache (optionally caching the
2643: * physical address of the object).
1.43 thorpej 2644: */
1.101 thorpej 2645: void
1.134 ad 2646: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2647: {
1.134 ad 2648: pool_cache_cpu_t *cc;
2649: pcg_t *pcg;
2650: int s;
1.101 thorpej 2651:
1.172 yamt 2652: KASSERT(object != NULL);
1.134 ad 2653: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2654:
1.162 ad 2655: /* Lock out interrupts and disable preemption. */
2656: s = splvm();
1.165 yamt 2657: while (/* CONSTCOND */ true) {
1.134 ad 2658: /* If the current group isn't full, release it there. */
1.162 ad 2659: cc = pc->pc_cpus[curcpu()->ci_index];
2660: KASSERT(cc->cc_cache == pc);
1.134 ad 2661: pcg = cc->cc_current;
1.162 ad 2662: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2663: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2664: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2665: pcg->pcg_avail++;
2666: cc->cc_hits++;
1.162 ad 2667: splx(s);
1.134 ad 2668: return;
2669: }
1.43 thorpej 2670:
1.134 ad 2671: /*
1.162 ad 2672: * That failed. If the previous group isn't full, swap
1.134 ad 2673: * it with the current group and try again.
2674: */
2675: pcg = cc->cc_previous;
1.162 ad 2676: if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
1.134 ad 2677: cc->cc_previous = cc->cc_current;
2678: cc->cc_current = pcg;
2679: continue;
2680: }
1.43 thorpej 2681:
1.134 ad 2682: /*
2683: * Can't free to either group: try the slow path.
2684: * If put_slow() releases the object for us, it
1.162 ad 2685: * will return false. Otherwise we need to retry.
1.134 ad 2686: */
1.165 yamt 2687: if (!pool_cache_put_slow(cc, s, object))
2688: break;
2689: }
1.43 thorpej 2690: }
2691:
2692: /*
1.134 ad 2693: * pool_cache_xcall:
1.43 thorpej 2694: *
1.134 ad 2695: * Transfer objects from the per-CPU cache to the global cache.
2696: * Run within a cross-call thread.
1.43 thorpej 2697: */
2698: static void
1.134 ad 2699: pool_cache_xcall(pool_cache_t pc)
1.43 thorpej 2700: {
1.134 ad 2701: pool_cache_cpu_t *cc;
2702: pcg_t *prev, *cur, **list;
1.162 ad 2703: int s;
1.134 ad 2704:
1.162 ad 2705: s = splvm();
2706: mutex_enter(&pc->pc_lock);
2707: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2708: cur = cc->cc_current;
1.169 yamt 2709: cc->cc_current = __UNCONST(&pcg_dummy);
1.134 ad 2710: prev = cc->cc_previous;
1.169 yamt 2711: cc->cc_previous = __UNCONST(&pcg_dummy);
1.162 ad 2712: if (cur != &pcg_dummy) {
1.142 ad 2713: if (cur->pcg_avail == cur->pcg_size) {
1.134 ad 2714: list = &pc->pc_fullgroups;
2715: pc->pc_nfull++;
2716: } else if (cur->pcg_avail == 0) {
2717: list = &pc->pc_emptygroups;
2718: pc->pc_nempty++;
2719: } else {
2720: list = &pc->pc_partgroups;
2721: pc->pc_npart++;
2722: }
2723: cur->pcg_next = *list;
2724: *list = cur;
2725: }
1.162 ad 2726: if (prev != &pcg_dummy) {
1.142 ad 2727: if (prev->pcg_avail == prev->pcg_size) {
1.134 ad 2728: list = &pc->pc_fullgroups;
2729: pc->pc_nfull++;
2730: } else if (prev->pcg_avail == 0) {
2731: list = &pc->pc_emptygroups;
2732: pc->pc_nempty++;
2733: } else {
2734: list = &pc->pc_partgroups;
2735: pc->pc_npart++;
2736: }
2737: prev->pcg_next = *list;
2738: *list = prev;
2739: }
2740: mutex_exit(&pc->pc_lock);
2741: splx(s);
1.3 pk 2742: }
1.66 thorpej 2743:
2744: /*
2745: * Pool backend allocators.
2746: *
2747: * Each pool has a backend allocator that handles allocation, deallocation,
2748: * and any additional draining that might be needed.
2749: *
2750: * We provide two standard allocators:
2751: *
2752: * pool_allocator_kmem - the default when no allocator is specified
2753: *
2754: * pool_allocator_nointr - used for pools that will not be accessed
2755: * in interrupt context.
2756: */
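
A minimal sketch, not part of subr_pool.c, of how a pool picks up a backend:
passing NULL to pool_init() selects the default pool_allocator_kmem, while a
pool that is never touched from interrupt context may name pool_allocator_nointr
explicitly (foo_pool, bar_pool and their structures are hypothetical):

        pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
            NULL, IPL_VM);                      /* default: pool_allocator_kmem */
        pool_init(&bar_pool, sizeof(struct bar), 0, 0, 0, "barpl",
            &pool_allocator_nointr, IPL_NONE);  /* no interrupt-context use */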
2757: void *pool_page_alloc(struct pool *, int);
2758: void pool_page_free(struct pool *, void *);
2759:
1.112 bjh21 2760: #ifdef POOL_SUBPAGE
2761: struct pool_allocator pool_allocator_kmem_fullpage = {
2762: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2763: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2764: };
2765: #else
1.66 thorpej 2766: struct pool_allocator pool_allocator_kmem = {
2767: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2768: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2769: };
1.112 bjh21 2770: #endif
1.66 thorpej 2771:
2772: void *pool_page_alloc_nointr(struct pool *, int);
2773: void pool_page_free_nointr(struct pool *, void *);
2774:
1.112 bjh21 2775: #ifdef POOL_SUBPAGE
2776: struct pool_allocator pool_allocator_nointr_fullpage = {
2777: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2778: .pa_backingmapptr = &kernel_map,
1.112 bjh21 2779: };
2780: #else
1.66 thorpej 2781: struct pool_allocator pool_allocator_nointr = {
2782: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2783: .pa_backingmapptr = &kernel_map,
1.66 thorpej 2784: };
1.112 bjh21 2785: #endif
1.66 thorpej 2786:
2787: #ifdef POOL_SUBPAGE
2788: void *pool_subpage_alloc(struct pool *, int);
2789: void pool_subpage_free(struct pool *, void *);
2790:
1.112 bjh21 2791: struct pool_allocator pool_allocator_kmem = {
2792: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2793: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2794: };
2795:
2796: void *pool_subpage_alloc_nointr(struct pool *, int);
2797: void pool_subpage_free_nointr(struct pool *, void *);
2798:
2799: struct pool_allocator pool_allocator_nointr = {
2800: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2801: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2802: };
2803: #endif /* POOL_SUBPAGE */
2804:
1.117 yamt 2805: static void *
2806: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2807: {
1.117 yamt 2808: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2809: void *res;
2810:
1.117 yamt 2811: res = (*pa->pa_alloc)(pp, flags);
2812: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2813: /*
1.117 yamt 2814: * We only run the drain hook here if PR_NOWAIT.
2815: * In other cases, the hook will be run in
2816: * pool_reclaim().
1.66 thorpej 2817: */
1.117 yamt 2818: if (pp->pr_drain_hook != NULL) {
2819: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2820: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2821: }
1.117 yamt 2822: }
2823: return res;
1.66 thorpej 2824: }
2825:
1.117 yamt 2826: static void
1.66 thorpej 2827: pool_allocator_free(struct pool *pp, void *v)
2828: {
2829: struct pool_allocator *pa = pp->pr_alloc;
2830:
2831: (*pa->pa_free)(pp, v);
2832: }
2833:
2834: void *
1.124 yamt 2835: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2836: {
1.127 thorpej 2837: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2838:
1.100 yamt 2839: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2840: }
2841:
2842: void
1.124 yamt 2843: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2844: {
2845:
1.98 yamt 2846: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2847: }
2848:
2849: static void *
1.124 yamt 2850: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2851: {
1.127 thorpej 2852: bool waitok = (flags & PR_WAITOK) ? true : false;
1.98 yamt 2853:
1.100 yamt 2854: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2855: }
2856:
2857: static void
1.124 yamt 2858: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2859: {
2860:
1.100 yamt 2861: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2862: }
2863:
2864: #ifdef POOL_SUBPAGE
2865: /* Sub-page allocator, for machines with large hardware pages. */
2866: void *
2867: pool_subpage_alloc(struct pool *pp, int flags)
2868: {
1.134 ad 2869: return pool_get(&psppool, flags);
1.66 thorpej 2870: }
2871:
2872: void
2873: pool_subpage_free(struct pool *pp, void *v)
2874: {
2875: pool_put(&psppool, v);
2876: }
2877:
2878: /* We don't provide a real nointr allocator. Maybe later. */
2879: void *
1.112 bjh21 2880: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2881: {
2882:
2883: return (pool_subpage_alloc(pp, flags));
2884: }
2885:
2886: void
1.112 bjh21 2887: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2888: {
2889:
2890: pool_subpage_free(pp, v);
2891: }
1.112 bjh21 2892: #endif /* POOL_SUBPAGE */
1.66 thorpej 2893: void *
1.124 yamt 2894: pool_page_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2895: {
1.127 thorpej 2896: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2897:
1.100 yamt 2898: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2899: }
2900:
2901: void
1.124 yamt 2902: pool_page_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2903: {
2904:
1.98 yamt 2905: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2906: }
1.141 yamt 2907:
2908: #if defined(DDB)
2909: static bool
2910: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2911: {
2912:
2913: return (uintptr_t)ph->ph_page <= addr &&
2914: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2915: }
2916:
1.143 yamt 2917: static bool
2918: pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2919: {
2920:
2921: return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2922: }
2923:
2924: static bool
2925: pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2926: {
2927: int i;
2928:
2929: if (pcg == NULL) {
2930: return false;
2931: }
1.144 yamt 2932: for (i = 0; i < pcg->pcg_avail; i++) {
1.143 yamt 2933: if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2934: return true;
2935: }
2936: }
2937: return false;
2938: }
2939:
2940: static bool
2941: pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2942: {
2943:
2944: if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2945: unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2946: pool_item_bitmap_t *bitmap =
2947: ph->ph_bitmap + (idx / BITMAP_SIZE);
2948: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2949:
2950: return (*bitmap & mask) == 0;
2951: } else {
2952: struct pool_item *pi;
2953:
2954: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2955: if (pool_in_item(pp, pi, addr)) {
2956: return false;
2957: }
2958: }
2959: return true;
2960: }
2961: }
2962:
1.141 yamt 2963: void
2964: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2965: {
2966: struct pool *pp;
2967:
1.145 ad 2968: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.141 yamt 2969: struct pool_item_header *ph;
2970: uintptr_t item;
1.143 yamt 2971: bool allocated = true;
2972: bool incache = false;
2973: bool incpucache = false;
2974: char cpucachestr[32];
1.141 yamt 2975:
2976: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2977: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2978: if (pool_in_page(pp, ph, addr)) {
2979: goto found;
2980: }
2981: }
2982: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2983: if (pool_in_page(pp, ph, addr)) {
1.143 yamt 2984: allocated =
2985: pool_allocated(pp, ph, addr);
2986: goto found;
2987: }
2988: }
2989: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2990: if (pool_in_page(pp, ph, addr)) {
2991: allocated = false;
1.141 yamt 2992: goto found;
2993: }
2994: }
2995: continue;
2996: } else {
2997: ph = pr_find_pagehead_noalign(pp, (void *)addr);
2998: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2999: continue;
3000: }
1.143 yamt 3001: allocated = pool_allocated(pp, ph, addr);
1.141 yamt 3002: }
3003: found:
1.143 yamt 3004: if (allocated && pp->pr_cache) {
3005: pool_cache_t pc = pp->pr_cache;
3006: struct pool_cache_group *pcg;
3007: int i;
3008:
3009: for (pcg = pc->pc_fullgroups; pcg != NULL;
3010: pcg = pcg->pcg_next) {
3011: if (pool_in_cg(pp, pcg, addr)) {
3012: incache = true;
3013: goto print;
3014: }
3015: }
3016: for (i = 0; i < MAXCPUS; i++) {
3017: pool_cache_cpu_t *cc;
3018:
3019: if ((cc = pc->pc_cpus[i]) == NULL) {
3020: continue;
3021: }
3022: if (pool_in_cg(pp, cc->cc_current, addr) ||
3023: pool_in_cg(pp, cc->cc_previous, addr)) {
3024: struct cpu_info *ci =
1.170 ad 3025: cpu_lookup(i);
1.143 yamt 3026:
3027: incpucache = true;
3028: snprintf(cpucachestr,
3029: sizeof(cpucachestr),
3030: "cached by CPU %u",
1.153 martin 3031: ci->ci_index);
1.143 yamt 3032: goto print;
3033: }
3034: }
3035: }
3036: print:
1.141 yamt 3037: item = (uintptr_t)ph->ph_page + ph->ph_off;
3038: item = item + rounddown(addr - item, pp->pr_size);
1.143 yamt 3039: (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
1.141 yamt 3040: (void *)addr, item, (size_t)(addr - item),
1.143 yamt 3041: pp->pr_wchan,
3042: incpucache ? cpucachestr :
3043: incache ? "cached" : allocated ? "allocated" : "free");
1.141 yamt 3044: }
3045: }
3046: #endif /* defined(DDB) */