Annotation of src/sys/kern/subr_pool.c, Revision 1.156.2.1
1.156.2.1! yamt 1: /* $NetBSD: subr_pool.c,v 1.156 2008/03/27 18:30:15 ad Exp $ */
1.1 pk 2:
3: /*-
1.134 ad 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.134 ad 9: * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: *
20: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30: * POSSIBILITY OF SUCH DAMAGE.
31: */
1.64 lukem 32:
33: #include <sys/cdefs.h>
1.156.2.1! yamt 34: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.156 2008/03/27 18:30:15 ad Exp $");
1.24 scottr 35:
1.141 yamt 36: #include "opt_ddb.h"
1.25 thorpej 37: #include "opt_pool.h"
1.24 scottr 38: #include "opt_poollog.h"
1.28 thorpej 39: #include "opt_lockdebug.h"
1.1 pk 40:
41: #include <sys/param.h>
42: #include <sys/systm.h>
1.135 yamt 43: #include <sys/bitops.h>
1.1 pk 44: #include <sys/proc.h>
45: #include <sys/errno.h>
46: #include <sys/kernel.h>
47: #include <sys/malloc.h>
48: #include <sys/pool.h>
1.20 thorpej 49: #include <sys/syslog.h>
1.125 ad 50: #include <sys/debug.h>
1.134 ad 51: #include <sys/lockdebug.h>
52: #include <sys/xcall.h>
53: #include <sys/cpu.h>
1.145 ad 54: #include <sys/atomic.h>
1.3 pk 55:
56: #include <uvm/uvm.h>
57:
1.1 pk 58: /*
59: * Pool resource management utility.
1.3 pk 60: *
1.88 chs 61: * Memory is allocated in pages which are split into pieces according to
62: * the pool item size. Each page is kept on one of three lists in the
63: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
64: * for empty, full and partially-full pages respectively. The individual
65: * pool items are on a linked list headed by `ph_itemlist' in each page
66: * header. The memory for building the page list is either taken from
67: * the allocated pages themselves (for small pool items) or taken from
68: * an internal pool of page headers (`phpool').
1.1 pk 69: */
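/*
 * Illustrative usage sketch (not part of this file; struct foo, foo_pool
 * and "foopl" are placeholder names).  A pool is set up once with
 * pool_init() and items are then taken and returned with pool_get() and
 * pool_put():
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", &pool_allocator_nointr, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */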
70:
1.3 pk 71: /* List of all pools */
1.145 ad 72: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.134 ad 73:
1.3 pk 74: /* Private pool for page header structures */
1.97 yamt 75: #define PHPOOL_MAX 8
76: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 77: #define PHPOOL_FREELIST_NELEM(idx) \
78: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
1.3 pk 79:
1.62 bjh21 80: #ifdef POOL_SUBPAGE
81: /* Pool of subpages for use by normal pools. */
82: static struct pool psppool;
83: #endif
84:
1.117 yamt 85: static SLIST_HEAD(, pool_allocator) pa_deferinitq =
86: SLIST_HEAD_INITIALIZER(pa_deferinitq);
87:
1.98 yamt 88: static void *pool_page_alloc_meta(struct pool *, int);
89: static void pool_page_free_meta(struct pool *, void *);
90:
91: /* allocator for pool metadata */
1.134 ad 92: struct pool_allocator pool_allocator_meta = {
1.117 yamt 93: pool_page_alloc_meta, pool_page_free_meta,
94: .pa_backingmapptr = &kmem_map,
1.98 yamt 95: };
96:
1.3 pk 97: /* # of seconds to retain page after last use */
98: int pool_inactive_time = 10;
99:
100: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 101: static struct pool *drainpp;
102:
1.134 ad 103: /* This lock protects both pool_head and drainpp. */
104: static kmutex_t pool_head_lock;
105: static kcondvar_t pool_busy;
1.3 pk 106:
1.135 yamt 107: typedef uint32_t pool_item_bitmap_t;
108: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
109: #define BITMAP_MASK (BITMAP_SIZE - 1)
1.99 yamt 110:
1.3 pk 111: struct pool_item_header {
112: /* Page headers */
1.88 chs 113: LIST_ENTRY(pool_item_header)
1.3 pk 114: ph_pagelist; /* pool page list */
1.88 chs 115: SPLAY_ENTRY(pool_item_header)
116: ph_node; /* Off-page page headers */
1.128 christos 117: void * ph_page; /* this page's address */
1.151 yamt 118: uint32_t ph_time; /* last referenced */
1.135 yamt 119: uint16_t ph_nmissing; /* # of chunks in use */
1.141 yamt 120: uint16_t ph_off; /* start offset in page */
1.97 yamt 121: union {
122: /* !PR_NOTOUCH */
123: struct {
1.102 chs 124: LIST_HEAD(, pool_item)
1.97 yamt 125: phu_itemlist; /* chunk list for this page */
126: } phu_normal;
127: /* PR_NOTOUCH */
128: struct {
1.141 yamt 129: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 130: } phu_notouch;
131: } ph_u;
1.3 pk 132: };
1.97 yamt 133: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.135 yamt 134: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 135:
1.1 pk 136: struct pool_item {
1.3 pk 137: #ifdef DIAGNOSTIC
1.82 thorpej 138: u_int pi_magic;
1.33 chs 139: #endif
1.134 ad 140: #define PI_MAGIC 0xdeaddeadU
1.3 pk 141: /* Other entries use only this list entry */
1.102 chs 142: LIST_ENTRY(pool_item) pi_list;
1.3 pk 143: };
144:
1.53 thorpej 145: #define POOL_NEEDS_CATCHUP(pp) \
146: ((pp)->pr_nitems < (pp)->pr_minitems)
147:
1.43 thorpej 148: /*
149: * Pool cache management.
150: *
151: * Pool caches provide a way for constructed objects to be cached by the
152: * pool subsystem. This can lead to performance improvements by avoiding
 153: * needless object construction/destruction; destruction is deferred until
 154: * absolutely necessary.
155: *
1.134 ad 156: * Caches are grouped into cache groups. Each cache group references up
157: * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
158: * object from the pool, it calls the object's constructor and places it
159: * into a cache group. When a cache group frees an object back to the
160: * pool, it first calls the object's destructor. This allows the object
161: * to persist in constructed form while freed to the cache.
162: *
163: * The pool references each cache, so that when a pool is drained by the
164: * pagedaemon, it can drain each individual cache as well. Each time a
165: * cache is drained, the most idle cache group is freed to the pool in
166: * its entirety.
1.43 thorpej 167: *
 168: * Pool caches are laid on top of pools. By layering them, we can avoid
169: * the complexity of cache management for pools which would not benefit
170: * from it.
171: */
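/*
 * Rough usage sketch for the cache layer (illustrative only; "foocache",
 * foo_ctor and foo_dtor are placeholder names, and the exact prototypes
 * live in sys/pool.h):
 *
 *	pool_cache_t pc;
 *
 *	pc = pool_cache_init(sizeof(struct foo), coherency_unit, 0, 0,
 *	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(pc, PR_WAITOK);
 *	...
 *	pool_cache_put(pc, f);
 */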
172:
1.142 ad 173: static struct pool pcg_normal_pool;
174: static struct pool pcg_large_pool;
1.134 ad 175: static struct pool cache_pool;
176: static struct pool cache_cpu_pool;
1.3 pk 177:
1.145 ad 178: /* List of all caches. */
179: TAILQ_HEAD(,pool_cache) pool_cache_head =
180: TAILQ_HEAD_INITIALIZER(pool_cache_head);
181:
182: int pool_cache_disable;
183:
184:
1.134 ad 185: static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
186: void *, paddr_t);
187: static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
188: void **, paddr_t *, int);
189: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
190: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
191: static void pool_cache_xcall(pool_cache_t);
1.3 pk 192:
1.42 thorpej 193: static int pool_catchup(struct pool *);
1.128 christos 194: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 195: struct pool_item_header *);
1.88 chs 196: static void pool_update_curpage(struct pool *);
1.66 thorpej 197:
1.113 yamt 198: static int pool_grow(struct pool *, int);
1.117 yamt 199: static void *pool_allocator_alloc(struct pool *, int);
200: static void pool_allocator_free(struct pool *, void *);
1.3 pk 201:
1.97 yamt 202: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 203: void (*)(const char *, ...));
1.42 thorpej 204: static void pool_print1(struct pool *, const char *,
205: void (*)(const char *, ...));
1.3 pk 206:
1.88 chs 207: static int pool_chk_page(struct pool *, const char *,
208: struct pool_item_header *);
209:
1.3 pk 210: /*
1.52 thorpej 211: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 212: */
213: struct pool_log {
214: const char *pl_file;
215: long pl_line;
216: int pl_action;
1.25 thorpej 217: #define PRLOG_GET 1
218: #define PRLOG_PUT 2
1.3 pk 219: void *pl_addr;
1.1 pk 220: };
221:
1.86 matt 222: #ifdef POOL_DIAGNOSTIC
1.3 pk 223: /* Number of entries in pool log buffers */
1.17 thorpej 224: #ifndef POOL_LOGSIZE
225: #define POOL_LOGSIZE 10
226: #endif
227:
228: int pool_logsize = POOL_LOGSIZE;
1.1 pk 229:
1.110 perry 230: static inline void
1.42 thorpej 231: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 232: {
233: int n = pp->pr_curlogentry;
234: struct pool_log *pl;
235:
1.20 thorpej 236: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 237: return;
238:
239: /*
240: * Fill in the current entry. Wrap around and overwrite
241: * the oldest entry if necessary.
242: */
243: pl = &pp->pr_log[n];
244: pl->pl_file = file;
245: pl->pl_line = line;
246: pl->pl_action = action;
247: pl->pl_addr = v;
248: if (++n >= pp->pr_logsize)
249: n = 0;
250: pp->pr_curlogentry = n;
251: }
252:
253: static void
1.42 thorpej 254: pr_printlog(struct pool *pp, struct pool_item *pi,
255: void (*pr)(const char *, ...))
1.3 pk 256: {
257: int i = pp->pr_logsize;
258: int n = pp->pr_curlogentry;
259:
1.20 thorpej 260: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 261: return;
262:
263: /*
264: * Print all entries in this pool's log.
265: */
266: while (i-- > 0) {
267: struct pool_log *pl = &pp->pr_log[n];
268: if (pl->pl_action != 0) {
1.25 thorpej 269: if (pi == NULL || pi == pl->pl_addr) {
270: (*pr)("\tlog entry %d:\n", i);
271: (*pr)("\t\taction = %s, addr = %p\n",
272: pl->pl_action == PRLOG_GET ? "get" : "put",
273: pl->pl_addr);
274: (*pr)("\t\tfile: %s at line %lu\n",
275: pl->pl_file, pl->pl_line);
276: }
1.3 pk 277: }
278: if (++n >= pp->pr_logsize)
279: n = 0;
280: }
281: }
1.25 thorpej 282:
1.110 perry 283: static inline void
1.42 thorpej 284: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 285: {
286:
1.34 thorpej 287: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 288: printf("pool %s: reentrancy at file %s line %ld\n",
289: pp->pr_wchan, file, line);
290: printf(" previous entry at file %s line %ld\n",
291: pp->pr_entered_file, pp->pr_entered_line);
292: panic("pr_enter");
293: }
294:
295: pp->pr_entered_file = file;
296: pp->pr_entered_line = line;
297: }
298:
1.110 perry 299: static inline void
1.42 thorpej 300: pr_leave(struct pool *pp)
1.25 thorpej 301: {
302:
1.34 thorpej 303: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 304: printf("pool %s not entered?\n", pp->pr_wchan);
305: panic("pr_leave");
306: }
307:
308: pp->pr_entered_file = NULL;
309: pp->pr_entered_line = 0;
310: }
311:
1.110 perry 312: static inline void
1.42 thorpej 313: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 314: {
315:
316: if (pp->pr_entered_file != NULL)
317: (*pr)("\n\tcurrently entered from file %s line %ld\n",
318: pp->pr_entered_file, pp->pr_entered_line);
319: }
1.3 pk 320: #else
1.25 thorpej 321: #define pr_log(pp, v, action, file, line)
322: #define pr_printlog(pp, pi, pr)
323: #define pr_enter(pp, file, line)
324: #define pr_leave(pp)
325: #define pr_enter_check(pp, pr)
1.59 thorpej 326: #endif /* POOL_DIAGNOSTIC */
1.3 pk 327:
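/*
 * For PR_NOTOUCH pools the free items on a page are tracked by a bitmap
 * in the page header instead of a list linked through the items
 * themselves; a set bit means the corresponding item is free.  Item
 * index idx lives in word idx / BITMAP_SIZE at bit idx & BITMAP_MASK;
 * with 32-bit bitmap words, item 35 is word 1, bit 3, for example.
 */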
1.135 yamt 328: static inline unsigned int
1.97 yamt 329: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
330: const void *v)
331: {
332: const char *cp = v;
1.135 yamt 333: unsigned int idx;
1.97 yamt 334:
335: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 336: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 337: KASSERT(idx < pp->pr_itemsperpage);
338: return idx;
339: }
340:
1.110 perry 341: static inline void
1.97 yamt 342: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
343: void *obj)
344: {
1.135 yamt 345: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
346: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
347: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 348:
1.135 yamt 349: KASSERT((*bitmap & mask) == 0);
350: *bitmap |= mask;
1.97 yamt 351: }
352:
1.110 perry 353: static inline void *
1.97 yamt 354: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
355: {
1.135 yamt 356: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
357: unsigned int idx;
358: int i;
1.97 yamt 359:
1.135 yamt 360: for (i = 0; ; i++) {
361: int bit;
1.97 yamt 362:
1.135 yamt 363: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
364: bit = ffs32(bitmap[i]);
365: if (bit) {
366: pool_item_bitmap_t mask;
367:
368: bit--;
369: idx = (i * BITMAP_SIZE) + bit;
370: mask = 1 << bit;
371: KASSERT((bitmap[i] & mask) != 0);
372: bitmap[i] &= ~mask;
373: break;
374: }
375: }
376: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 377: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 378: }
379:
1.135 yamt 380: static inline void
1.141 yamt 381: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
1.135 yamt 382: {
383: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
384: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
385: int i;
386:
387: for (i = 0; i < n; i++) {
388: bitmap[i] = (pool_item_bitmap_t)-1;
389: }
390: }
391:
1.110 perry 392: static inline int
1.88 chs 393: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
394: {
1.121 yamt 395:
396: /*
 397: * we consider a pool_item_header with a smaller ph_page to be bigger.
398: * (this unnatural ordering is for the benefit of pr_find_pagehead.)
399: */
400:
1.88 chs 401: if (a->ph_page < b->ph_page)
1.121 yamt 402: return (1);
403: else if (a->ph_page > b->ph_page)
1.88 chs 404: return (-1);
405: else
406: return (0);
407: }
408:
409: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
410: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
411:
1.141 yamt 412: static inline struct pool_item_header *
413: pr_find_pagehead_noalign(struct pool *pp, void *v)
414: {
415: struct pool_item_header *ph, tmp;
416:
417: tmp.ph_page = (void *)(uintptr_t)v;
418: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
419: if (ph == NULL) {
420: ph = SPLAY_ROOT(&pp->pr_phtree);
421: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
422: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
423: }
424: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
425: }
426:
427: return ph;
428: }
429:
1.3 pk 430: /*
1.121 yamt 431: * Return the pool page header based on item address.
1.3 pk 432: */
1.110 perry 433: static inline struct pool_item_header *
1.121 yamt 434: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 435: {
1.88 chs 436: struct pool_item_header *ph, tmp;
1.3 pk 437:
1.121 yamt 438: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.141 yamt 439: ph = pr_find_pagehead_noalign(pp, v);
1.121 yamt 440: } else {
1.128 christos 441: void *page =
442: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 443:
444: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 445: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 446: } else {
447: tmp.ph_page = page;
448: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
449: }
450: }
1.3 pk 451:
1.121 yamt 452: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 453: ((char *)ph->ph_page <= (char *)v &&
454: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 455: return ph;
1.3 pk 456: }
457:
1.101 thorpej 458: static void
459: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
460: {
461: struct pool_item_header *ph;
462:
463: while ((ph = LIST_FIRST(pq)) != NULL) {
464: LIST_REMOVE(ph, ph_pagelist);
465: pool_allocator_free(pp, ph->ph_page);
1.134 ad 466: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 467: pool_put(pp->pr_phpool, ph);
468: }
469: }
470:
1.3 pk 471: /*
472: * Remove a page from the pool.
473: */
1.110 perry 474: static inline void
1.61 chs 475: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
476: struct pool_pagelist *pq)
1.3 pk 477: {
478:
1.134 ad 479: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 480:
1.3 pk 481: /*
1.7 thorpej 482: * If the page was idle, decrement the idle page count.
1.3 pk 483: */
1.6 thorpej 484: if (ph->ph_nmissing == 0) {
485: #ifdef DIAGNOSTIC
486: if (pp->pr_nidle == 0)
487: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 488: if (pp->pr_nitems < pp->pr_itemsperpage)
489: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 490: #endif
491: pp->pr_nidle--;
492: }
1.7 thorpej 493:
1.20 thorpej 494: pp->pr_nitems -= pp->pr_itemsperpage;
495:
1.7 thorpej 496: /*
1.101 thorpej 497: * Unlink the page from the pool and queue it for release.
1.7 thorpej 498: */
1.88 chs 499: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 500: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
501: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 502: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
503:
1.7 thorpej 504: pp->pr_npages--;
505: pp->pr_npagefree++;
1.6 thorpej 506:
1.88 chs 507: pool_update_curpage(pp);
1.3 pk 508: }
509:
1.126 thorpej 510: static bool
1.117 yamt 511: pa_starved_p(struct pool_allocator *pa)
512: {
513:
514: if (pa->pa_backingmap != NULL) {
515: return vm_map_starved_p(pa->pa_backingmap);
516: }
1.127 thorpej 517: return false;
1.117 yamt 518: }
519:
520: static int
1.124 yamt 521: pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
1.117 yamt 522: {
523: struct pool *pp = obj;
524: struct pool_allocator *pa = pp->pr_alloc;
525:
526: KASSERT(&pp->pr_reclaimerentry == ce);
527: pool_reclaim(pp);
528: if (!pa_starved_p(pa)) {
529: return CALLBACK_CHAIN_ABORT;
530: }
531: return CALLBACK_CHAIN_CONTINUE;
532: }
533:
534: static void
535: pool_reclaim_register(struct pool *pp)
536: {
537: struct vm_map *map = pp->pr_alloc->pa_backingmap;
538: int s;
539:
540: if (map == NULL) {
541: return;
542: }
543:
544: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
545: callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
546: &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
547: splx(s);
548: }
549:
550: static void
551: pool_reclaim_unregister(struct pool *pp)
552: {
553: struct vm_map *map = pp->pr_alloc->pa_backingmap;
554: int s;
555:
556: if (map == NULL) {
557: return;
558: }
559:
560: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
561: callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
562: &pp->pr_reclaimerentry);
563: splx(s);
564: }
565:
566: static void
567: pa_reclaim_register(struct pool_allocator *pa)
568: {
569: struct vm_map *map = *pa->pa_backingmapptr;
570: struct pool *pp;
571:
572: KASSERT(pa->pa_backingmap == NULL);
573: if (map == NULL) {
574: SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
575: return;
576: }
577: pa->pa_backingmap = map;
578: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
579: pool_reclaim_register(pp);
580: }
581: }
582:
1.3 pk 583: /*
1.94 simonb 584: * Initialize all the pools listed in the "pools" link set.
585: */
586: void
1.117 yamt 587: pool_subsystem_init(void)
1.94 simonb 588: {
1.117 yamt 589: struct pool_allocator *pa;
1.94 simonb 590: __link_set_decl(pools, struct link_pool_init);
591: struct link_pool_init * const *pi;
592:
1.134 ad 593: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
594: cv_init(&pool_busy, "poolbusy");
595:
1.94 simonb 596: __link_set_foreach(pi, pools)
597: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
598: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
1.129 ad 599: (*pi)->palloc, (*pi)->ipl);
1.117 yamt 600:
601: while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
602: KASSERT(pa->pa_backingmapptr != NULL);
603: KASSERT(*pa->pa_backingmapptr != NULL);
604: SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
605: pa_reclaim_register(pa);
606: }
1.134 ad 607:
1.156 ad 608: pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
1.134 ad 609: 0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
610:
1.156 ad 611: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
1.134 ad 612: 0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
1.94 simonb 613: }
614:
615: /*
1.3 pk 616: * Initialize the given pool resource structure.
617: *
618: * We export this routine to allow other kernel parts to declare
619: * static pools that must be initialized before malloc() is available.
620: */
621: void
1.42 thorpej 622: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 623: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 624: {
1.116 simonb 625: struct pool *pp1;
1.92 enami 626: size_t trysize, phsize;
1.134 ad 627: int off, slack;
1.3 pk 628:
1.116 simonb 629: #ifdef DEBUG
630: /*
631: * Check that the pool hasn't already been initialised and
632: * added to the list of all pools.
633: */
1.145 ad 634: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
1.116 simonb 635: if (pp == pp1)
636: panic("pool_init: pool %s already initialised",
637: wchan);
638: }
639: #endif
640:
1.25 thorpej 641: #ifdef POOL_DIAGNOSTIC
642: /*
643: * Always log if POOL_DIAGNOSTIC is defined.
644: */
645: if (pool_logsize != 0)
646: flags |= PR_LOGGING;
647: #endif
648:
1.66 thorpej 649: if (palloc == NULL)
650: palloc = &pool_allocator_kmem;
1.112 bjh21 651: #ifdef POOL_SUBPAGE
652: if (size > palloc->pa_pagesz) {
653: if (palloc == &pool_allocator_kmem)
654: palloc = &pool_allocator_kmem_fullpage;
655: else if (palloc == &pool_allocator_nointr)
656: palloc = &pool_allocator_nointr_fullpage;
657: }
1.66 thorpej 658: #endif /* POOL_SUBPAGE */
659: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.112 bjh21 660: if (palloc->pa_pagesz == 0)
1.66 thorpej 661: palloc->pa_pagesz = PAGE_SIZE;
662:
663: TAILQ_INIT(&palloc->pa_list);
664:
1.134 ad 665: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 666: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
667: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.117 yamt 668:
669: if (palloc->pa_backingmapptr != NULL) {
670: pa_reclaim_register(palloc);
671: }
1.66 thorpej 672: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 673: }
1.3 pk 674:
675: if (align == 0)
676: align = ALIGN(1);
1.14 thorpej 677:
1.120 yamt 678: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 679: size = sizeof(struct pool_item);
1.3 pk 680:
1.78 thorpej 681: size = roundup(size, align);
1.66 thorpej 682: #ifdef DIAGNOSTIC
683: if (size > palloc->pa_pagesz)
1.121 yamt 684: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 685: #endif
1.35 pk 686:
1.3 pk 687: /*
688: * Initialize the pool structure.
689: */
1.88 chs 690: LIST_INIT(&pp->pr_emptypages);
691: LIST_INIT(&pp->pr_fullpages);
692: LIST_INIT(&pp->pr_partpages);
1.134 ad 693: pp->pr_cache = NULL;
1.3 pk 694: pp->pr_curpage = NULL;
695: pp->pr_npages = 0;
696: pp->pr_minitems = 0;
697: pp->pr_minpages = 0;
698: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 699: pp->pr_roflags = flags;
700: pp->pr_flags = 0;
1.35 pk 701: pp->pr_size = size;
1.3 pk 702: pp->pr_align = align;
703: pp->pr_wchan = wchan;
1.66 thorpej 704: pp->pr_alloc = palloc;
1.20 thorpej 705: pp->pr_nitems = 0;
706: pp->pr_nout = 0;
707: pp->pr_hardlimit = UINT_MAX;
708: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 709: pp->pr_hardlimit_ratecap.tv_sec = 0;
710: pp->pr_hardlimit_ratecap.tv_usec = 0;
711: pp->pr_hardlimit_warning_last.tv_sec = 0;
712: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 713: pp->pr_drain_hook = NULL;
714: pp->pr_drain_hook_arg = NULL;
1.125 ad 715: pp->pr_freecheck = NULL;
1.3 pk 716:
717: /*
718: * Decide whether to put the page header off page to avoid
1.92 enami 719: * wasting too large a part of the page or too big item.
720: * Off-page page headers go on a hash table, so we can match
721: * a returned item with its header based on the page address.
722: * We use 1/16 of the page size and about 8 times of the item
723: * size as the threshold (XXX: tune)
724: *
725: * However, we'll put the header into the page if we can put
726: * it without wasting any items.
727: *
728: * Silently enforce `0 <= ioff < align'.
1.3 pk 729: */
1.92 enami 730: pp->pr_itemoffset = ioff %= align;
731: /* See the comment below about reserved bytes. */
732: trysize = palloc->pa_pagesz - ((align - ioff) % align);
733: phsize = ALIGN(sizeof(struct pool_item_header));
1.121 yamt 734: if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 735: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
736: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 737: /* Use the end of the page for the page header */
1.20 thorpej 738: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 739: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 740: } else {
1.3 pk 741: /* The page header will be taken from our page header pool */
742: pp->pr_phoffset = 0;
1.66 thorpej 743: off = palloc->pa_pagesz;
1.88 chs 744: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 745: }
1.1 pk 746:
1.3 pk 747: /*
748: * Alignment is to take place at `ioff' within the item. This means
749: * we must reserve up to `align - 1' bytes on the page to allow
750: * appropriate positioning of each item.
751: */
752: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 753: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 754: if ((pp->pr_roflags & PR_NOTOUCH)) {
755: int idx;
756:
757: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
758: idx++) {
759: /* nothing */
760: }
761: if (idx >= PHPOOL_MAX) {
762: /*
 763: * if you see this panic, consider tweaking
764: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
765: */
766: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
767: pp->pr_wchan, pp->pr_itemsperpage);
768: }
769: pp->pr_phpool = &phpool[idx];
770: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
771: pp->pr_phpool = &phpool[0];
772: }
773: #if defined(DIAGNOSTIC)
774: else {
775: pp->pr_phpool = NULL;
776: }
777: #endif
1.3 pk 778:
779: /*
780: * Use the slack between the chunks and the page header
781: * for "cache coloring".
782: */
783: slack = off - pp->pr_itemsperpage * pp->pr_size;
784: pp->pr_maxcolor = (slack / align) * align;
785: pp->pr_curcolor = 0;
786:
787: pp->pr_nget = 0;
788: pp->pr_nfail = 0;
789: pp->pr_nput = 0;
790: pp->pr_npagealloc = 0;
791: pp->pr_npagefree = 0;
1.1 pk 792: pp->pr_hiwat = 0;
1.8 thorpej 793: pp->pr_nidle = 0;
1.134 ad 794: pp->pr_refcnt = 0;
1.3 pk 795:
1.59 thorpej 796: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 797: if (flags & PR_LOGGING) {
798: if (kmem_map == NULL ||
799: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
800: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 801: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 802: pp->pr_curlogentry = 0;
803: pp->pr_logsize = pool_logsize;
804: }
1.59 thorpej 805: #endif
1.25 thorpej 806:
807: pp->pr_entered_file = NULL;
808: pp->pr_entered_line = 0;
1.3 pk 809:
1.156.2.1! yamt 810: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
1.134 ad 811: cv_init(&pp->pr_cv, wchan);
812: pp->pr_ipl = ipl;
1.1 pk 813:
1.3 pk 814: /*
1.43 thorpej 815: * Initialize private page header pool and cache magazine pool if we
816: * haven't done so yet.
1.23 thorpej 817: * XXX LOCKING.
1.3 pk 818: */
1.97 yamt 819: if (phpool[0].pr_size == 0) {
820: int idx;
821: for (idx = 0; idx < PHPOOL_MAX; idx++) {
822: static char phpool_names[PHPOOL_MAX][6+1+6+1];
823: int nelem;
824: size_t sz;
825:
826: nelem = PHPOOL_FREELIST_NELEM(idx);
827: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
828: "phpool-%d", nelem);
829: sz = sizeof(struct pool_item_header);
830: if (nelem) {
1.135 yamt 831: sz = offsetof(struct pool_item_header,
832: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
1.97 yamt 833: }
834: pool_init(&phpool[idx], sz, 0, 0, 0,
1.129 ad 835: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.97 yamt 836: }
1.62 bjh21 837: #ifdef POOL_SUBPAGE
838: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.129 ad 839: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
1.62 bjh21 840: #endif
1.142 ad 841:
842: size = sizeof(pcg_t) +
843: (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
1.156 ad 844: pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
1.142 ad 845: "pcgnormal", &pool_allocator_meta, IPL_VM);
846:
847: size = sizeof(pcg_t) +
848: (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
1.156 ad 849: pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
1.142 ad 850: "pcglarge", &pool_allocator_meta, IPL_VM);
1.1 pk 851: }
852:
1.145 ad 853: /* Insert into the list of all pools. */
854: if (__predict_true(!cold))
1.134 ad 855: mutex_enter(&pool_head_lock);
1.145 ad 856: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
857: if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
858: break;
859: }
860: if (pp1 == NULL)
861: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
862: else
863: TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
864: if (__predict_true(!cold))
1.134 ad 865: mutex_exit(&pool_head_lock);
866:
867: /* Insert this into the list of pools using this allocator. */
1.145 ad 868: if (__predict_true(!cold))
1.134 ad 869: mutex_enter(&palloc->pa_lock);
1.145 ad 870: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
871: if (__predict_true(!cold))
1.134 ad 872: mutex_exit(&palloc->pa_lock);
1.66 thorpej 873:
1.117 yamt 874: pool_reclaim_register(pp);
1.1 pk 875: }
876:
877: /*
 878: * De-commission a pool resource.
879: */
880: void
1.42 thorpej 881: pool_destroy(struct pool *pp)
1.1 pk 882: {
1.101 thorpej 883: struct pool_pagelist pq;
1.3 pk 884: struct pool_item_header *ph;
1.43 thorpej 885:
1.101 thorpej 886: /* Remove from global pool list */
1.134 ad 887: mutex_enter(&pool_head_lock);
888: while (pp->pr_refcnt != 0)
889: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 890: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.101 thorpej 891: if (drainpp == pp)
892: drainpp = NULL;
1.134 ad 893: mutex_exit(&pool_head_lock);
1.101 thorpej 894:
895: /* Remove this pool from its allocator's list of pools. */
1.117 yamt 896: pool_reclaim_unregister(pp);
1.134 ad 897: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 898: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 899: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 900:
1.134 ad 901: mutex_enter(&pp->pr_lock);
1.101 thorpej 902:
1.134 ad 903: KASSERT(pp->pr_cache == NULL);
1.3 pk 904:
905: #ifdef DIAGNOSTIC
1.20 thorpej 906: if (pp->pr_nout != 0) {
1.25 thorpej 907: pr_printlog(pp, NULL, printf);
1.80 provos 908: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 909: pp->pr_nout);
1.3 pk 910: }
911: #endif
1.1 pk 912:
1.101 thorpej 913: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
914: KASSERT(LIST_EMPTY(&pp->pr_partpages));
915:
1.3 pk 916: /* Remove all pages */
1.101 thorpej 917: LIST_INIT(&pq);
1.88 chs 918: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 919: pr_rmpage(pp, ph, &pq);
920:
1.134 ad 921: mutex_exit(&pp->pr_lock);
1.3 pk 922:
1.101 thorpej 923: pr_pagelist_free(pp, &pq);
1.3 pk 924:
1.59 thorpej 925: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 926: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 927: free(pp->pr_log, M_TEMP);
1.59 thorpej 928: #endif
1.134 ad 929:
930: cv_destroy(&pp->pr_cv);
931: mutex_destroy(&pp->pr_lock);
1.1 pk 932: }
933:
1.68 thorpej 934: void
935: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
936: {
937:
938: /* XXX no locking -- must be used just after pool_init() */
939: #ifdef DIAGNOSTIC
940: if (pp->pr_drain_hook != NULL)
941: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
942: #endif
943: pp->pr_drain_hook = fn;
944: pp->pr_drain_hook_arg = arg;
945: }
946:
1.88 chs 947: static struct pool_item_header *
1.128 christos 948: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 949: {
950: struct pool_item_header *ph;
951:
952: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 953: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.134 ad 954: else
1.97 yamt 955: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 956:
957: return (ph);
958: }
1.1 pk 959:
960: /*
1.134 ad 961: * Grab an item from the pool.
1.1 pk 962: */
1.3 pk 963: void *
1.59 thorpej 964: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 965: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 966: #else
967: pool_get(struct pool *pp, int flags)
968: #endif
1.1 pk 969: {
970: struct pool_item *pi;
1.3 pk 971: struct pool_item_header *ph;
1.55 thorpej 972: void *v;
1.1 pk 973:
1.2 pk 974: #ifdef DIAGNOSTIC
1.95 atatat 975: if (__predict_false(pp->pr_itemsperpage == 0))
976: panic("pool_get: pool %p: pr_itemsperpage is zero, "
977: "pool not initialized?", pp);
1.84 thorpej 978: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 979: (flags & PR_WAITOK) != 0))
1.77 matt 980: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 981:
1.102 chs 982: #endif /* DIAGNOSTIC */
1.58 thorpej 983: #ifdef LOCKDEBUG
1.155 ad 984: if (flags & PR_WAITOK) {
1.154 yamt 985: ASSERT_SLEEPABLE();
1.155 ad 986: }
1.56 sommerfe 987: #endif
1.1 pk 988:
1.134 ad 989: mutex_enter(&pp->pr_lock);
1.25 thorpej 990: pr_enter(pp, file, line);
1.20 thorpej 991:
992: startover:
993: /*
994: * Check to see if we've reached the hard limit. If we have,
995: * and we can wait, then wait until an item has been returned to
996: * the pool.
997: */
998: #ifdef DIAGNOSTIC
1.34 thorpej 999: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 1000: pr_leave(pp);
1.134 ad 1001: mutex_exit(&pp->pr_lock);
1.20 thorpej 1002: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
1003: }
1004: #endif
1.34 thorpej 1005: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 1006: if (pp->pr_drain_hook != NULL) {
1007: /*
1008: * Since the drain hook is going to free things
1009: * back to the pool, unlock, call the hook, re-lock,
1010: * and check the hardlimit condition again.
1011: */
1012: pr_leave(pp);
1.134 ad 1013: mutex_exit(&pp->pr_lock);
1.68 thorpej 1014: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 1015: mutex_enter(&pp->pr_lock);
1.68 thorpej 1016: pr_enter(pp, file, line);
1017: if (pp->pr_nout < pp->pr_hardlimit)
1018: goto startover;
1019: }
1020:
1.29 sommerfe 1021: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 1022: /*
1023: * XXX: A warning isn't logged in this case. Should
1024: * it be?
1025: */
1026: pp->pr_flags |= PR_WANTED;
1.25 thorpej 1027: pr_leave(pp);
1.134 ad 1028: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.25 thorpej 1029: pr_enter(pp, file, line);
1.20 thorpej 1030: goto startover;
1031: }
1.31 thorpej 1032:
1033: /*
1034: * Log a message that the hard limit has been hit.
1035: */
1036: if (pp->pr_hardlimit_warning != NULL &&
1037: ratecheck(&pp->pr_hardlimit_warning_last,
1038: &pp->pr_hardlimit_ratecap))
1039: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 1040:
1041: pp->pr_nfail++;
1042:
1.25 thorpej 1043: pr_leave(pp);
1.134 ad 1044: mutex_exit(&pp->pr_lock);
1.20 thorpej 1045: return (NULL);
1046: }
1047:
1.3 pk 1048: /*
1049: * The convention we use is that if `curpage' is not NULL, then
1050: * it points at a non-empty bucket. In particular, `curpage'
1051: * never points at a page header which has PR_PHINPAGE set and
1052: * has no items in its bucket.
1053: */
1.20 thorpej 1054: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 1055: int error;
1056:
1.20 thorpej 1057: #ifdef DIAGNOSTIC
1058: if (pp->pr_nitems != 0) {
1.134 ad 1059: mutex_exit(&pp->pr_lock);
1.20 thorpej 1060: printf("pool_get: %s: curpage NULL, nitems %u\n",
1061: pp->pr_wchan, pp->pr_nitems);
1.80 provos 1062: panic("pool_get: nitems inconsistent");
1.20 thorpej 1063: }
1064: #endif
1065:
1.21 thorpej 1066: /*
1067: * Call the back-end page allocator for more memory.
1068: * Release the pool lock, as the back-end page allocator
1069: * may block.
1070: */
1.25 thorpej 1071: pr_leave(pp);
1.113 yamt 1072: error = pool_grow(pp, flags);
1073: pr_enter(pp, file, line);
1074: if (error != 0) {
1.21 thorpej 1075: /*
1.55 thorpej 1076: * We were unable to allocate a page or item
1077: * header, but we released the lock during
1078: * allocation, so perhaps items were freed
1079: * back to the pool. Check for this case.
1.21 thorpej 1080: */
1081: if (pp->pr_curpage != NULL)
1082: goto startover;
1.15 pk 1083:
1.117 yamt 1084: pp->pr_nfail++;
1.25 thorpej 1085: pr_leave(pp);
1.134 ad 1086: mutex_exit(&pp->pr_lock);
1.117 yamt 1087: return (NULL);
1.1 pk 1088: }
1.3 pk 1089:
1.20 thorpej 1090: /* Start the allocation process over. */
1091: goto startover;
1.3 pk 1092: }
1.97 yamt 1093: if (pp->pr_roflags & PR_NOTOUCH) {
1094: #ifdef DIAGNOSTIC
1095: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1096: pr_leave(pp);
1.134 ad 1097: mutex_exit(&pp->pr_lock);
1.97 yamt 1098: panic("pool_get: %s: page empty", pp->pr_wchan);
1099: }
1100: #endif
1101: v = pr_item_notouch_get(pp, ph);
1102: #ifdef POOL_DIAGNOSTIC
1103: pr_log(pp, v, PRLOG_GET, file, line);
1104: #endif
1105: } else {
1.102 chs 1106: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 1107: if (__predict_false(v == NULL)) {
1108: pr_leave(pp);
1.134 ad 1109: mutex_exit(&pp->pr_lock);
1.97 yamt 1110: panic("pool_get: %s: page empty", pp->pr_wchan);
1111: }
1.20 thorpej 1112: #ifdef DIAGNOSTIC
1.97 yamt 1113: if (__predict_false(pp->pr_nitems == 0)) {
1114: pr_leave(pp);
1.134 ad 1115: mutex_exit(&pp->pr_lock);
1.97 yamt 1116: printf("pool_get: %s: items on itemlist, nitems %u\n",
1117: pp->pr_wchan, pp->pr_nitems);
1118: panic("pool_get: nitems inconsistent");
1119: }
1.65 enami 1120: #endif
1.56 sommerfe 1121:
1.65 enami 1122: #ifdef POOL_DIAGNOSTIC
1.97 yamt 1123: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 1124: #endif
1.3 pk 1125:
1.65 enami 1126: #ifdef DIAGNOSTIC
1.97 yamt 1127: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1128: pr_printlog(pp, pi, printf);
1129: panic("pool_get(%s): free list modified: "
1130: "magic=%x; page %p; item addr %p\n",
1131: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1132: }
1.3 pk 1133: #endif
1134:
1.97 yamt 1135: /*
1136: * Remove from item list.
1137: */
1.102 chs 1138: LIST_REMOVE(pi, pi_list);
1.97 yamt 1139: }
1.20 thorpej 1140: pp->pr_nitems--;
1141: pp->pr_nout++;
1.6 thorpej 1142: if (ph->ph_nmissing == 0) {
1143: #ifdef DIAGNOSTIC
1.34 thorpej 1144: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 1145: panic("pool_get: nidle inconsistent");
1146: #endif
1147: pp->pr_nidle--;
1.88 chs 1148:
1149: /*
1150: * This page was previously empty. Move it to the list of
1151: * partially-full pages. This page is already curpage.
1152: */
1153: LIST_REMOVE(ph, ph_pagelist);
1154: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 1155: }
1.3 pk 1156: ph->ph_nmissing++;
1.97 yamt 1157: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1158: #ifdef DIAGNOSTIC
1.97 yamt 1159: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 1160: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1161: pr_leave(pp);
1.134 ad 1162: mutex_exit(&pp->pr_lock);
1.21 thorpej 1163: panic("pool_get: %s: nmissing inconsistent",
1164: pp->pr_wchan);
1165: }
1166: #endif
1.3 pk 1167: /*
1.88 chs 1168: * This page is now full. Move it to the full list
1169: * and select a new current page.
1.3 pk 1170: */
1.88 chs 1171: LIST_REMOVE(ph, ph_pagelist);
1172: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1173: pool_update_curpage(pp);
1.1 pk 1174: }
1.3 pk 1175:
1176: pp->pr_nget++;
1.111 christos 1177: pr_leave(pp);
1.20 thorpej 1178:
1179: /*
1180: * If we have a low water mark and we are now below that low
1181: * water mark, add more items to the pool.
1182: */
1.53 thorpej 1183: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1184: /*
1185: * XXX: Should we log a warning? Should we set up a timeout
1186: * to try again in a second or so? The latter could break
1187: * a caller's assumptions about interrupt protection, etc.
1188: */
1189: }
1190:
1.134 ad 1191: mutex_exit(&pp->pr_lock);
1.125 ad 1192: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
1193: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 1194: return (v);
1195: }
1196:
1197: /*
1.43 thorpej 1198: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1199: */
1.43 thorpej 1200: static void
1.101 thorpej 1201: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1202: {
1203: struct pool_item *pi = v;
1.3 pk 1204: struct pool_item_header *ph;
1205:
1.134 ad 1206: KASSERT(mutex_owned(&pp->pr_lock));
1.125 ad 1207: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 1208: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 1209:
1.30 thorpej 1210: #ifdef DIAGNOSTIC
1.34 thorpej 1211: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1212: printf("pool %s: putting with none out\n",
1213: pp->pr_wchan);
1214: panic("pool_put");
1215: }
1216: #endif
1.3 pk 1217:
1.121 yamt 1218: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.25 thorpej 1219: pr_printlog(pp, NULL, printf);
1.3 pk 1220: panic("pool_put: %s: page header missing", pp->pr_wchan);
1221: }
1.28 thorpej 1222:
1.3 pk 1223: /*
1224: * Return to item list.
1225: */
1.97 yamt 1226: if (pp->pr_roflags & PR_NOTOUCH) {
1227: pr_item_notouch_put(pp, ph, v);
1228: } else {
1.2 pk 1229: #ifdef DIAGNOSTIC
1.97 yamt 1230: pi->pi_magic = PI_MAGIC;
1.3 pk 1231: #endif
1.32 chs 1232: #ifdef DEBUG
1.97 yamt 1233: {
1234: int i, *ip = v;
1.32 chs 1235:
1.97 yamt 1236: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1237: *ip++ = PI_MAGIC;
1238: }
1.32 chs 1239: }
1240: #endif
1241:
1.102 chs 1242: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1243: }
1.79 thorpej 1244: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1245: ph->ph_nmissing--;
1246: pp->pr_nput++;
1.20 thorpej 1247: pp->pr_nitems++;
1248: pp->pr_nout--;
1.3 pk 1249:
1250: /* Cancel "pool empty" condition if it exists */
1251: if (pp->pr_curpage == NULL)
1252: pp->pr_curpage = ph;
1253:
1254: if (pp->pr_flags & PR_WANTED) {
1255: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1256: if (ph->ph_nmissing == 0)
1257: pp->pr_nidle++;
1.134 ad 1258: cv_broadcast(&pp->pr_cv);
1.3 pk 1259: return;
1260: }
1261:
1262: /*
1.88 chs 1263: * If this page is now empty, do one of two things:
1.21 thorpej 1264: *
1.88 chs 1265: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1266: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1267: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1268: * CLAIM.
1.21 thorpej 1269: *
1.88 chs 1270: * (2) Otherwise, move the page to the empty page list.
1271: *
1272: * Either way, select a new current page (so we use a partially-full
1273: * page if one is available).
1.3 pk 1274: */
1275: if (ph->ph_nmissing == 0) {
1.6 thorpej 1276: pp->pr_nidle++;
1.90 thorpej 1277: if (pp->pr_npages > pp->pr_minpages &&
1.152 yamt 1278: pp->pr_npages > pp->pr_maxpages) {
1.101 thorpej 1279: pr_rmpage(pp, ph, pq);
1.3 pk 1280: } else {
1.88 chs 1281: LIST_REMOVE(ph, ph_pagelist);
1282: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1283:
1.21 thorpej 1284: /*
1285: * Update the timestamp on the page. A page must
1286: * be idle for some period of time before it can
1287: * be reclaimed by the pagedaemon. This minimizes
1288: * ping-pong'ing for memory.
1.151 yamt 1289: *
1290: * note for 64-bit time_t: truncating to 32-bit is not
1291: * a problem for our usage.
1.21 thorpej 1292: */
1.151 yamt 1293: ph->ph_time = time_uptime;
1.1 pk 1294: }
1.88 chs 1295: pool_update_curpage(pp);
1.1 pk 1296: }
1.88 chs 1297:
1.21 thorpej 1298: /*
1.88 chs 1299: * If the page was previously completely full, move it to the
1300: * partially-full list and make it the current page. The next
1301: * allocation will get the item from this page, instead of
1302: * further fragmenting the pool.
1.21 thorpej 1303: */
1304: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1305: LIST_REMOVE(ph, ph_pagelist);
1306: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1307: pp->pr_curpage = ph;
1308: }
1.43 thorpej 1309: }
1310:
1311: /*
1.134 ad 1312: * Return resource to the pool.
1.43 thorpej 1313: */
1.59 thorpej 1314: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1315: void
1316: _pool_put(struct pool *pp, void *v, const char *file, long line)
1317: {
1.101 thorpej 1318: struct pool_pagelist pq;
1319:
1320: LIST_INIT(&pq);
1.43 thorpej 1321:
1.134 ad 1322: mutex_enter(&pp->pr_lock);
1.43 thorpej 1323: pr_enter(pp, file, line);
1324:
1.56 sommerfe 1325: pr_log(pp, v, PRLOG_PUT, file, line);
1326:
1.101 thorpej 1327: pool_do_put(pp, v, &pq);
1.21 thorpej 1328:
1.25 thorpej 1329: pr_leave(pp);
1.134 ad 1330: mutex_exit(&pp->pr_lock);
1.101 thorpej 1331:
1.102 chs 1332: pr_pagelist_free(pp, &pq);
1.1 pk 1333: }
1.57 sommerfe 1334: #undef pool_put
1.59 thorpej 1335: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1336:
1.56 sommerfe 1337: void
1338: pool_put(struct pool *pp, void *v)
1339: {
1.101 thorpej 1340: struct pool_pagelist pq;
1341:
1342: LIST_INIT(&pq);
1.56 sommerfe 1343:
1.134 ad 1344: mutex_enter(&pp->pr_lock);
1.101 thorpej 1345: pool_do_put(pp, v, &pq);
1.134 ad 1346: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1347:
1.102 chs 1348: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1349: }
1.57 sommerfe 1350:
1.59 thorpej 1351: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1352: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1353: #endif
1.74 thorpej 1354:
1355: /*
1.113 yamt 1356: * pool_grow: grow a pool by a page.
1357: *
1358: * => called with pool locked.
1359: * => unlock and relock the pool.
1360: * => return with pool locked.
1361: */
1362:
1363: static int
1364: pool_grow(struct pool *pp, int flags)
1365: {
1366: struct pool_item_header *ph = NULL;
1367: char *cp;
1368:
1.134 ad 1369: mutex_exit(&pp->pr_lock);
1.113 yamt 1370: cp = pool_allocator_alloc(pp, flags);
1371: if (__predict_true(cp != NULL)) {
1372: ph = pool_alloc_item_header(pp, cp, flags);
1373: }
1374: if (__predict_false(cp == NULL || ph == NULL)) {
1375: if (cp != NULL) {
1376: pool_allocator_free(pp, cp);
1377: }
1.134 ad 1378: mutex_enter(&pp->pr_lock);
1.113 yamt 1379: return ENOMEM;
1380: }
1381:
1.134 ad 1382: mutex_enter(&pp->pr_lock);
1.113 yamt 1383: pool_prime_page(pp, cp, ph);
1384: pp->pr_npagealloc++;
1385: return 0;
1386: }
1387:
1388: /*
1.74 thorpej 1389: * Add N items to the pool.
1390: */
1391: int
1392: pool_prime(struct pool *pp, int n)
1393: {
1.75 simonb 1394: int newpages;
1.113 yamt 1395: int error = 0;
1.74 thorpej 1396:
1.134 ad 1397: mutex_enter(&pp->pr_lock);
1.74 thorpej 1398:
1399: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1400:
1401: while (newpages-- > 0) {
1.113 yamt 1402: error = pool_grow(pp, PR_NOWAIT);
1403: if (error) {
1.74 thorpej 1404: break;
1405: }
1406: pp->pr_minpages++;
1407: }
1408:
1409: if (pp->pr_minpages >= pp->pr_maxpages)
1410: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1411:
1.134 ad 1412: mutex_exit(&pp->pr_lock);
1.113 yamt 1413: return error;
1.74 thorpej 1414: }
1.55 thorpej 1415:
1416: /*
1.3 pk 1417: * Add a page's worth of items to the pool.
1.21 thorpej 1418: *
1419: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1420: */
1.55 thorpej 1421: static void
1.128 christos 1422: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1423: {
1424: struct pool_item *pi;
1.128 christos 1425: void *cp = storage;
1.125 ad 1426: const unsigned int align = pp->pr_align;
1427: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1428: int n;
1.36 pk 1429:
1.134 ad 1430: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1431:
1.66 thorpej 1432: #ifdef DIAGNOSTIC
1.121 yamt 1433: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1.150 skrll 1434: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1435: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1436: #endif
1.3 pk 1437:
1438: /*
1439: * Insert page header.
1440: */
1.88 chs 1441: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1442: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1443: ph->ph_page = storage;
1444: ph->ph_nmissing = 0;
1.151 yamt 1445: ph->ph_time = time_uptime;
1.88 chs 1446: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1447: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1448:
1.6 thorpej 1449: pp->pr_nidle++;
1450:
1.3 pk 1451: /*
1452: * Color this page.
1453: */
1.141 yamt 1454: ph->ph_off = pp->pr_curcolor;
1455: cp = (char *)cp + ph->ph_off;
1.3 pk 1456: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1457: pp->pr_curcolor = 0;
1458:
1459: /*
 1460: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1461: */
1462: if (ioff != 0)
1.128 christos 1463: cp = (char *)cp + align - ioff;
1.3 pk 1464:
1.125 ad 1465: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1466:
1.3 pk 1467: /*
1468: * Insert remaining chunks on the bucket list.
1469: */
1470: n = pp->pr_itemsperpage;
1.20 thorpej 1471: pp->pr_nitems += n;
1.3 pk 1472:
1.97 yamt 1473: if (pp->pr_roflags & PR_NOTOUCH) {
1.141 yamt 1474: pr_item_notouch_init(pp, ph);
1.97 yamt 1475: } else {
1476: while (n--) {
1477: pi = (struct pool_item *)cp;
1.78 thorpej 1478:
1.97 yamt 1479: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1480:
1.97 yamt 1481: /* Insert on page list */
1.102 chs 1482: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1483: #ifdef DIAGNOSTIC
1.97 yamt 1484: pi->pi_magic = PI_MAGIC;
1.3 pk 1485: #endif
1.128 christos 1486: cp = (char *)cp + pp->pr_size;
1.125 ad 1487:
1488: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1489: }
1.3 pk 1490: }
1491:
1492: /*
1493: * If the pool was depleted, point at the new page.
1494: */
1495: if (pp->pr_curpage == NULL)
1496: pp->pr_curpage = ph;
1497:
1498: if (++pp->pr_npages > pp->pr_hiwat)
1499: pp->pr_hiwat = pp->pr_npages;
1500: }
1501:
1.20 thorpej 1502: /*
1.52 thorpej 1503: * Used by pool_get() when nitems drops below the low water mark. This
1.88 chs 1504: * catches pr_nitems up to the low water mark.
1.20 thorpej 1505: *
1.21 thorpej 1506: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1507: *
1.73 thorpej 1508: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1509: * with it locked.
1510: */
1511: static int
1.42 thorpej 1512: pool_catchup(struct pool *pp)
1.20 thorpej 1513: {
1514: int error = 0;
1515:
1.54 thorpej 1516: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1517: error = pool_grow(pp, PR_NOWAIT);
1518: if (error) {
1.20 thorpej 1519: break;
1520: }
1521: }
1.113 yamt 1522: return error;
1.20 thorpej 1523: }
1524:
1.88 chs 1525: static void
1526: pool_update_curpage(struct pool *pp)
1527: {
1528:
1529: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1530: if (pp->pr_curpage == NULL) {
1531: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1532: }
1533: }
1534:
1.3 pk 1535: void
1.42 thorpej 1536: pool_setlowat(struct pool *pp, int n)
1.3 pk 1537: {
1.15 pk 1538:
1.134 ad 1539: mutex_enter(&pp->pr_lock);
1.21 thorpej 1540:
1.3 pk 1541: pp->pr_minitems = n;
1.15 pk 1542: pp->pr_minpages = (n == 0)
1543: ? 0
1.18 thorpej 1544: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1545:
1546: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1547: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1548: /*
1549: * XXX: Should we log a warning? Should we set up a timeout
1550: * to try again in a second or so? The latter could break
1551: * a caller's assumptions about interrupt protection, etc.
1552: */
1553: }
1.21 thorpej 1554:
1.134 ad 1555: mutex_exit(&pp->pr_lock);
1.3 pk 1556: }
1557:
1558: void
1.42 thorpej 1559: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1560: {
1.15 pk 1561:
1.134 ad 1562: mutex_enter(&pp->pr_lock);
1.21 thorpej 1563:
1.15 pk 1564: pp->pr_maxpages = (n == 0)
1565: ? 0
1.18 thorpej 1566: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1567:
1.134 ad 1568: mutex_exit(&pp->pr_lock);
1.3 pk 1569: }
1570:
1.20 thorpej 1571: void
1.42 thorpej 1572: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1573: {
1574:
1.134 ad 1575: mutex_enter(&pp->pr_lock);
1.20 thorpej 1576:
1577: pp->pr_hardlimit = n;
1578: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1579: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1580: pp->pr_hardlimit_warning_last.tv_sec = 0;
1581: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1582:
1583: /*
1.21 thorpej 1584: * In-line version of pool_sethiwat(), because we don't want to
1585: * release the lock.
1.20 thorpej 1586: */
1587: pp->pr_maxpages = (n == 0)
1588: ? 0
1589: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1590:
1.134 ad 1591: mutex_exit(&pp->pr_lock);
1.20 thorpej 1592: }
1.3 pk 1593:
1594: /*
1595: * Release all complete pages that have not been used recently.
1596: */
1.66 thorpej 1597: int
1.59 thorpej 1598: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1599: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1600: #else
1601: pool_reclaim(struct pool *pp)
1602: #endif
1.3 pk 1603: {
1604: struct pool_item_header *ph, *phnext;
1.61 chs 1605: struct pool_pagelist pq;
1.151 yamt 1606: uint32_t curtime;
1.134 ad 1607: bool klock;
1608: int rv;
1.3 pk 1609:
1.68 thorpej 1610: if (pp->pr_drain_hook != NULL) {
1611: /*
1612: * The drain hook must be called with the pool unlocked.
1613: */
1614: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1615: }
1616:
1.134 ad 1617: /*
1.156.2.1! yamt 1618: * XXXSMP Because we do not want to cause non-MPSAFE code
! 1619: * to block.
1.134 ad 1620: */
1621: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1622: pp->pr_ipl == IPL_SOFTSERIAL) {
1623: KERNEL_LOCK(1, NULL);
1624: klock = true;
1625: } else
1626: klock = false;
1627:
1628: /* Reclaim items from the pool's cache (if any). */
1629: if (pp->pr_cache != NULL)
1630: pool_cache_invalidate(pp->pr_cache);
1631:
1632: if (mutex_tryenter(&pp->pr_lock) == 0) {
1633: if (klock) {
1634: KERNEL_UNLOCK_ONE(NULL);
1635: }
1.66 thorpej 1636: return (0);
1.134 ad 1637: }
1.25 thorpej 1638: pr_enter(pp, file, line);
1.68 thorpej 1639:
1.88 chs 1640: LIST_INIT(&pq);
1.43 thorpej 1641:
1.151 yamt 1642: curtime = time_uptime;
1.21 thorpej 1643:
1.88 chs 1644: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1645: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1646:
1647: /* Check our minimum page claim */
1648: if (pp->pr_npages <= pp->pr_minpages)
1649: break;
1650:
1.88 chs 1651: KASSERT(ph->ph_nmissing == 0);
1.151 yamt 1652: if (curtime - ph->ph_time < pool_inactive_time
1.117 yamt 1653: && !pa_starved_p(pp->pr_alloc))
1.88 chs 1654: continue;
1.21 thorpej 1655:
1.88 chs 1656: /*
1657: * If freeing this page would put us below
1658: * the low water mark, stop now.
1659: */
1660: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1661: pp->pr_minitems)
1662: break;
1.21 thorpej 1663:
1.88 chs 1664: pr_rmpage(pp, ph, &pq);
1.3 pk 1665: }
1666:
1.25 thorpej 1667: pr_leave(pp);
1.134 ad 1668: mutex_exit(&pp->pr_lock);
1669:
1670: if (LIST_EMPTY(&pq))
1671: rv = 0;
1672: else {
1673: pr_pagelist_free(pp, &pq);
1674: rv = 1;
1675: }
1676:
1677: if (klock) {
1678: KERNEL_UNLOCK_ONE(NULL);
1679: }
1.66 thorpej 1680:
1.134 ad 1681: return (rv);
1.3 pk 1682: }
1683:
1684: /*
1.134 ad 1685: * Drain pools, one at a time. This is a two-stage process;
1686: * drain_start kicks off a cross call to drain CPU-level caches
1687: * if the pool has an associated pool_cache. drain_end waits
1688: * for those cross calls to finish, and then drains the cache
1689: * (if any) and pool.
1.131 ad 1690: *
1.134     ad       1691:  * Note that this must never be called from interrupt context.
1.3 pk 1692: */
1693: void
1.134 ad 1694: pool_drain_start(struct pool **ppp, uint64_t *wp)
1.3 pk 1695: {
1696: struct pool *pp;
1.134 ad 1697:
1.145 ad 1698: KASSERT(!TAILQ_EMPTY(&pool_head));
1.3 pk 1699:
1.61 chs 1700: pp = NULL;
1.134 ad 1701:
1702: /* Find next pool to drain, and add a reference. */
1703: mutex_enter(&pool_head_lock);
1704: do {
1705: if (drainpp == NULL) {
1.145 ad 1706: drainpp = TAILQ_FIRST(&pool_head);
1.134 ad 1707: }
1708: if (drainpp != NULL) {
1709: pp = drainpp;
1.145 ad 1710: drainpp = TAILQ_NEXT(pp, pr_poollist);
1.134 ad 1711: }
1712: /*
1713: * Skip completely idle pools. We depend on at least
1714: * one pool in the system being active.
1715: */
1716: } while (pp == NULL || pp->pr_npages == 0);
1717: pp->pr_refcnt++;
1718: mutex_exit(&pool_head_lock);
1719:
                   1720: 	/* If there is a pool_cache, drain CPU-level caches. */
1721: *ppp = pp;
1722: if (pp->pr_cache != NULL) {
1723: *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
1724: pp->pr_cache, NULL);
1725: }
1726: }
1727:
1728: void
1729: pool_drain_end(struct pool *pp, uint64_t where)
1730: {
1731:
1732: if (pp == NULL)
1733: return;
1734:
1735: KASSERT(pp->pr_refcnt > 0);
1736:
1737: /* Wait for remote draining to complete. */
1738: if (pp->pr_cache != NULL)
1739: xc_wait(where);
1740:
                   1741: 	/* Drain the cache (if any) and pool. */
1742: pool_reclaim(pp);
1743:
                   1744: 	/* Finally, release our reference on the pool. */
1745: mutex_enter(&pool_head_lock);
1746: pp->pr_refcnt--;
1747: cv_broadcast(&pool_busy);
1748: mutex_exit(&pool_head_lock);
1.3 pk 1749: }
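/*
 * Illustrative sketch of the two-stage protocol described above, as a
 * reclaim loop might use it (the caller shown here is hypothetical):
 * start the cross call, optionally do other work while it runs, then
 * wait for it and drain the selected pool.
 */
static void
example_drain_one_pool(void)
{
	struct pool *pp;
	uint64_t where;

	pool_drain_start(&pp, &where);
	/* ... reclaim from other sources while the cross call runs ... */
	pool_drain_end(pp, where);
}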
1750:
1751: /*
1752: * Diagnostic helpers.
1753: */
1754: void
1.42 thorpej 1755: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1756: {
1757:
1.25 thorpej 1758: pool_print1(pp, modif, printf);
1.21 thorpej 1759: }
1760:
1.25 thorpej 1761: void
1.108 yamt 1762: pool_printall(const char *modif, void (*pr)(const char *, ...))
1763: {
1764: struct pool *pp;
1765:
1.145 ad 1766: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.108 yamt 1767: pool_printit(pp, modif, pr);
1768: }
1769: }
1770:
1771: void
1.42 thorpej 1772: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1773: {
1774:
1775: if (pp == NULL) {
1776: (*pr)("Must specify a pool to print.\n");
1777: return;
1778: }
1779:
1780: pool_print1(pp, modif, pr);
1781: }
1782:
1.21 thorpej 1783: static void
1.124 yamt 1784: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1785: void (*pr)(const char *, ...))
1.88 chs 1786: {
1787: struct pool_item_header *ph;
1788: #ifdef DIAGNOSTIC
1789: struct pool_item *pi;
1790: #endif
1791:
1792: LIST_FOREACH(ph, pl, ph_pagelist) {
1.151 yamt 1793: (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1794: ph->ph_page, ph->ph_nmissing, ph->ph_time);
1.88 chs 1795: #ifdef DIAGNOSTIC
1.97 yamt 1796: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1797: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1798: if (pi->pi_magic != PI_MAGIC) {
1799: (*pr)("\t\t\titem %p, magic 0x%x\n",
1800: pi, pi->pi_magic);
1801: }
1.88 chs 1802: }
1803: }
1804: #endif
1805: }
1806: }
1807:
1808: static void
1.42 thorpej 1809: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1810: {
1.25 thorpej 1811: struct pool_item_header *ph;
1.134 ad 1812: pool_cache_t pc;
1813: pcg_t *pcg;
1814: pool_cache_cpu_t *cc;
1815: uint64_t cpuhit, cpumiss;
1.44 thorpej 1816: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1817: char c;
1818:
1819: while ((c = *modif++) != '\0') {
1820: if (c == 'l')
1821: print_log = 1;
1822: if (c == 'p')
1823: print_pagelist = 1;
1.44 thorpej 1824: if (c == 'c')
1825: print_cache = 1;
1.25 thorpej 1826: }
1827:
1.134 ad 1828: if ((pc = pp->pr_cache) != NULL) {
1829: (*pr)("POOL CACHE");
1830: } else {
1831: (*pr)("POOL");
1832: }
1833:
1834: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1835: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1836: pp->pr_roflags);
1.66 thorpej 1837: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1838: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1839: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1840: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1841: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1842:
1.134 ad 1843: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1844: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1845: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1846: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1847:
1848: if (print_pagelist == 0)
1849: goto skip_pagelist;
1850:
1.88 chs 1851: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1852: (*pr)("\n\tempty page list:\n");
1.97 yamt 1853: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1854: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1855: (*pr)("\n\tfull page list:\n");
1.97 yamt 1856: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1857: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1858: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1859: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1860:
1.25 thorpej 1861: if (pp->pr_curpage == NULL)
1862: (*pr)("\tno current page\n");
1863: else
1864: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1865:
1866: skip_pagelist:
1867: if (print_log == 0)
1868: goto skip_log;
1869:
1870: (*pr)("\n");
1871: if ((pp->pr_roflags & PR_LOGGING) == 0)
1872: (*pr)("\tno log\n");
1.122 christos 1873: else {
1.25 thorpej 1874: pr_printlog(pp, NULL, pr);
1.122 christos 1875: }
1.3 pk 1876:
1.25 thorpej 1877: skip_log:
1.44 thorpej 1878:
1.102 chs 1879: #define PR_GROUPLIST(pcg) \
1880: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1.142 ad 1881: for (i = 0; i < pcg->pcg_size; i++) { \
1.102 chs 1882: if (pcg->pcg_objects[i].pcgo_pa != \
1883: POOL_PADDR_INVALID) { \
1884: (*pr)("\t\t\t%p, 0x%llx\n", \
1885: pcg->pcg_objects[i].pcgo_va, \
1886: (unsigned long long) \
1887: pcg->pcg_objects[i].pcgo_pa); \
1888: } else { \
1889: (*pr)("\t\t\t%p\n", \
1890: pcg->pcg_objects[i].pcgo_va); \
1891: } \
1892: }
1893:
1.134 ad 1894: if (pc != NULL) {
1895: cpuhit = 0;
1896: cpumiss = 0;
1897: for (i = 0; i < MAXCPUS; i++) {
1898: if ((cc = pc->pc_cpus[i]) == NULL)
1899: continue;
1900: cpuhit += cc->cc_hits;
1901: cpumiss += cc->cc_misses;
1902: }
1903: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1904: (*pr)("\tcache layer hits %llu misses %llu\n",
1905: pc->pc_hits, pc->pc_misses);
1906: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1907: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1908: pc->pc_contended);
1909: (*pr)("\tcache layer empty groups %u full groups %u\n",
1910: pc->pc_nempty, pc->pc_nfull);
1911: if (print_cache) {
1912: (*pr)("\tfull cache groups:\n");
1913: for (pcg = pc->pc_fullgroups; pcg != NULL;
1914: pcg = pcg->pcg_next) {
1915: PR_GROUPLIST(pcg);
1916: }
1917: (*pr)("\tempty cache groups:\n");
1918: for (pcg = pc->pc_emptygroups; pcg != NULL;
1919: pcg = pcg->pcg_next) {
1920: PR_GROUPLIST(pcg);
1921: }
1.103 chs 1922: }
1.44 thorpej 1923: }
1.102 chs 1924: #undef PR_GROUPLIST
1.44 thorpej 1925:
1.88 chs 1926: pr_enter_check(pp, pr);
1927: }
1928:
1929: static int
1930: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1931: {
1932: struct pool_item *pi;
1.128 christos 1933: void *page;
1.88 chs 1934: int n;
1935:
1.121 yamt 1936: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1937: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1938: if (page != ph->ph_page &&
1939: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1940: if (label != NULL)
1941: printf("%s: ", label);
1942: printf("pool(%p:%s): page inconsistency: page %p;"
1943: " at page head addr %p (p %p)\n", pp,
1944: pp->pr_wchan, ph->ph_page,
1945: ph, page);
1946: return 1;
1947: }
1.88 chs 1948: }
1.3 pk 1949:
1.97 yamt 1950: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1951: return 0;
1952:
1.102 chs 1953: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1954: pi != NULL;
1.102 chs 1955: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1956:
1957: #ifdef DIAGNOSTIC
1958: if (pi->pi_magic != PI_MAGIC) {
1959: if (label != NULL)
1960: printf("%s: ", label);
1961: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1962: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1963: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1964: n, pi);
1.88 chs 1965: panic("pool");
1966: }
1967: #endif
1.121 yamt 1968: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1969: continue;
1970: }
1.128 christos 1971: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1972: if (page == ph->ph_page)
1973: continue;
1974:
1975: if (label != NULL)
1976: printf("%s: ", label);
1977: printf("pool(%p:%s): page inconsistency: page %p;"
1978: " item ordinal %d; addr %p (p %p)\n", pp,
1979: pp->pr_wchan, ph->ph_page,
1980: n, pi, page);
1981: return 1;
1982: }
1983: return 0;
1.3 pk 1984: }
1985:
1.88 chs 1986:
1.3 pk 1987: int
1.42 thorpej 1988: pool_chk(struct pool *pp, const char *label)
1.3 pk 1989: {
1990: struct pool_item_header *ph;
1991: int r = 0;
1992:
1.134 ad 1993: mutex_enter(&pp->pr_lock);
1.88 chs 1994: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1995: r = pool_chk_page(pp, label, ph);
1996: if (r) {
1997: goto out;
1998: }
1999: }
2000: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2001: r = pool_chk_page(pp, label, ph);
2002: if (r) {
1.3 pk 2003: goto out;
2004: }
1.88 chs 2005: }
2006: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2007: r = pool_chk_page(pp, label, ph);
2008: if (r) {
1.3 pk 2009: goto out;
2010: }
2011: }
1.88 chs 2012:
1.3 pk 2013: out:
1.134 ad 2014: mutex_exit(&pp->pr_lock);
1.3 pk 2015: return (r);
1.43 thorpej 2016: }
2017:
2018: /*
2019: * pool_cache_init:
2020: *
2021: * Initialize a pool cache.
1.134 ad 2022: */
2023: pool_cache_t
2024: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
2025: const char *wchan, struct pool_allocator *palloc, int ipl,
2026: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
2027: {
2028: pool_cache_t pc;
2029:
2030: pc = pool_get(&cache_pool, PR_WAITOK);
2031: if (pc == NULL)
2032: return NULL;
2033:
2034: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
2035: palloc, ipl, ctor, dtor, arg);
2036:
2037: return pc;
2038: }
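/*
 * Illustrative sketch of creating a cache with the function above;
 * struct foo, foo_ctor() and foo_cache are hypothetical.  Passing
 * NULL for the constructor, destructor and argument is fine: nullop
 * is substituted in pool_cache_bootstrap().
 */
static pool_cache_t foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{

	memset(obj, 0, sizeof(struct foo));
	return 0;
}

static void
foo_init(void)
{

	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
	    "foocache", NULL, IPL_NONE, foo_ctor, NULL, NULL);
}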
2039:
2040: /*
2041: * pool_cache_bootstrap:
1.43 thorpej 2042: *
1.134 ad 2043: * Kernel-private version of pool_cache_init(). The caller
2044: * provides initial storage.
1.43 thorpej 2045: */
2046: void
1.134 ad 2047: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
2048: u_int align_offset, u_int flags, const char *wchan,
2049: struct pool_allocator *palloc, int ipl,
2050: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 2051: void *arg)
2052: {
1.134 ad 2053: CPU_INFO_ITERATOR cii;
1.145 ad 2054: pool_cache_t pc1;
1.134 ad 2055: struct cpu_info *ci;
2056: struct pool *pp;
2057:
2058: pp = &pc->pc_pool;
2059: if (palloc == NULL && ipl == IPL_NONE)
2060: palloc = &pool_allocator_nointr;
2061: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.156.2.1! yamt 2062: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1.43 thorpej 2063:
1.134 ad 2064: if (ctor == NULL) {
2065: ctor = (int (*)(void *, void *, int))nullop;
2066: }
2067: if (dtor == NULL) {
2068: dtor = (void (*)(void *, void *))nullop;
2069: }
1.43 thorpej 2070:
1.134 ad 2071: pc->pc_emptygroups = NULL;
2072: pc->pc_fullgroups = NULL;
2073: pc->pc_partgroups = NULL;
1.43 thorpej 2074: pc->pc_ctor = ctor;
2075: pc->pc_dtor = dtor;
2076: pc->pc_arg = arg;
1.134 ad 2077: pc->pc_hits = 0;
1.48 thorpej 2078: pc->pc_misses = 0;
1.134 ad 2079: pc->pc_nempty = 0;
2080: pc->pc_npart = 0;
2081: pc->pc_nfull = 0;
2082: pc->pc_contended = 0;
2083: pc->pc_refcnt = 0;
1.136 yamt 2084: pc->pc_freecheck = NULL;
1.134 ad 2085:
1.142 ad 2086: if ((flags & PR_LARGECACHE) != 0) {
2087: pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
2088: } else {
2089: pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
2090: }
2091:
1.134 ad 2092: /* Allocate per-CPU caches. */
2093: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
2094: pc->pc_ncpu = 0;
1.139 ad 2095: if (ncpu < 2) {
1.137 ad 2096: /* XXX For sparc: boot CPU is not attached yet. */
2097: pool_cache_cpu_init1(curcpu(), pc);
2098: } else {
2099: for (CPU_INFO_FOREACH(cii, ci)) {
2100: pool_cache_cpu_init1(ci, pc);
2101: }
1.134 ad 2102: }
1.145 ad 2103:
                   2104: 	/* Add to the list of all pool caches. */
2105: if (__predict_true(!cold))
1.134 ad 2106: mutex_enter(&pool_head_lock);
1.145 ad 2107: TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
2108: if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
2109: break;
2110: }
2111: if (pc1 == NULL)
2112: TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
2113: else
2114: TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
2115: if (__predict_true(!cold))
1.134 ad 2116: mutex_exit(&pool_head_lock);
1.145 ad 2117:
2118: membar_sync();
2119: pp->pr_cache = pc;
1.43 thorpej 2120: }
2121:
2122: /*
2123: * pool_cache_destroy:
2124: *
2125: * Destroy a pool cache.
2126: */
2127: void
1.134 ad 2128: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 2129: {
1.134 ad 2130: struct pool *pp = &pc->pc_pool;
2131: pool_cache_cpu_t *cc;
2132: pcg_t *pcg;
2133: int i;
2134:
2135: /* Remove it from the global list. */
2136: mutex_enter(&pool_head_lock);
2137: while (pc->pc_refcnt != 0)
2138: cv_wait(&pool_busy, &pool_head_lock);
1.145 ad 2139: TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1.134 ad 2140: mutex_exit(&pool_head_lock);
1.43 thorpej 2141:
2142: /* First, invalidate the entire cache. */
2143: pool_cache_invalidate(pc);
2144:
1.134 ad 2145: /* Disassociate it from the pool. */
2146: mutex_enter(&pp->pr_lock);
2147: pp->pr_cache = NULL;
2148: mutex_exit(&pp->pr_lock);
2149:
2150: /* Destroy per-CPU data */
2151: for (i = 0; i < MAXCPUS; i++) {
2152: if ((cc = pc->pc_cpus[i]) == NULL)
2153: continue;
2154: if ((pcg = cc->cc_current) != NULL) {
2155: pcg->pcg_next = NULL;
2156: pool_cache_invalidate_groups(pc, pcg);
2157: }
2158: if ((pcg = cc->cc_previous) != NULL) {
2159: pcg->pcg_next = NULL;
2160: pool_cache_invalidate_groups(pc, pcg);
2161: }
2162: if (cc != &pc->pc_cpu0)
2163: pool_put(&cache_cpu_pool, cc);
2164: }
2165:
2166: /* Finally, destroy it. */
2167: mutex_destroy(&pc->pc_lock);
2168: pool_destroy(pp);
2169: pool_put(&cache_pool, pc);
2170: }
2171:
2172: /*
2173: * pool_cache_cpu_init1:
2174: *
2175: * Called for each pool_cache whenever a new CPU is attached.
2176: */
2177: static void
2178: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
2179: {
2180: pool_cache_cpu_t *cc;
1.137 ad 2181: int index;
1.134 ad 2182:
1.137 ad 2183: index = ci->ci_index;
2184:
2185: KASSERT(index < MAXCPUS);
1.134 ad 2186:
1.137 ad 2187: if ((cc = pc->pc_cpus[index]) != NULL) {
2188: KASSERT(cc->cc_cpuindex == index);
1.134 ad 2189: return;
2190: }
2191:
2192: /*
2193: * The first CPU is 'free'. This needs to be the case for
2194: * bootstrap - we may not be able to allocate yet.
2195: */
2196: if (pc->pc_ncpu == 0) {
2197: cc = &pc->pc_cpu0;
2198: pc->pc_ncpu = 1;
2199: } else {
2200: mutex_enter(&pc->pc_lock);
2201: pc->pc_ncpu++;
2202: mutex_exit(&pc->pc_lock);
2203: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
2204: }
2205:
2206: cc->cc_ipl = pc->pc_pool.pr_ipl;
2207: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
2208: cc->cc_cache = pc;
1.137 ad 2209: cc->cc_cpuindex = index;
1.134 ad 2210: cc->cc_hits = 0;
2211: cc->cc_misses = 0;
2212: cc->cc_current = NULL;
2213: cc->cc_previous = NULL;
2214:
1.137 ad 2215: pc->pc_cpus[index] = cc;
1.43 thorpej 2216: }
2217:
1.134 ad 2218: /*
2219: * pool_cache_cpu_init:
2220: *
2221: * Called whenever a new CPU is attached.
2222: */
2223: void
2224: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 2225: {
1.134 ad 2226: pool_cache_t pc;
2227:
2228: mutex_enter(&pool_head_lock);
1.145 ad 2229: TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1.134 ad 2230: pc->pc_refcnt++;
2231: mutex_exit(&pool_head_lock);
1.43 thorpej 2232:
1.134 ad 2233: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 2234:
1.134 ad 2235: mutex_enter(&pool_head_lock);
2236: pc->pc_refcnt--;
2237: cv_broadcast(&pool_busy);
2238: }
2239: mutex_exit(&pool_head_lock);
1.43 thorpej 2240: }
2241:
1.134 ad 2242: /*
2243: * pool_cache_reclaim:
2244: *
2245: * Reclaim memory from a pool cache.
2246: */
2247: bool
2248: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 2249: {
2250:
1.134 ad 2251: return pool_reclaim(&pc->pc_pool);
2252: }
1.43 thorpej 2253:
1.136 yamt 2254: static void
2255: pool_cache_destruct_object1(pool_cache_t pc, void *object)
2256: {
2257:
2258: (*pc->pc_dtor)(pc->pc_arg, object);
2259: pool_put(&pc->pc_pool, object);
2260: }
2261:
1.134 ad 2262: /*
2263: * pool_cache_destruct_object:
2264: *
2265: * Force destruction of an object and its release back into
2266: * the pool.
2267: */
2268: void
2269: pool_cache_destruct_object(pool_cache_t pc, void *object)
2270: {
2271:
1.136 yamt 2272: FREECHECK_IN(&pc->pc_freecheck, object);
2273:
2274: pool_cache_destruct_object1(pc, object);
1.43 thorpej 2275: }
2276:
1.134 ad 2277: /*
2278: * pool_cache_invalidate_groups:
2279: *
2280: * Invalidate a chain of groups and destruct all objects.
2281: */
1.102 chs 2282: static void
1.134 ad 2283: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 2284: {
1.134 ad 2285: void *object;
2286: pcg_t *next;
2287: int i;
2288:
2289: for (; pcg != NULL; pcg = next) {
2290: next = pcg->pcg_next;
2291:
2292: for (i = 0; i < pcg->pcg_avail; i++) {
2293: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 2294: pool_cache_destruct_object1(pc, object);
1.134 ad 2295: }
1.102 chs 2296:
1.142 ad 2297: if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
2298: pool_put(&pcg_large_pool, pcg);
2299: } else {
2300: KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
2301: pool_put(&pcg_normal_pool, pcg);
2302: }
1.102 chs 2303: }
2304: }
2305:
1.43 thorpej 2306: /*
1.134 ad 2307: * pool_cache_invalidate:
1.43 thorpej 2308: *
1.134 ad 2309: * Invalidate a pool cache (destruct and release all of the
2310: * cached objects). Does not reclaim objects from the pool.
1.43 thorpej 2311: */
1.134 ad 2312: void
2313: pool_cache_invalidate(pool_cache_t pc)
2314: {
2315: pcg_t *full, *empty, *part;
2316:
2317: mutex_enter(&pc->pc_lock);
2318: full = pc->pc_fullgroups;
2319: empty = pc->pc_emptygroups;
2320: part = pc->pc_partgroups;
2321: pc->pc_fullgroups = NULL;
2322: pc->pc_emptygroups = NULL;
2323: pc->pc_partgroups = NULL;
2324: pc->pc_nfull = 0;
2325: pc->pc_nempty = 0;
2326: pc->pc_npart = 0;
2327: mutex_exit(&pc->pc_lock);
2328:
2329: pool_cache_invalidate_groups(pc, full);
2330: pool_cache_invalidate_groups(pc, empty);
2331: pool_cache_invalidate_groups(pc, part);
2332: }
2333:
2334: void
2335: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2336: {
2337:
2338: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2339: }
2340:
2341: void
2342: pool_cache_setlowat(pool_cache_t pc, int n)
2343: {
2344:
2345: pool_setlowat(&pc->pc_pool, n);
2346: }
2347:
2348: void
2349: pool_cache_sethiwat(pool_cache_t pc, int n)
2350: {
2351:
2352: pool_sethiwat(&pc->pc_pool, n);
2353: }
2354:
2355: void
2356: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2357: {
2358:
2359: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2360: }
2361:
2362: static inline pool_cache_cpu_t *
2363: pool_cache_cpu_enter(pool_cache_t pc, int *s)
2364: {
2365: pool_cache_cpu_t *cc;
2366:
2367: /*
2368: * Prevent other users of the cache from accessing our
2369: * CPU-local data. To avoid touching shared state, we
                   2370: 	 * pull the necessary information from CPU-local data.
2371: */
1.156.2.1! yamt 2372: KPREEMPT_DISABLE(curlwp);
1.137 ad 2373: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2374: KASSERT(cc->cc_cache == pc);
1.137 ad 2375: if (cc->cc_ipl != IPL_NONE) {
1.134 ad 2376: *s = splraiseipl(cc->cc_iplcookie);
2377: }
2378:
2379: return cc;
2380: }
2381:
2382: static inline void
2383: pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
2384: {
2385:
2386: /* No longer need exclusive access to the per-CPU data. */
1.137 ad 2387: if (cc->cc_ipl != IPL_NONE) {
1.134 ad 2388: splx(*s);
2389: }
1.156.2.1! yamt 2390: KPREEMPT_ENABLE(curlwp);
1.134 ad 2391: }
2392:
2393: #if __GNUC_PREREQ__(3, 0)
2394: __attribute ((noinline))
2395: #endif
2396: pool_cache_cpu_t *
2397: pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
2398: paddr_t *pap, int flags)
1.43 thorpej 2399: {
1.134 ad 2400: pcg_t *pcg, *cur;
2401: uint64_t ncsw;
2402: pool_cache_t pc;
1.43 thorpej 2403: void *object;
1.58 thorpej 2404:
1.134 ad 2405: pc = cc->cc_cache;
2406: cc->cc_misses++;
1.43 thorpej 2407:
1.134 ad 2408: /*
2409: * Nothing was available locally. Try and grab a group
2410: * from the cache.
2411: */
2412: if (!mutex_tryenter(&pc->pc_lock)) {
2413: ncsw = curlwp->l_ncsw;
2414: mutex_enter(&pc->pc_lock);
2415: pc->pc_contended++;
1.43 thorpej 2416:
1.134 ad 2417: /*
2418: * If we context switched while locking, then
2419: * our view of the per-CPU data is invalid:
2420: * retry.
2421: */
2422: if (curlwp->l_ncsw != ncsw) {
2423: mutex_exit(&pc->pc_lock);
2424: pool_cache_cpu_exit(cc, s);
2425: return pool_cache_cpu_enter(pc, s);
1.43 thorpej 2426: }
1.102 chs 2427: }
1.43 thorpej 2428:
1.134 ad 2429: if ((pcg = pc->pc_fullgroups) != NULL) {
1.43 thorpej 2430: /*
1.134 ad 2431: * If there's a full group, release our empty
2432: * group back to the cache. Install the full
2433: * group as cc_current and return.
1.43 thorpej 2434: */
1.134 ad 2435: if ((cur = cc->cc_current) != NULL) {
2436: KASSERT(cur->pcg_avail == 0);
2437: cur->pcg_next = pc->pc_emptygroups;
2438: pc->pc_emptygroups = cur;
2439: pc->pc_nempty++;
1.87 thorpej 2440: }
1.142 ad 2441: KASSERT(pcg->pcg_avail == pcg->pcg_size);
1.134 ad 2442: cc->cc_current = pcg;
2443: pc->pc_fullgroups = pcg->pcg_next;
2444: pc->pc_hits++;
2445: pc->pc_nfull--;
2446: mutex_exit(&pc->pc_lock);
2447: return cc;
2448: }
2449:
2450: /*
2451: * Nothing available locally or in cache. Take the slow
2452: * path: fetch a new object from the pool and construct
2453: * it.
2454: */
2455: pc->pc_misses++;
2456: mutex_exit(&pc->pc_lock);
2457: pool_cache_cpu_exit(cc, s);
2458:
2459: object = pool_get(&pc->pc_pool, flags);
2460: *objectp = object;
2461: if (object == NULL)
2462: return NULL;
1.125 ad 2463:
1.134 ad 2464: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
2465: pool_put(&pc->pc_pool, object);
2466: *objectp = NULL;
2467: return NULL;
1.43 thorpej 2468: }
2469:
1.134 ad 2470: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2471: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2472:
1.134 ad 2473: if (pap != NULL) {
2474: #ifdef POOL_VTOPHYS
2475: *pap = POOL_VTOPHYS(object);
2476: #else
2477: *pap = POOL_PADDR_INVALID;
2478: #endif
1.102 chs 2479: }
1.43 thorpej 2480:
1.125 ad 2481: FREECHECK_OUT(&pc->pc_freecheck, object);
1.134 ad 2482: return NULL;
1.43 thorpej 2483: }
2484:
2485: /*
1.134 ad 2486: * pool_cache_get{,_paddr}:
1.43 thorpej 2487: *
1.134 ad 2488: * Get an object from a pool cache (optionally returning
2489: * the physical address of the object).
1.43 thorpej 2490: */
1.134 ad 2491: void *
2492: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2493: {
1.134 ad 2494: pool_cache_cpu_t *cc;
2495: pcg_t *pcg;
2496: void *object;
1.60 thorpej 2497: int s;
1.43 thorpej 2498:
1.134 ad 2499: #ifdef LOCKDEBUG
1.155 ad 2500: if (flags & PR_WAITOK) {
1.154 yamt 2501: ASSERT_SLEEPABLE();
1.155 ad 2502: }
1.134 ad 2503: #endif
1.125 ad 2504:
1.134 ad 2505: cc = pool_cache_cpu_enter(pc, &s);
2506: do {
2507: /* Try and allocate an object from the current group. */
2508: pcg = cc->cc_current;
2509: if (pcg != NULL && pcg->pcg_avail > 0) {
2510: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2511: if (pap != NULL)
2512: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
1.148 yamt 2513: #if defined(DIAGNOSTIC)
1.134 ad 2514: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
1.148 yamt 2515: #endif /* defined(DIAGNOSTIC) */
1.142 ad 2516: KASSERT(pcg->pcg_avail <= pcg->pcg_size);
1.134 ad 2517: KASSERT(object != NULL);
2518: cc->cc_hits++;
2519: pool_cache_cpu_exit(cc, &s);
2520: FREECHECK_OUT(&pc->pc_freecheck, object);
2521: return object;
1.43 thorpej 2522: }
2523:
2524: /*
1.134 ad 2525: * That failed. If the previous group isn't empty, swap
2526: * it with the current group and allocate from there.
1.43 thorpej 2527: */
1.134 ad 2528: pcg = cc->cc_previous;
2529: if (pcg != NULL && pcg->pcg_avail > 0) {
2530: cc->cc_previous = cc->cc_current;
2531: cc->cc_current = pcg;
2532: continue;
1.43 thorpej 2533: }
2534:
1.134 ad 2535: /*
2536: * Can't allocate from either group: try the slow path.
2537: * If get_slow() allocated an object for us, or if
2538: * no more objects are available, it will return NULL.
2539: * Otherwise, we need to retry.
2540: */
2541: cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
2542: } while (cc != NULL);
1.43 thorpej 2543:
1.134 ad 2544: return object;
1.51 thorpej 2545: }
2546:
1.134 ad 2547: #if __GNUC_PREREQ__(3, 0)
2548: __attribute ((noinline))
2549: #endif
2550: pool_cache_cpu_t *
2551: pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
1.51 thorpej 2552: {
1.134 ad 2553: pcg_t *pcg, *cur;
2554: uint64_t ncsw;
2555: pool_cache_t pc;
1.142 ad 2556: u_int nobj;
1.51 thorpej 2557:
1.134 ad 2558: pc = cc->cc_cache;
2559: cc->cc_misses++;
1.43 thorpej 2560:
1.134 ad 2561: /*
2562: * No free slots locally. Try to grab an empty, unused
2563: * group from the cache.
2564: */
2565: if (!mutex_tryenter(&pc->pc_lock)) {
2566: ncsw = curlwp->l_ncsw;
2567: mutex_enter(&pc->pc_lock);
2568: pc->pc_contended++;
1.102 chs 2569:
1.134 ad 2570: /*
2571: * If we context switched while locking, then
2572: * our view of the per-CPU data is invalid:
2573: * retry.
2574: */
2575: if (curlwp->l_ncsw != ncsw) {
2576: mutex_exit(&pc->pc_lock);
2577: pool_cache_cpu_exit(cc, s);
2578: return pool_cache_cpu_enter(pc, s);
2579: }
2580: }
1.130 ad 2581:
1.134 ad 2582: if ((pcg = pc->pc_emptygroups) != NULL) {
2583: /*
                   2584: 		 * If there's an empty group, release our full
2585: * group back to the cache. Install the empty
1.146 ad 2586: * group and return.
1.134 ad 2587: */
2588: KASSERT(pcg->pcg_avail == 0);
2589: pc->pc_emptygroups = pcg->pcg_next;
1.146 ad 2590: if (cc->cc_previous == NULL) {
2591: cc->cc_previous = pcg;
2592: } else {
2593: if ((cur = cc->cc_current) != NULL) {
2594: KASSERT(cur->pcg_avail == pcg->pcg_size);
2595: cur->pcg_next = pc->pc_fullgroups;
2596: pc->pc_fullgroups = cur;
2597: pc->pc_nfull++;
2598: }
2599: cc->cc_current = pcg;
2600: }
1.134 ad 2601: pc->pc_hits++;
2602: pc->pc_nempty--;
2603: mutex_exit(&pc->pc_lock);
2604: return cc;
1.102 chs 2605: }
1.105 christos 2606:
1.134 ad 2607: /*
2608: * Nothing available locally or in cache. Take the
2609: * slow path and try to allocate a new group that we
2610: * can release to.
2611: */
2612: pc->pc_misses++;
2613: mutex_exit(&pc->pc_lock);
2614: pool_cache_cpu_exit(cc, s);
1.105 christos 2615:
1.134 ad 2616: /*
2617: * If we can't allocate a new group, just throw the
2618: * object away.
2619: */
1.142 ad 2620: nobj = pc->pc_pcgsize;
1.146 ad 2621: if (pool_cache_disable) {
2622: pcg = NULL;
2623: } else if (nobj == PCG_NOBJECTS_LARGE) {
1.142 ad 2624: pcg = pool_get(&pcg_large_pool, PR_NOWAIT);
2625: } else {
2626: pcg = pool_get(&pcg_normal_pool, PR_NOWAIT);
2627: }
1.134 ad 2628: if (pcg == NULL) {
2629: pool_cache_destruct_object(pc, object);
2630: return NULL;
2631: }
2632: pcg->pcg_avail = 0;
1.142 ad 2633: pcg->pcg_size = nobj;
1.105 christos 2634:
1.134 ad 2635: /*
2636: * Add the empty group to the cache and try again.
2637: */
2638: mutex_enter(&pc->pc_lock);
2639: pcg->pcg_next = pc->pc_emptygroups;
2640: pc->pc_emptygroups = pcg;
2641: pc->pc_nempty++;
2642: mutex_exit(&pc->pc_lock);
1.103 chs 2643:
1.134 ad 2644: return pool_cache_cpu_enter(pc, s);
2645: }
1.102 chs 2646:
1.43 thorpej 2647: /*
1.134 ad 2648: * pool_cache_put{,_paddr}:
1.43 thorpej 2649: *
1.134 ad 2650: * Put an object back to the pool cache (optionally caching the
2651: * physical address of the object).
1.43 thorpej 2652: */
1.101 thorpej 2653: void
1.134 ad 2654: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2655: {
1.134 ad 2656: pool_cache_cpu_t *cc;
2657: pcg_t *pcg;
2658: int s;
1.101 thorpej 2659:
1.134 ad 2660: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2661:
1.134 ad 2662: cc = pool_cache_cpu_enter(pc, &s);
2663: do {
2664: /* If the current group isn't full, release it there. */
2665: pcg = cc->cc_current;
1.142 ad 2666: if (pcg != NULL && pcg->pcg_avail < pcg->pcg_size) {
1.134 ad 2667: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2668: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2669: pcg->pcg_avail++;
2670: cc->cc_hits++;
2671: pool_cache_cpu_exit(cc, &s);
2672: return;
2673: }
1.43 thorpej 2674:
1.134 ad 2675: /*
2676: * That failed. If the previous group is empty, swap
2677: * it with the current group and try again.
2678: */
2679: pcg = cc->cc_previous;
2680: if (pcg != NULL && pcg->pcg_avail == 0) {
2681: cc->cc_previous = cc->cc_current;
2682: cc->cc_current = pcg;
2683: continue;
2684: }
1.43 thorpej 2685:
1.134 ad 2686: /*
2687: * Can't free to either group: try the slow path.
2688: * If put_slow() releases the object for us, it
2689: * will return NULL. Otherwise we need to retry.
2690: */
2691: cc = pool_cache_put_slow(cc, &s, object, pa);
2692: } while (cc != NULL);
1.43 thorpej 2693: }
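/*
 * Illustrative sketch of the get/put pair defined above, continuing
 * the hypothetical foo_cache example: fetch a constructed object, use
 * it, and hand it back to the per-CPU layer.  Callers that do not
 * need the physical address pass NULL / POOL_PADDR_INVALID.
 */
static void
foo_use_object(void)
{
	struct foo *f;

	f = pool_cache_get_paddr(foo_cache, PR_WAITOK, NULL);
	if (f == NULL)
		return;
	/* ... use the object ... */
	pool_cache_put_paddr(foo_cache, f, POOL_PADDR_INVALID);
}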
2694:
2695: /*
1.134 ad 2696: * pool_cache_xcall:
1.43 thorpej 2697: *
1.134 ad 2698: * Transfer objects from the per-CPU cache to the global cache.
2699: * Run within a cross-call thread.
1.43 thorpej 2700: */
2701: static void
1.134 ad 2702: pool_cache_xcall(pool_cache_t pc)
1.43 thorpej 2703: {
1.134 ad 2704: pool_cache_cpu_t *cc;
2705: pcg_t *prev, *cur, **list;
2706: int s = 0; /* XXXgcc */
2707:
2708: cc = pool_cache_cpu_enter(pc, &s);
2709: cur = cc->cc_current;
2710: cc->cc_current = NULL;
2711: prev = cc->cc_previous;
2712: cc->cc_previous = NULL;
2713: pool_cache_cpu_exit(cc, &s);
2714:
2715: /*
2716: * XXXSMP Go to splvm to prevent kernel_lock from being taken,
2717: * because locks at IPL_SOFTXXX are still spinlocks. Does not
2718: * apply to IPL_SOFTBIO. Cross-call threads do not take the
2719: * kernel_lock.
1.101 thorpej 2720: */
1.134 ad 2721: s = splvm();
2722: mutex_enter(&pc->pc_lock);
2723: if (cur != NULL) {
1.142 ad 2724: if (cur->pcg_avail == cur->pcg_size) {
1.134 ad 2725: list = &pc->pc_fullgroups;
2726: pc->pc_nfull++;
2727: } else if (cur->pcg_avail == 0) {
2728: list = &pc->pc_emptygroups;
2729: pc->pc_nempty++;
2730: } else {
2731: list = &pc->pc_partgroups;
2732: pc->pc_npart++;
2733: }
2734: cur->pcg_next = *list;
2735: *list = cur;
2736: }
2737: if (prev != NULL) {
1.142 ad 2738: if (prev->pcg_avail == prev->pcg_size) {
1.134 ad 2739: list = &pc->pc_fullgroups;
2740: pc->pc_nfull++;
2741: } else if (prev->pcg_avail == 0) {
2742: list = &pc->pc_emptygroups;
2743: pc->pc_nempty++;
2744: } else {
2745: list = &pc->pc_partgroups;
2746: pc->pc_npart++;
2747: }
2748: prev->pcg_next = *list;
2749: *list = prev;
2750: }
2751: mutex_exit(&pc->pc_lock);
2752: splx(s);
1.3 pk 2753: }
1.66 thorpej 2754:
2755: /*
2756: * Pool backend allocators.
2757: *
2758: * Each pool has a backend allocator that handles allocation, deallocation,
2759: * and any additional draining that might be needed.
2760: *
2761: * We provide two standard allocators:
2762: *
2763: * pool_allocator_kmem - the default when no allocator is specified
2764: *
2765: * pool_allocator_nointr - used for pools that will not be accessed
2766: * in interrupt context.
2767: */
2768: void *pool_page_alloc(struct pool *, int);
2769: void pool_page_free(struct pool *, void *);
2770:
1.112 bjh21 2771: #ifdef POOL_SUBPAGE
2772: struct pool_allocator pool_allocator_kmem_fullpage = {
2773: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2774: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2775: };
2776: #else
1.66 thorpej 2777: struct pool_allocator pool_allocator_kmem = {
2778: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2779: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2780: };
1.112 bjh21 2781: #endif
1.66 thorpej 2782:
2783: void *pool_page_alloc_nointr(struct pool *, int);
2784: void pool_page_free_nointr(struct pool *, void *);
2785:
1.112 bjh21 2786: #ifdef POOL_SUBPAGE
2787: struct pool_allocator pool_allocator_nointr_fullpage = {
2788: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2789: .pa_backingmapptr = &kernel_map,
1.112 bjh21 2790: };
2791: #else
1.66 thorpej 2792: struct pool_allocator pool_allocator_nointr = {
2793: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2794: .pa_backingmapptr = &kernel_map,
1.66 thorpej 2795: };
1.112 bjh21 2796: #endif
1.66 thorpej 2797:
2798: #ifdef POOL_SUBPAGE
2799: void *pool_subpage_alloc(struct pool *, int);
2800: void pool_subpage_free(struct pool *, void *);
2801:
1.112 bjh21 2802: struct pool_allocator pool_allocator_kmem = {
2803: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2804: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2805: };
2806:
2807: void *pool_subpage_alloc_nointr(struct pool *, int);
2808: void pool_subpage_free_nointr(struct pool *, void *);
2809:
2810: struct pool_allocator pool_allocator_nointr = {
2811: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2812: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2813: };
2814: #endif /* POOL_SUBPAGE */
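/*
 * Illustrative sketch of a private backend allocator following the
 * pattern above; the example_* names are hypothetical and the page
 * routines simply reuse the uncached kmem_map helpers also used by
 * pool_page_alloc_meta() below.  A pool selects the allocator by
 * passing &example_allocator as 'palloc' to pool_init().
 */
static void *
example_page_alloc(struct pool *pp, int flags)
{
	bool waitok = (flags & PR_WAITOK) ? true : false;

	return (void *)uvm_km_alloc_poolpage(kmem_map, waitok);
}

static void
example_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage(kmem_map, (vaddr_t)v);
}

struct pool_allocator example_allocator = {
	example_page_alloc, example_page_free, 0,
	.pa_backingmapptr = &kmem_map,
};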
2815:
1.117 yamt 2816: static void *
2817: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2818: {
1.117 yamt 2819: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2820: void *res;
2821:
1.117 yamt 2822: res = (*pa->pa_alloc)(pp, flags);
2823: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2824: /*
1.117 yamt 2825: * We only run the drain hook here if PR_NOWAIT.
2826: * In other cases, the hook will be run in
2827: * pool_reclaim().
1.66 thorpej 2828: */
1.117 yamt 2829: if (pp->pr_drain_hook != NULL) {
2830: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2831: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2832: }
1.117 yamt 2833: }
2834: return res;
1.66 thorpej 2835: }
2836:
1.117 yamt 2837: static void
1.66 thorpej 2838: pool_allocator_free(struct pool *pp, void *v)
2839: {
2840: struct pool_allocator *pa = pp->pr_alloc;
2841:
2842: (*pa->pa_free)(pp, v);
2843: }
2844:
2845: void *
1.124 yamt 2846: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2847: {
1.127 thorpej 2848: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2849:
1.100 yamt 2850: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2851: }
2852:
2853: void
1.124 yamt 2854: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2855: {
2856:
1.98 yamt 2857: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2858: }
2859:
2860: static void *
1.124 yamt 2861: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2862: {
1.127 thorpej 2863: bool waitok = (flags & PR_WAITOK) ? true : false;
1.98 yamt 2864:
1.100 yamt 2865: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2866: }
2867:
2868: static void
1.124 yamt 2869: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2870: {
2871:
1.100 yamt 2872: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2873: }
2874:
2875: #ifdef POOL_SUBPAGE
2876: /* Sub-page allocator, for machines with large hardware pages. */
2877: void *
2878: pool_subpage_alloc(struct pool *pp, int flags)
2879: {
1.134 ad 2880: return pool_get(&psppool, flags);
1.66 thorpej 2881: }
2882:
2883: void
2884: pool_subpage_free(struct pool *pp, void *v)
2885: {
2886: pool_put(&psppool, v);
2887: }
2888:
2889: /* We don't provide a real nointr allocator. Maybe later. */
2890: void *
1.112 bjh21 2891: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2892: {
2893:
2894: return (pool_subpage_alloc(pp, flags));
2895: }
2896:
2897: void
1.112 bjh21 2898: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2899: {
2900:
2901: pool_subpage_free(pp, v);
2902: }
1.112 bjh21 2903: #endif /* POOL_SUBPAGE */
1.66 thorpej 2904: void *
1.124 yamt 2905: pool_page_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2906: {
1.127 thorpej 2907: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2908:
1.100 yamt 2909: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2910: }
2911:
2912: void
1.124 yamt 2913: pool_page_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2914: {
2915:
1.98 yamt 2916: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2917: }
1.141 yamt 2918:
2919: #if defined(DDB)
2920: static bool
2921: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2922: {
2923:
2924: return (uintptr_t)ph->ph_page <= addr &&
2925: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2926: }
2927:
1.143 yamt 2928: static bool
2929: pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2930: {
2931:
2932: return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2933: }
2934:
2935: static bool
2936: pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2937: {
2938: int i;
2939:
2940: if (pcg == NULL) {
2941: return false;
2942: }
1.144 yamt 2943: for (i = 0; i < pcg->pcg_avail; i++) {
1.143 yamt 2944: if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2945: return true;
2946: }
2947: }
2948: return false;
2949: }
2950:
2951: static bool
2952: pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2953: {
2954:
2955: if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2956: unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2957: pool_item_bitmap_t *bitmap =
2958: ph->ph_bitmap + (idx / BITMAP_SIZE);
2959: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2960:
2961: return (*bitmap & mask) == 0;
2962: } else {
2963: struct pool_item *pi;
2964:
2965: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2966: if (pool_in_item(pp, pi, addr)) {
2967: return false;
2968: }
2969: }
2970: return true;
2971: }
2972: }
2973:
1.141 yamt 2974: void
2975: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2976: {
2977: struct pool *pp;
2978:
1.145 ad 2979: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.141 yamt 2980: struct pool_item_header *ph;
2981: uintptr_t item;
1.143 yamt 2982: bool allocated = true;
2983: bool incache = false;
2984: bool incpucache = false;
2985: char cpucachestr[32];
1.141 yamt 2986:
2987: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2988: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2989: if (pool_in_page(pp, ph, addr)) {
2990: goto found;
2991: }
2992: }
2993: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2994: if (pool_in_page(pp, ph, addr)) {
1.143 yamt 2995: allocated =
2996: pool_allocated(pp, ph, addr);
2997: goto found;
2998: }
2999: }
3000: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
3001: if (pool_in_page(pp, ph, addr)) {
3002: allocated = false;
1.141 yamt 3003: goto found;
3004: }
3005: }
3006: continue;
3007: } else {
3008: ph = pr_find_pagehead_noalign(pp, (void *)addr);
3009: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
3010: continue;
3011: }
1.143 yamt 3012: allocated = pool_allocated(pp, ph, addr);
1.141 yamt 3013: }
3014: found:
1.143 yamt 3015: if (allocated && pp->pr_cache) {
3016: pool_cache_t pc = pp->pr_cache;
3017: struct pool_cache_group *pcg;
3018: int i;
3019:
3020: for (pcg = pc->pc_fullgroups; pcg != NULL;
3021: pcg = pcg->pcg_next) {
3022: if (pool_in_cg(pp, pcg, addr)) {
3023: incache = true;
3024: goto print;
3025: }
3026: }
3027: for (i = 0; i < MAXCPUS; i++) {
3028: pool_cache_cpu_t *cc;
3029:
3030: if ((cc = pc->pc_cpus[i]) == NULL) {
3031: continue;
3032: }
3033: if (pool_in_cg(pp, cc->cc_current, addr) ||
3034: pool_in_cg(pp, cc->cc_previous, addr)) {
3035: struct cpu_info *ci =
3036: cpu_lookup_byindex(i);
3037:
3038: incpucache = true;
3039: snprintf(cpucachestr,
3040: sizeof(cpucachestr),
3041: "cached by CPU %u",
1.153 martin 3042: ci->ci_index);
1.143 yamt 3043: goto print;
3044: }
3045: }
3046: }
3047: print:
1.141 yamt 3048: item = (uintptr_t)ph->ph_page + ph->ph_off;
3049: item = item + rounddown(addr - item, pp->pr_size);
1.143 yamt 3050: (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
1.141 yamt 3051: (void *)addr, item, (size_t)(addr - item),
1.143 yamt 3052: pp->pr_wchan,
3053: incpucache ? cpucachestr :
3054: incache ? "cached" : allocated ? "allocated" : "free");
1.141 yamt 3055: }
3056: }
3057: #endif /* defined(DDB) */