Annotation of src/sys/kern/subr_pool.c, Revision 1.141
1.141 ! yamt 1: /* $NetBSD: subr_pool.c,v 1.140 2007/12/13 01:22:50 yamt Exp $ */
1.1 pk 2:
3: /*-
1.134 ad 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.134 ad 9: * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.141 ! yamt 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.140 2007/12/13 01:22:50 yamt Exp $");
1.24 scottr 42:
1.141 ! yamt 43: #include "opt_ddb.h"
1.25 thorpej 44: #include "opt_pool.h"
1.24 scottr 45: #include "opt_poollog.h"
1.28 thorpej 46: #include "opt_lockdebug.h"
1.1 pk 47:
48: #include <sys/param.h>
49: #include <sys/systm.h>
1.135 yamt 50: #include <sys/bitops.h>
1.1 pk 51: #include <sys/proc.h>
52: #include <sys/errno.h>
53: #include <sys/kernel.h>
54: #include <sys/malloc.h>
55: #include <sys/lock.h>
56: #include <sys/pool.h>
1.20 thorpej 57: #include <sys/syslog.h>
1.125 ad 58: #include <sys/debug.h>
1.134 ad 59: #include <sys/lockdebug.h>
60: #include <sys/xcall.h>
61: #include <sys/cpu.h>
1.3 pk 62:
63: #include <uvm/uvm.h>
64:
1.1 pk 65: /*
66: * Pool resource management utility.
1.3 pk 67: *
1.88 chs 68: * Memory is allocated in pages which are split into pieces according to
69: * the pool item size. Each page is kept on one of three lists in the
70: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
71: * for empty, full and partially-full pages respectively. The individual
72: * pool items are on a linked list headed by `ph_itemlist' in each page
73: * header. The memory for building the page list is either taken from
74: * the allocated pages themselves (for small pool items) or taken from
75: * an internal pool of page headers (`phpool').
1.1 pk 76: */
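/*
 * A minimal usage sketch of the API described above, for a
 * hypothetical `struct foo' (the names foo and foo_pool are
 * illustrative only; passing NULL for the allocator selects the
 * default, pool_allocator_kmem):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	... use f ...
 *	pool_put(&foo_pool, f);
 */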
77:
1.3 pk 78: /* List of all pools */
1.102 chs 79: LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
1.3 pk 80:
1.134 ad 81: /* List of all caches. */
82: LIST_HEAD(,pool_cache) pool_cache_head =
83: LIST_HEAD_INITIALIZER(pool_cache_head);
84:
1.3 pk 85: /* Private pool for page header structures */
1.97 yamt 86: #define PHPOOL_MAX 8
87: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 88: #define PHPOOL_FREELIST_NELEM(idx) \
89: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
1.3 pk 90:
1.62 bjh21 91: #ifdef POOL_SUBPAGE
92: /* Pool of subpages for use by normal pools. */
93: static struct pool psppool;
94: #endif
95:
1.117 yamt 96: static SLIST_HEAD(, pool_allocator) pa_deferinitq =
97: SLIST_HEAD_INITIALIZER(pa_deferinitq);
98:
1.98 yamt 99: static void *pool_page_alloc_meta(struct pool *, int);
100: static void pool_page_free_meta(struct pool *, void *);
101:
102: /* allocator for pool metadata */
1.134 ad 103: struct pool_allocator pool_allocator_meta = {
1.117 yamt 104: pool_page_alloc_meta, pool_page_free_meta,
105: .pa_backingmapptr = &kmem_map,
1.98 yamt 106: };
107:
1.3 pk 108: /* # of seconds to retain page after last use */
109: int pool_inactive_time = 10;
110:
111: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 112: static struct pool *drainpp;
113:
1.134 ad 114: /* This lock protects both pool_head and drainpp. */
115: static kmutex_t pool_head_lock;
116: static kcondvar_t pool_busy;
1.3 pk 117:
1.135 yamt 118: typedef uint32_t pool_item_bitmap_t;
119: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
120: #define BITMAP_MASK (BITMAP_SIZE - 1)
1.99 yamt 121:
1.3 pk 122: struct pool_item_header {
123: /* Page headers */
1.88 chs 124: LIST_ENTRY(pool_item_header)
1.3 pk 125: ph_pagelist; /* pool page list */
1.88 chs 126: SPLAY_ENTRY(pool_item_header)
127: ph_node; /* Off-page page headers */
1.128 christos 128: void * ph_page; /* this page's address */
1.3 pk 129: struct timeval ph_time; /* last referenced */
1.135 yamt 130: uint16_t ph_nmissing; /* # of chunks in use */
1.141 ! yamt 131: uint16_t ph_off; /* start offset in page */
1.97 yamt 132: union {
133: /* !PR_NOTOUCH */
134: struct {
1.102 chs 135: LIST_HEAD(, pool_item)
1.97 yamt 136: phu_itemlist; /* chunk list for this page */
137: } phu_normal;
138: /* PR_NOTOUCH */
139: struct {
1.141 ! yamt 140: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 141: } phu_notouch;
142: } ph_u;
1.3 pk 143: };
1.97 yamt 144: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.135 yamt 145: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 146:
1.1 pk 147: struct pool_item {
1.3 pk 148: #ifdef DIAGNOSTIC
1.82 thorpej 149: u_int pi_magic;
1.33 chs 150: #endif
1.134 ad 151: #define PI_MAGIC 0xdeaddeadU
1.3 pk 152: /* Other entries use only this list entry */
1.102 chs 153: LIST_ENTRY(pool_item) pi_list;
1.3 pk 154: };
155:
1.53 thorpej 156: #define POOL_NEEDS_CATCHUP(pp) \
157: ((pp)->pr_nitems < (pp)->pr_minitems)
158:
1.43 thorpej 159: /*
160: * Pool cache management.
161: *
162: * Pool caches provide a way for constructed objects to be cached by the
163: * pool subsystem. This can lead to performance improvements by avoiding
              164: * needless object construction/destruction, which is deferred until
              165: * absolutely necessary.
166: *
1.134 ad 167: * Caches are grouped into cache groups. Each cache group references up
168: * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
169: * object from the pool, it calls the object's constructor and places it
170: * into a cache group. When a cache group frees an object back to the
171: * pool, it first calls the object's destructor. This allows the object
172: * to persist in constructed form while freed to the cache.
173: *
174: * The pool references each cache, so that when a pool is drained by the
175: * pagedaemon, it can drain each individual cache as well. Each time a
176: * cache is drained, the most idle cache group is freed to the pool in
177: * its entirety.
1.43 thorpej 178: *
              179: * Pool caches are laid on top of pools. By layering them, we can avoid
180: * the complexity of cache management for pools which would not benefit
181: * from it.
182: */
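/*
 * A minimal usage sketch of the cache API, assuming hypothetical
 * foo_ctor/foo_dtor callbacks; the destructor is deferred while
 * objects sit in a cache group:
 *
 *	pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
 *	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	... use f ...
 *	pool_cache_put(foo_cache, f);
 */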
183:
184: static struct pool pcgpool;
1.134 ad 185: static struct pool cache_pool;
186: static struct pool cache_cpu_pool;
1.3 pk 187:
1.134 ad 188: static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
189: void *, paddr_t);
190: static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
191: void **, paddr_t *, int);
192: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
193: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
194: static void pool_cache_xcall(pool_cache_t);
1.3 pk 195:
1.42 thorpej 196: static int pool_catchup(struct pool *);
1.128 christos 197: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 198: struct pool_item_header *);
1.88 chs 199: static void pool_update_curpage(struct pool *);
1.66 thorpej 200:
1.113 yamt 201: static int pool_grow(struct pool *, int);
1.117 yamt 202: static void *pool_allocator_alloc(struct pool *, int);
203: static void pool_allocator_free(struct pool *, void *);
1.3 pk 204:
1.97 yamt 205: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 206: void (*)(const char *, ...));
1.42 thorpej 207: static void pool_print1(struct pool *, const char *,
208: void (*)(const char *, ...));
1.3 pk 209:
1.88 chs 210: static int pool_chk_page(struct pool *, const char *,
211: struct pool_item_header *);
212:
1.3 pk 213: /*
1.52 thorpej 214: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 215: */
216: struct pool_log {
217: const char *pl_file;
218: long pl_line;
219: int pl_action;
1.25 thorpej 220: #define PRLOG_GET 1
221: #define PRLOG_PUT 2
1.3 pk 222: void *pl_addr;
1.1 pk 223: };
224:
1.86 matt 225: #ifdef POOL_DIAGNOSTIC
1.3 pk 226: /* Number of entries in pool log buffers */
1.17 thorpej 227: #ifndef POOL_LOGSIZE
228: #define POOL_LOGSIZE 10
229: #endif
230:
231: int pool_logsize = POOL_LOGSIZE;
1.1 pk 232:
1.110 perry 233: static inline void
1.42 thorpej 234: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 235: {
236: int n = pp->pr_curlogentry;
237: struct pool_log *pl;
238:
1.20 thorpej 239: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 240: return;
241:
242: /*
243: * Fill in the current entry. Wrap around and overwrite
244: * the oldest entry if necessary.
245: */
246: pl = &pp->pr_log[n];
247: pl->pl_file = file;
248: pl->pl_line = line;
249: pl->pl_action = action;
250: pl->pl_addr = v;
251: if (++n >= pp->pr_logsize)
252: n = 0;
253: pp->pr_curlogentry = n;
254: }
255:
256: static void
1.42 thorpej 257: pr_printlog(struct pool *pp, struct pool_item *pi,
258: void (*pr)(const char *, ...))
1.3 pk 259: {
260: int i = pp->pr_logsize;
261: int n = pp->pr_curlogentry;
262:
1.20 thorpej 263: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 264: return;
265:
266: /*
267: * Print all entries in this pool's log.
268: */
269: while (i-- > 0) {
270: struct pool_log *pl = &pp->pr_log[n];
271: if (pl->pl_action != 0) {
1.25 thorpej 272: if (pi == NULL || pi == pl->pl_addr) {
273: (*pr)("\tlog entry %d:\n", i);
274: (*pr)("\t\taction = %s, addr = %p\n",
275: pl->pl_action == PRLOG_GET ? "get" : "put",
276: pl->pl_addr);
277: (*pr)("\t\tfile: %s at line %lu\n",
278: pl->pl_file, pl->pl_line);
279: }
1.3 pk 280: }
281: if (++n >= pp->pr_logsize)
282: n = 0;
283: }
284: }
1.25 thorpej 285:
1.110 perry 286: static inline void
1.42 thorpej 287: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 288: {
289:
1.34 thorpej 290: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 291: printf("pool %s: reentrancy at file %s line %ld\n",
292: pp->pr_wchan, file, line);
293: printf(" previous entry at file %s line %ld\n",
294: pp->pr_entered_file, pp->pr_entered_line);
295: panic("pr_enter");
296: }
297:
298: pp->pr_entered_file = file;
299: pp->pr_entered_line = line;
300: }
301:
1.110 perry 302: static inline void
1.42 thorpej 303: pr_leave(struct pool *pp)
1.25 thorpej 304: {
305:
1.34 thorpej 306: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 307: printf("pool %s not entered?\n", pp->pr_wchan);
308: panic("pr_leave");
309: }
310:
311: pp->pr_entered_file = NULL;
312: pp->pr_entered_line = 0;
313: }
314:
1.110 perry 315: static inline void
1.42 thorpej 316: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 317: {
318:
319: if (pp->pr_entered_file != NULL)
320: (*pr)("\n\tcurrently entered from file %s line %ld\n",
321: pp->pr_entered_file, pp->pr_entered_line);
322: }
1.3 pk 323: #else
1.25 thorpej 324: #define pr_log(pp, v, action, file, line)
325: #define pr_printlog(pp, pi, pr)
326: #define pr_enter(pp, file, line)
327: #define pr_leave(pp)
328: #define pr_enter_check(pp, pr)
1.59 thorpej 329: #endif /* POOL_DIAGNOSTIC */
1.3 pk 330:
1.135 yamt 331: static inline unsigned int
1.97 yamt 332: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
333: const void *v)
334: {
335: const char *cp = v;
1.135 yamt 336: unsigned int idx;
1.97 yamt 337:
338: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 339: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 340: KASSERT(idx < pp->pr_itemsperpage);
341: return idx;
342: }
343:
1.110 perry 344: static inline void
1.97 yamt 345: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
346: void *obj)
347: {
1.135 yamt 348: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
349: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
350: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 351:
1.135 yamt 352: KASSERT((*bitmap & mask) == 0);
353: *bitmap |= mask;
1.97 yamt 354: }
355:
1.110 perry 356: static inline void *
1.97 yamt 357: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
358: {
1.135 yamt 359: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
360: unsigned int idx;
361: int i;
1.97 yamt 362:
1.135 yamt 363: for (i = 0; ; i++) {
364: int bit;
1.97 yamt 365:
1.135 yamt 366: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
367: bit = ffs32(bitmap[i]);
368: if (bit) {
369: pool_item_bitmap_t mask;
370:
371: bit--;
372: idx = (i * BITMAP_SIZE) + bit;
373: mask = 1 << bit;
374: KASSERT((bitmap[i] & mask) != 0);
375: bitmap[i] &= ~mask;
376: break;
377: }
378: }
379: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 380: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 381: }
382:
1.135 yamt 383: static inline void
1.141 ! yamt 384: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
1.135 yamt 385: {
386: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
387: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
388: int i;
389:
390: for (i = 0; i < n; i++) {
391: bitmap[i] = (pool_item_bitmap_t)-1;
392: }
393: }
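/*
 * A worked example of the bitmap arithmetic above: with
 * pool_item_bitmap_t being uint32_t, BITMAP_SIZE is 32 and
 * BITMAP_MASK is 31, so item index 37 lives in word 37 / 32 = 1 at
 * bit 37 & 31 = 5.  A set bit means the item is free:
 * pr_item_notouch_init() fills every word with ones,
 * pr_item_notouch_get() clears the first set bit found by ffs32(),
 * and pr_item_notouch_put() asserts the bit is clear before setting
 * it again.
 */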
394:
1.110 perry 395: static inline int
1.88 chs 396: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
397: {
1.121 yamt 398:
399: /*
              400: * We consider a pool_item_header with a smaller ph_page to be bigger.
              401: * (This unnatural ordering is for the benefit of pr_find_pagehead.)
402: */
403:
1.88 chs 404: if (a->ph_page < b->ph_page)
1.121 yamt 405: return (1);
406: else if (a->ph_page > b->ph_page)
1.88 chs 407: return (-1);
408: else
409: return (0);
410: }
411:
412: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
413: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
414:
1.141 ! yamt 415: static inline struct pool_item_header *
! 416: pr_find_pagehead_noalign(struct pool *pp, void *v)
! 417: {
! 418: struct pool_item_header *ph, tmp;
! 419:
! 420: tmp.ph_page = (void *)(uintptr_t)v;
! 421: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
! 422: if (ph == NULL) {
! 423: ph = SPLAY_ROOT(&pp->pr_phtree);
! 424: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
! 425: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
! 426: }
! 427: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
! 428: }
! 429:
! 430: return ph;
! 431: }
! 432:
1.3 pk 433: /*
1.121 yamt 434: * Return the pool page header based on item address.
1.3 pk 435: */
1.110 perry 436: static inline struct pool_item_header *
1.121 yamt 437: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 438: {
1.88 chs 439: struct pool_item_header *ph, tmp;
1.3 pk 440:
1.121 yamt 441: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.141 ! yamt 442: ph = pr_find_pagehead_noalign(pp, v);
1.121 yamt 443: } else {
1.128 christos 444: void *page =
445: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 446:
447: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 448: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 449: } else {
450: tmp.ph_page = page;
451: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
452: }
453: }
1.3 pk 454:
1.121 yamt 455: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 456: ((char *)ph->ph_page <= (char *)v &&
457: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 458: return ph;
1.3 pk 459: }
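/*
 * A worked example of the aligned lookup above: with 4 KB pages,
 * pa_pagemask is ~(4096 - 1) = ~0xfff, so an item at 0xdeadb123
 * maps to the page at 0xdeadb000.  With PR_PHINPAGE the header then
 * sits at that address plus pr_phoffset; otherwise it is looked up
 * in the pr_phtree splay tree keyed on ph_page.
 */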
460:
1.101 thorpej 461: static void
462: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
463: {
464: struct pool_item_header *ph;
465:
466: while ((ph = LIST_FIRST(pq)) != NULL) {
467: LIST_REMOVE(ph, ph_pagelist);
468: pool_allocator_free(pp, ph->ph_page);
1.134 ad 469: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 470: pool_put(pp->pr_phpool, ph);
471: }
472: }
473:
1.3 pk 474: /*
475: * Remove a page from the pool.
476: */
1.110 perry 477: static inline void
1.61 chs 478: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
479: struct pool_pagelist *pq)
1.3 pk 480: {
481:
1.134 ad 482: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 483:
1.3 pk 484: /*
1.7 thorpej 485: * If the page was idle, decrement the idle page count.
1.3 pk 486: */
1.6 thorpej 487: if (ph->ph_nmissing == 0) {
488: #ifdef DIAGNOSTIC
489: if (pp->pr_nidle == 0)
490: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 491: if (pp->pr_nitems < pp->pr_itemsperpage)
492: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 493: #endif
494: pp->pr_nidle--;
495: }
1.7 thorpej 496:
1.20 thorpej 497: pp->pr_nitems -= pp->pr_itemsperpage;
498:
1.7 thorpej 499: /*
1.101 thorpej 500: * Unlink the page from the pool and queue it for release.
1.7 thorpej 501: */
1.88 chs 502: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 503: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
504: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 505: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
506:
1.7 thorpej 507: pp->pr_npages--;
508: pp->pr_npagefree++;
1.6 thorpej 509:
1.88 chs 510: pool_update_curpage(pp);
1.3 pk 511: }
512:
1.126 thorpej 513: static bool
1.117 yamt 514: pa_starved_p(struct pool_allocator *pa)
515: {
516:
517: if (pa->pa_backingmap != NULL) {
518: return vm_map_starved_p(pa->pa_backingmap);
519: }
1.127 thorpej 520: return false;
1.117 yamt 521: }
522:
523: static int
1.124 yamt 524: pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
1.117 yamt 525: {
526: struct pool *pp = obj;
527: struct pool_allocator *pa = pp->pr_alloc;
528:
529: KASSERT(&pp->pr_reclaimerentry == ce);
530: pool_reclaim(pp);
531: if (!pa_starved_p(pa)) {
532: return CALLBACK_CHAIN_ABORT;
533: }
534: return CALLBACK_CHAIN_CONTINUE;
535: }
536:
537: static void
538: pool_reclaim_register(struct pool *pp)
539: {
540: struct vm_map *map = pp->pr_alloc->pa_backingmap;
541: int s;
542:
543: if (map == NULL) {
544: return;
545: }
546:
547: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
548: callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
549: &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
550: splx(s);
551: }
552:
553: static void
554: pool_reclaim_unregister(struct pool *pp)
555: {
556: struct vm_map *map = pp->pr_alloc->pa_backingmap;
557: int s;
558:
559: if (map == NULL) {
560: return;
561: }
562:
563: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
564: callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
565: &pp->pr_reclaimerentry);
566: splx(s);
567: }
568:
569: static void
570: pa_reclaim_register(struct pool_allocator *pa)
571: {
572: struct vm_map *map = *pa->pa_backingmapptr;
573: struct pool *pp;
574:
575: KASSERT(pa->pa_backingmap == NULL);
576: if (map == NULL) {
577: SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
578: return;
579: }
580: pa->pa_backingmap = map;
581: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
582: pool_reclaim_register(pp);
583: }
584: }
585:
1.3 pk 586: /*
1.94 simonb 587: * Initialize all the pools listed in the "pools" link set.
588: */
589: void
1.117 yamt 590: pool_subsystem_init(void)
1.94 simonb 591: {
1.117 yamt 592: struct pool_allocator *pa;
1.94 simonb 593: __link_set_decl(pools, struct link_pool_init);
594: struct link_pool_init * const *pi;
595:
1.134 ad 596: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
597: cv_init(&pool_busy, "poolbusy");
598:
1.94 simonb 599: __link_set_foreach(pi, pools)
600: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
601: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
1.129 ad 602: (*pi)->palloc, (*pi)->ipl);
1.117 yamt 603:
604: while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
605: KASSERT(pa->pa_backingmapptr != NULL);
606: KASSERT(*pa->pa_backingmapptr != NULL);
607: SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
608: pa_reclaim_register(pa);
609: }
1.134 ad 610:
611: pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
612: 0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
613:
614: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
615: 0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
1.94 simonb 616: }
617:
618: /*
1.3 pk 619: * Initialize the given pool resource structure.
620: *
621: * We export this routine to allow other kernel parts to declare
622: * static pools that must be initialized before malloc() is available.
623: */
624: void
1.42 thorpej 625: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 626: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 627: {
1.116 simonb 628: #ifdef DEBUG
629: struct pool *pp1;
630: #endif
1.92 enami 631: size_t trysize, phsize;
1.134 ad 632: int off, slack;
1.3 pk 633:
1.116 simonb 634: #ifdef DEBUG
635: /*
636: * Check that the pool hasn't already been initialised and
637: * added to the list of all pools.
638: */
639: LIST_FOREACH(pp1, &pool_head, pr_poollist) {
640: if (pp == pp1)
641: panic("pool_init: pool %s already initialised",
642: wchan);
643: }
644: #endif
645:
1.25 thorpej 646: #ifdef POOL_DIAGNOSTIC
647: /*
648: * Always log if POOL_DIAGNOSTIC is defined.
649: */
650: if (pool_logsize != 0)
651: flags |= PR_LOGGING;
652: #endif
653:
1.66 thorpej 654: if (palloc == NULL)
655: palloc = &pool_allocator_kmem;
1.112 bjh21 656: #ifdef POOL_SUBPAGE
657: if (size > palloc->pa_pagesz) {
658: if (palloc == &pool_allocator_kmem)
659: palloc = &pool_allocator_kmem_fullpage;
660: else if (palloc == &pool_allocator_nointr)
661: palloc = &pool_allocator_nointr_fullpage;
662: }
1.66 thorpej 663: #endif /* POOL_SUBPAGE */
664: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.112 bjh21 665: if (palloc->pa_pagesz == 0)
1.66 thorpej 666: palloc->pa_pagesz = PAGE_SIZE;
667:
668: TAILQ_INIT(&palloc->pa_list);
669:
1.134 ad 670: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 671: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
672: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.117 yamt 673:
674: if (palloc->pa_backingmapptr != NULL) {
675: pa_reclaim_register(palloc);
676: }
1.66 thorpej 677: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 678: }
1.3 pk 679:
680: if (align == 0)
681: align = ALIGN(1);
1.14 thorpej 682:
1.120 yamt 683: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 684: size = sizeof(struct pool_item);
1.3 pk 685:
1.78 thorpej 686: size = roundup(size, align);
1.66 thorpej 687: #ifdef DIAGNOSTIC
688: if (size > palloc->pa_pagesz)
1.121 yamt 689: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 690: #endif
1.35 pk 691:
1.3 pk 692: /*
693: * Initialize the pool structure.
694: */
1.88 chs 695: LIST_INIT(&pp->pr_emptypages);
696: LIST_INIT(&pp->pr_fullpages);
697: LIST_INIT(&pp->pr_partpages);
1.134 ad 698: pp->pr_cache = NULL;
1.3 pk 699: pp->pr_curpage = NULL;
700: pp->pr_npages = 0;
701: pp->pr_minitems = 0;
702: pp->pr_minpages = 0;
703: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 704: pp->pr_roflags = flags;
705: pp->pr_flags = 0;
1.35 pk 706: pp->pr_size = size;
1.3 pk 707: pp->pr_align = align;
708: pp->pr_wchan = wchan;
1.66 thorpej 709: pp->pr_alloc = palloc;
1.20 thorpej 710: pp->pr_nitems = 0;
711: pp->pr_nout = 0;
712: pp->pr_hardlimit = UINT_MAX;
713: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 714: pp->pr_hardlimit_ratecap.tv_sec = 0;
715: pp->pr_hardlimit_ratecap.tv_usec = 0;
716: pp->pr_hardlimit_warning_last.tv_sec = 0;
717: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 718: pp->pr_drain_hook = NULL;
719: pp->pr_drain_hook_arg = NULL;
1.125 ad 720: pp->pr_freecheck = NULL;
1.3 pk 721:
722: /*
              723: * Decide whether to put the page header off-page, to avoid
1.92 enami   724: * wasting too large a part of the page or too big an item.
              725: * Off-page page headers go into a splay tree, so we can match
              726: * a returned item with its header based on the page address.
              727: * We use 1/16 of the page size and about 8 times the item
              728: * size as the threshold (XXX: tune).
729: *
730: * However, we'll put the header into the page if we can put
731: * it without wasting any items.
732: *
733: * Silently enforce `0 <= ioff < align'.
1.3 pk 734: */
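	/*
	 * A worked example of the threshold check below: with 4 KB pages
	 * and phsize = ALIGN(sizeof(struct pool_item_header)), the header
	 * stays in-page when the item size is below
	 * MIN(4096 / 16, phsize * 8), i.e. at most 256 bytes, or when
	 * giving up phsize bytes at the end of the page would not reduce
	 * the number of items that fit.
	 */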
1.92 enami 735: pp->pr_itemoffset = ioff %= align;
736: /* See the comment below about reserved bytes. */
737: trysize = palloc->pa_pagesz - ((align - ioff) % align);
738: phsize = ALIGN(sizeof(struct pool_item_header));
1.121 yamt 739: if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 740: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
741: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 742: /* Use the end of the page for the page header */
1.20 thorpej 743: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 744: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 745: } else {
1.3 pk 746: /* The page header will be taken from our page header pool */
747: pp->pr_phoffset = 0;
1.66 thorpej 748: off = palloc->pa_pagesz;
1.88 chs 749: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 750: }
1.1 pk 751:
1.3 pk 752: /*
753: * Alignment is to take place at `ioff' within the item. This means
754: * we must reserve up to `align - 1' bytes on the page to allow
755: * appropriate positioning of each item.
756: */
757: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 758: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 759: if ((pp->pr_roflags & PR_NOTOUCH)) {
760: int idx;
761:
762: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
763: idx++) {
764: /* nothing */
765: }
766: if (idx >= PHPOOL_MAX) {
767: /*
              768: * if you see this panic, consider tweaking
769: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
770: */
771: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
772: pp->pr_wchan, pp->pr_itemsperpage);
773: }
774: pp->pr_phpool = &phpool[idx];
775: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
776: pp->pr_phpool = &phpool[0];
777: }
778: #if defined(DIAGNOSTIC)
779: else {
780: pp->pr_phpool = NULL;
781: }
782: #endif
1.3 pk 783:
784: /*
785: * Use the slack between the chunks and the page header
786: * for "cache coloring".
787: */
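	/*
	 * A worked example of the coloring arithmetic below: if a page
	 * has 40 bytes of slack and the alignment is 16, then
	 * pr_maxcolor = (40 / 16) * 16 = 32, and successive pages place
	 * their first item at offsets 0, 16, 32, 0, ... spreading items
	 * across cache lines.
	 */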
788: slack = off - pp->pr_itemsperpage * pp->pr_size;
789: pp->pr_maxcolor = (slack / align) * align;
790: pp->pr_curcolor = 0;
791:
792: pp->pr_nget = 0;
793: pp->pr_nfail = 0;
794: pp->pr_nput = 0;
795: pp->pr_npagealloc = 0;
796: pp->pr_npagefree = 0;
1.1 pk 797: pp->pr_hiwat = 0;
1.8 thorpej 798: pp->pr_nidle = 0;
1.134 ad 799: pp->pr_refcnt = 0;
1.3 pk 800:
1.59 thorpej 801: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 802: if (flags & PR_LOGGING) {
803: if (kmem_map == NULL ||
804: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
805: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 806: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 807: pp->pr_curlogentry = 0;
808: pp->pr_logsize = pool_logsize;
809: }
1.59 thorpej 810: #endif
1.25 thorpej 811:
812: pp->pr_entered_file = NULL;
813: pp->pr_entered_line = 0;
1.3 pk 814:
1.138 ad 815: /*
816: * XXXAD hack to prevent IP input processing from blocking.
817: */
818: if (ipl == IPL_SOFTNET) {
819: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
820: } else {
821: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
822: }
1.134 ad 823: cv_init(&pp->pr_cv, wchan);
824: pp->pr_ipl = ipl;
1.1 pk 825:
1.3 pk 826: /*
1.43 thorpej 827: * Initialize private page header pool and cache magazine pool if we
828: * haven't done so yet.
1.23 thorpej 829: * XXX LOCKING.
1.3 pk 830: */
1.97 yamt 831: if (phpool[0].pr_size == 0) {
832: int idx;
833: for (idx = 0; idx < PHPOOL_MAX; idx++) {
834: static char phpool_names[PHPOOL_MAX][6+1+6+1];
835: int nelem;
836: size_t sz;
837:
838: nelem = PHPOOL_FREELIST_NELEM(idx);
839: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
840: "phpool-%d", nelem);
841: sz = sizeof(struct pool_item_header);
842: if (nelem) {
1.135 yamt 843: sz = offsetof(struct pool_item_header,
844: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
1.97 yamt 845: }
846: pool_init(&phpool[idx], sz, 0, 0, 0,
1.129 ad 847: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.97 yamt 848: }
1.62 bjh21 849: #ifdef POOL_SUBPAGE
850: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.129 ad 851: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
1.62 bjh21 852: #endif
1.134 ad 853: pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0,
854: "cachegrp", &pool_allocator_meta, IPL_VM);
1.1 pk 855: }
856:
1.134 ad 857: if (__predict_true(!cold)) {
858: /* Insert into the list of all pools. */
859: mutex_enter(&pool_head_lock);
860: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
861: mutex_exit(&pool_head_lock);
862:
863: /* Insert this into the list of pools using this allocator. */
864: mutex_enter(&palloc->pa_lock);
865: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
866: mutex_exit(&palloc->pa_lock);
867: } else {
868: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
869: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
870: }
1.66 thorpej 871:
1.117 yamt 872: pool_reclaim_register(pp);
1.1 pk 873: }
874:
875: /*
              876: * De-commission a pool resource.
877: */
878: void
1.42 thorpej 879: pool_destroy(struct pool *pp)
1.1 pk 880: {
1.101 thorpej 881: struct pool_pagelist pq;
1.3 pk 882: struct pool_item_header *ph;
1.43 thorpej 883:
1.101 thorpej 884: /* Remove from global pool list */
1.134 ad 885: mutex_enter(&pool_head_lock);
886: while (pp->pr_refcnt != 0)
887: cv_wait(&pool_busy, &pool_head_lock);
1.102 chs 888: LIST_REMOVE(pp, pr_poollist);
1.101 thorpej 889: if (drainpp == pp)
890: drainpp = NULL;
1.134 ad 891: mutex_exit(&pool_head_lock);
1.101 thorpej 892:
893: /* Remove this pool from its allocator's list of pools. */
1.117 yamt 894: pool_reclaim_unregister(pp);
1.134 ad 895: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 896: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 897: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 898:
1.134 ad 899: mutex_enter(&pp->pr_lock);
1.101 thorpej 900:
1.134 ad 901: KASSERT(pp->pr_cache == NULL);
1.3 pk 902:
903: #ifdef DIAGNOSTIC
1.20 thorpej 904: if (pp->pr_nout != 0) {
1.25 thorpej 905: pr_printlog(pp, NULL, printf);
1.80 provos 906: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 907: pp->pr_nout);
1.3 pk 908: }
909: #endif
1.1 pk 910:
1.101 thorpej 911: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
912: KASSERT(LIST_EMPTY(&pp->pr_partpages));
913:
1.3 pk 914: /* Remove all pages */
1.101 thorpej 915: LIST_INIT(&pq);
1.88 chs 916: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 917: pr_rmpage(pp, ph, &pq);
918:
1.134 ad 919: mutex_exit(&pp->pr_lock);
1.3 pk 920:
1.101 thorpej 921: pr_pagelist_free(pp, &pq);
1.3 pk 922:
1.59 thorpej 923: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 924: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 925: free(pp->pr_log, M_TEMP);
1.59 thorpej 926: #endif
1.134 ad 927:
928: cv_destroy(&pp->pr_cv);
929: mutex_destroy(&pp->pr_lock);
1.1 pk 930: }
931:
1.68 thorpej 932: void
933: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
934: {
935:
936: /* XXX no locking -- must be used just after pool_init() */
937: #ifdef DIAGNOSTIC
938: if (pp->pr_drain_hook != NULL)
939: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
940: #endif
941: pp->pr_drain_hook = fn;
942: pp->pr_drain_hook_arg = arg;
943: }
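/*
 * A usage sketch, assuming a hypothetical foo_drain() callback that
 * releases spare foo objects back to the pool when memory is tight:
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		... return spare items with pool_put() ...
 *	}
 *
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */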
944:
1.88 chs 945: static struct pool_item_header *
1.128 christos 946: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 947: {
948: struct pool_item_header *ph;
949:
950: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 951: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.134 ad 952: else
1.97 yamt 953: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 954:
955: return (ph);
956: }
1.1 pk 957:
958: /*
1.134 ad 959: * Grab an item from the pool.
1.1 pk 960: */
1.3 pk 961: void *
1.59 thorpej 962: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 963: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 964: #else
965: pool_get(struct pool *pp, int flags)
966: #endif
1.1 pk 967: {
968: struct pool_item *pi;
1.3 pk 969: struct pool_item_header *ph;
1.55 thorpej 970: void *v;
1.1 pk 971:
1.2 pk 972: #ifdef DIAGNOSTIC
1.95 atatat 973: if (__predict_false(pp->pr_itemsperpage == 0))
974: panic("pool_get: pool %p: pr_itemsperpage is zero, "
975: "pool not initialized?", pp);
1.84 thorpej 976: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 977: (flags & PR_WAITOK) != 0))
1.77 matt 978: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 979:
1.102 chs 980: #endif /* DIAGNOSTIC */
1.58 thorpej 981: #ifdef LOCKDEBUG
982: if (flags & PR_WAITOK)
1.119 yamt 983: ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 984: #endif
1.1 pk 985:
1.134 ad 986: mutex_enter(&pp->pr_lock);
1.25 thorpej 987: pr_enter(pp, file, line);
1.20 thorpej 988:
989: startover:
990: /*
991: * Check to see if we've reached the hard limit. If we have,
992: * and we can wait, then wait until an item has been returned to
993: * the pool.
994: */
995: #ifdef DIAGNOSTIC
1.34 thorpej 996: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 997: pr_leave(pp);
1.134 ad 998: mutex_exit(&pp->pr_lock);
1.20 thorpej 999: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
1000: }
1001: #endif
1.34 thorpej 1002: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 1003: if (pp->pr_drain_hook != NULL) {
1004: /*
1005: * Since the drain hook is going to free things
1006: * back to the pool, unlock, call the hook, re-lock,
1007: * and check the hardlimit condition again.
1008: */
1009: pr_leave(pp);
1.134 ad 1010: mutex_exit(&pp->pr_lock);
1.68 thorpej 1011: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 1012: mutex_enter(&pp->pr_lock);
1.68 thorpej 1013: pr_enter(pp, file, line);
1014: if (pp->pr_nout < pp->pr_hardlimit)
1015: goto startover;
1016: }
1017:
1.29 sommerfe 1018: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 1019: /*
1020: * XXX: A warning isn't logged in this case. Should
1021: * it be?
1022: */
1023: pp->pr_flags |= PR_WANTED;
1.25 thorpej 1024: pr_leave(pp);
1.134 ad 1025: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.25 thorpej 1026: pr_enter(pp, file, line);
1.20 thorpej 1027: goto startover;
1028: }
1.31 thorpej 1029:
1030: /*
1031: * Log a message that the hard limit has been hit.
1032: */
1033: if (pp->pr_hardlimit_warning != NULL &&
1034: ratecheck(&pp->pr_hardlimit_warning_last,
1035: &pp->pr_hardlimit_ratecap))
1036: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 1037:
1038: pp->pr_nfail++;
1039:
1.25 thorpej 1040: pr_leave(pp);
1.134 ad 1041: mutex_exit(&pp->pr_lock);
1.20 thorpej 1042: return (NULL);
1043: }
1044:
1.3 pk 1045: /*
1046: * The convention we use is that if `curpage' is not NULL, then
1047: * it points at a non-empty bucket. In particular, `curpage'
1048: * never points at a page header which has PR_PHINPAGE set and
1049: * has no items in its bucket.
1050: */
1.20 thorpej 1051: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 1052: int error;
1053:
1.20 thorpej 1054: #ifdef DIAGNOSTIC
1055: if (pp->pr_nitems != 0) {
1.134 ad 1056: mutex_exit(&pp->pr_lock);
1.20 thorpej 1057: printf("pool_get: %s: curpage NULL, nitems %u\n",
1058: pp->pr_wchan, pp->pr_nitems);
1.80 provos 1059: panic("pool_get: nitems inconsistent");
1.20 thorpej 1060: }
1061: #endif
1062:
1.21 thorpej 1063: /*
1064: * Call the back-end page allocator for more memory.
1065: * Release the pool lock, as the back-end page allocator
1066: * may block.
1067: */
1.25 thorpej 1068: pr_leave(pp);
1.113 yamt 1069: error = pool_grow(pp, flags);
1070: pr_enter(pp, file, line);
1071: if (error != 0) {
1.21 thorpej 1072: /*
1.55 thorpej 1073: * We were unable to allocate a page or item
1074: * header, but we released the lock during
1075: * allocation, so perhaps items were freed
1076: * back to the pool. Check for this case.
1.21 thorpej 1077: */
1078: if (pp->pr_curpage != NULL)
1079: goto startover;
1.15 pk 1080:
1.117 yamt 1081: pp->pr_nfail++;
1.25 thorpej 1082: pr_leave(pp);
1.134 ad 1083: mutex_exit(&pp->pr_lock);
1.117 yamt 1084: return (NULL);
1.1 pk 1085: }
1.3 pk 1086:
1.20 thorpej 1087: /* Start the allocation process over. */
1088: goto startover;
1.3 pk 1089: }
1.97 yamt 1090: if (pp->pr_roflags & PR_NOTOUCH) {
1091: #ifdef DIAGNOSTIC
1092: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1093: pr_leave(pp);
1.134 ad 1094: mutex_exit(&pp->pr_lock);
1.97 yamt 1095: panic("pool_get: %s: page empty", pp->pr_wchan);
1096: }
1097: #endif
1098: v = pr_item_notouch_get(pp, ph);
1099: #ifdef POOL_DIAGNOSTIC
1100: pr_log(pp, v, PRLOG_GET, file, line);
1101: #endif
1102: } else {
1.102 chs 1103: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 1104: if (__predict_false(v == NULL)) {
1105: pr_leave(pp);
1.134 ad 1106: mutex_exit(&pp->pr_lock);
1.97 yamt 1107: panic("pool_get: %s: page empty", pp->pr_wchan);
1108: }
1.20 thorpej 1109: #ifdef DIAGNOSTIC
1.97 yamt 1110: if (__predict_false(pp->pr_nitems == 0)) {
1111: pr_leave(pp);
1.134 ad 1112: mutex_exit(&pp->pr_lock);
1.97 yamt 1113: printf("pool_get: %s: items on itemlist, nitems %u\n",
1114: pp->pr_wchan, pp->pr_nitems);
1115: panic("pool_get: nitems inconsistent");
1116: }
1.65 enami 1117: #endif
1.56 sommerfe 1118:
1.65 enami 1119: #ifdef POOL_DIAGNOSTIC
1.97 yamt 1120: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 1121: #endif
1.3 pk 1122:
1.65 enami 1123: #ifdef DIAGNOSTIC
1.97 yamt 1124: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1125: pr_printlog(pp, pi, printf);
1126: panic("pool_get(%s): free list modified: "
1127: "magic=%x; page %p; item addr %p\n",
1128: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1129: }
1.3 pk 1130: #endif
1131:
1.97 yamt 1132: /*
1133: * Remove from item list.
1134: */
1.102 chs 1135: LIST_REMOVE(pi, pi_list);
1.97 yamt 1136: }
1.20 thorpej 1137: pp->pr_nitems--;
1138: pp->pr_nout++;
1.6 thorpej 1139: if (ph->ph_nmissing == 0) {
1140: #ifdef DIAGNOSTIC
1.34 thorpej 1141: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 1142: panic("pool_get: nidle inconsistent");
1143: #endif
1144: pp->pr_nidle--;
1.88 chs 1145:
1146: /*
1147: * This page was previously empty. Move it to the list of
1148: * partially-full pages. This page is already curpage.
1149: */
1150: LIST_REMOVE(ph, ph_pagelist);
1151: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 1152: }
1.3 pk 1153: ph->ph_nmissing++;
1.97 yamt 1154: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1155: #ifdef DIAGNOSTIC
1.97 yamt 1156: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 1157: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1158: pr_leave(pp);
1.134 ad 1159: mutex_exit(&pp->pr_lock);
1.21 thorpej 1160: panic("pool_get: %s: nmissing inconsistent",
1161: pp->pr_wchan);
1162: }
1163: #endif
1.3 pk 1164: /*
1.88 chs 1165: * This page is now full. Move it to the full list
1166: * and select a new current page.
1.3 pk 1167: */
1.88 chs 1168: LIST_REMOVE(ph, ph_pagelist);
1169: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1170: pool_update_curpage(pp);
1.1 pk 1171: }
1.3 pk 1172:
1173: pp->pr_nget++;
1.111 christos 1174: pr_leave(pp);
1.20 thorpej 1175:
1176: /*
1177: * If we have a low water mark and we are now below that low
1178: * water mark, add more items to the pool.
1179: */
1.53 thorpej 1180: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1181: /*
1182: * XXX: Should we log a warning? Should we set up a timeout
1183: * to try again in a second or so? The latter could break
1184: * a caller's assumptions about interrupt protection, etc.
1185: */
1186: }
1187:
1.134 ad 1188: mutex_exit(&pp->pr_lock);
1.125 ad 1189: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
1190: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 1191: return (v);
1192: }
1193:
1194: /*
1.43 thorpej 1195: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1196: */
1.43 thorpej 1197: static void
1.101 thorpej 1198: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1199: {
1200: struct pool_item *pi = v;
1.3 pk 1201: struct pool_item_header *ph;
1202:
1.134 ad 1203: KASSERT(mutex_owned(&pp->pr_lock));
1.125 ad 1204: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 1205: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 1206:
1.30 thorpej 1207: #ifdef DIAGNOSTIC
1.34 thorpej 1208: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1209: printf("pool %s: putting with none out\n",
1210: pp->pr_wchan);
1211: panic("pool_put");
1212: }
1213: #endif
1.3 pk 1214:
1.121 yamt 1215: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.25 thorpej 1216: pr_printlog(pp, NULL, printf);
1.3 pk 1217: panic("pool_put: %s: page header missing", pp->pr_wchan);
1218: }
1.28 thorpej 1219:
1.3 pk 1220: /*
1221: * Return to item list.
1222: */
1.97 yamt 1223: if (pp->pr_roflags & PR_NOTOUCH) {
1224: pr_item_notouch_put(pp, ph, v);
1225: } else {
1.2 pk 1226: #ifdef DIAGNOSTIC
1.97 yamt 1227: pi->pi_magic = PI_MAGIC;
1.3 pk 1228: #endif
1.32 chs 1229: #ifdef DEBUG
1.97 yamt 1230: {
1231: int i, *ip = v;
1.32 chs 1232:
1.97 yamt 1233: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1234: *ip++ = PI_MAGIC;
1235: }
1.32 chs 1236: }
1237: #endif
1238:
1.102 chs 1239: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1240: }
1.79 thorpej 1241: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1242: ph->ph_nmissing--;
1243: pp->pr_nput++;
1.20 thorpej 1244: pp->pr_nitems++;
1245: pp->pr_nout--;
1.3 pk 1246:
1247: /* Cancel "pool empty" condition if it exists */
1248: if (pp->pr_curpage == NULL)
1249: pp->pr_curpage = ph;
1250:
1251: if (pp->pr_flags & PR_WANTED) {
1252: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1253: if (ph->ph_nmissing == 0)
1254: pp->pr_nidle++;
1.134 ad 1255: cv_broadcast(&pp->pr_cv);
1.3 pk 1256: return;
1257: }
1258:
1259: /*
1.88 chs 1260: * If this page is now empty, do one of two things:
1.21 thorpej 1261: *
1.88 chs 1262: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1263: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1264: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1265: * CLAIM.
1.21 thorpej 1266: *
1.88 chs 1267: * (2) Otherwise, move the page to the empty page list.
1268: *
1269: * Either way, select a new current page (so we use a partially-full
1270: * page if one is available).
1.3 pk 1271: */
1272: if (ph->ph_nmissing == 0) {
1.6 thorpej 1273: pp->pr_nidle++;
1.90 thorpej 1274: if (pp->pr_npages > pp->pr_minpages &&
1275: (pp->pr_npages > pp->pr_maxpages ||
1.117 yamt 1276: pa_starved_p(pp->pr_alloc))) {
1.101 thorpej 1277: pr_rmpage(pp, ph, pq);
1.3 pk 1278: } else {
1.88 chs 1279: LIST_REMOVE(ph, ph_pagelist);
1280: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1281:
1.21 thorpej 1282: /*
1283: * Update the timestamp on the page. A page must
1284: * be idle for some period of time before it can
1285: * be reclaimed by the pagedaemon. This minimizes
1286: * ping-pong'ing for memory.
1287: */
1.118 kardel 1288: getmicrotime(&ph->ph_time);
1.1 pk 1289: }
1.88 chs 1290: pool_update_curpage(pp);
1.1 pk 1291: }
1.88 chs 1292:
1.21 thorpej 1293: /*
1.88 chs 1294: * If the page was previously completely full, move it to the
1295: * partially-full list and make it the current page. The next
1296: * allocation will get the item from this page, instead of
1297: * further fragmenting the pool.
1.21 thorpej 1298: */
1299: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1300: LIST_REMOVE(ph, ph_pagelist);
1301: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1302: pp->pr_curpage = ph;
1303: }
1.43 thorpej 1304: }
1305:
1306: /*
1.134 ad 1307: * Return resource to the pool.
1.43 thorpej 1308: */
1.59 thorpej 1309: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1310: void
1311: _pool_put(struct pool *pp, void *v, const char *file, long line)
1312: {
1.101 thorpej 1313: struct pool_pagelist pq;
1314:
1315: LIST_INIT(&pq);
1.43 thorpej 1316:
1.134 ad 1317: mutex_enter(&pp->pr_lock);
1.43 thorpej 1318: pr_enter(pp, file, line);
1319:
1.56 sommerfe 1320: pr_log(pp, v, PRLOG_PUT, file, line);
1321:
1.101 thorpej 1322: pool_do_put(pp, v, &pq);
1.21 thorpej 1323:
1.25 thorpej 1324: pr_leave(pp);
1.134 ad 1325: mutex_exit(&pp->pr_lock);
1.101 thorpej 1326:
1.102 chs 1327: pr_pagelist_free(pp, &pq);
1.1 pk 1328: }
1.57 sommerfe 1329: #undef pool_put
1.59 thorpej 1330: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1331:
1.56 sommerfe 1332: void
1333: pool_put(struct pool *pp, void *v)
1334: {
1.101 thorpej 1335: struct pool_pagelist pq;
1336:
1337: LIST_INIT(&pq);
1.56 sommerfe 1338:
1.134 ad 1339: mutex_enter(&pp->pr_lock);
1.101 thorpej 1340: pool_do_put(pp, v, &pq);
1.134 ad 1341: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1342:
1.102 chs 1343: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1344: }
1.57 sommerfe 1345:
1.59 thorpej 1346: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1347: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1348: #endif
1.74 thorpej 1349:
1350: /*
1.113 yamt 1351: * pool_grow: grow a pool by a page.
1352: *
1353: * => called with pool locked.
1354: * => unlock and relock the pool.
1355: * => return with pool locked.
1356: */
1357:
1358: static int
1359: pool_grow(struct pool *pp, int flags)
1360: {
1361: struct pool_item_header *ph = NULL;
1362: char *cp;
1363:
1.134 ad 1364: mutex_exit(&pp->pr_lock);
1.113 yamt 1365: cp = pool_allocator_alloc(pp, flags);
1366: if (__predict_true(cp != NULL)) {
1367: ph = pool_alloc_item_header(pp, cp, flags);
1368: }
1369: if (__predict_false(cp == NULL || ph == NULL)) {
1370: if (cp != NULL) {
1371: pool_allocator_free(pp, cp);
1372: }
1.134 ad 1373: mutex_enter(&pp->pr_lock);
1.113 yamt 1374: return ENOMEM;
1375: }
1376:
1.134 ad 1377: mutex_enter(&pp->pr_lock);
1.113 yamt 1378: pool_prime_page(pp, cp, ph);
1379: pp->pr_npagealloc++;
1380: return 0;
1381: }
1382:
1383: /*
1.74 thorpej 1384: * Add N items to the pool.
1385: */
1386: int
1387: pool_prime(struct pool *pp, int n)
1388: {
1.75 simonb 1389: int newpages;
1.113 yamt 1390: int error = 0;
1.74 thorpej 1391:
1.134 ad 1392: mutex_enter(&pp->pr_lock);
1.74 thorpej 1393:
1394: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1395:
1396: while (newpages-- > 0) {
1.113 yamt 1397: error = pool_grow(pp, PR_NOWAIT);
1398: if (error) {
1.74 thorpej 1399: break;
1400: }
1401: pp->pr_minpages++;
1402: }
1403:
1404: if (pp->pr_minpages >= pp->pr_maxpages)
1405: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1406:
1.134 ad 1407: mutex_exit(&pp->pr_lock);
1.113 yamt 1408: return error;
1.74 thorpej 1409: }
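/*
 * A usage sketch with illustrative figures: pre-allocate 128 items
 * in a hypothetical foo_pool so that early PR_NOWAIT allocations do
 * not fail, then keep at least 64 on hand:
 *
 *	pool_prime(&foo_pool, 128);
 *	pool_setlowat(&foo_pool, 64);
 */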
1.55 thorpej 1410:
1411: /*
1.3 pk 1412: * Add a page worth of items to the pool.
1.21 thorpej 1413: *
1414: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1415: */
1.55 thorpej 1416: static void
1.128 christos 1417: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1418: {
1419: struct pool_item *pi;
1.128 christos 1420: void *cp = storage;
1.125 ad 1421: const unsigned int align = pp->pr_align;
1422: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1423: int n;
1.36 pk 1424:
1.134 ad 1425: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1426:
1.66 thorpej 1427: #ifdef DIAGNOSTIC
1.121 yamt 1428: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1429: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1430: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1431: #endif
1.3 pk 1432:
1433: /*
1434: * Insert page header.
1435: */
1.88 chs 1436: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1437: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1438: ph->ph_page = storage;
1439: ph->ph_nmissing = 0;
1.118 kardel 1440: getmicrotime(&ph->ph_time);
1.88 chs 1441: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1442: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1443:
1.6 thorpej 1444: pp->pr_nidle++;
1445:
1.3 pk 1446: /*
1447: * Color this page.
1448: */
1.141 ! yamt 1449: ph->ph_off = pp->pr_curcolor;
! 1450: cp = (char *)cp + ph->ph_off;
1.3 pk 1451: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1452: pp->pr_curcolor = 0;
1453:
1454: /*
              1455: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1456: */
1457: if (ioff != 0)
1.128 christos 1458: cp = (char *)cp + align - ioff;
1.3 pk 1459:
1.125 ad 1460: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1461:
1.3 pk 1462: /*
1463: * Insert remaining chunks on the bucket list.
1464: */
1465: n = pp->pr_itemsperpage;
1.20 thorpej 1466: pp->pr_nitems += n;
1.3 pk 1467:
1.97 yamt 1468: if (pp->pr_roflags & PR_NOTOUCH) {
1.141 ! yamt 1469: pr_item_notouch_init(pp, ph);
1.97 yamt 1470: } else {
1471: while (n--) {
1472: pi = (struct pool_item *)cp;
1.78 thorpej 1473:
1.97 yamt 1474: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1475:
1.97 yamt 1476: /* Insert on page list */
1.102 chs 1477: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1478: #ifdef DIAGNOSTIC
1.97 yamt 1479: pi->pi_magic = PI_MAGIC;
1.3 pk 1480: #endif
1.128 christos 1481: cp = (char *)cp + pp->pr_size;
1.125 ad 1482:
1483: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1484: }
1.3 pk 1485: }
1486:
1487: /*
1488: * If the pool was depleted, point at the new page.
1489: */
1490: if (pp->pr_curpage == NULL)
1491: pp->pr_curpage = ph;
1492:
1493: if (++pp->pr_npages > pp->pr_hiwat)
1494: pp->pr_hiwat = pp->pr_npages;
1495: }
1496:
1.20 thorpej 1497: /*
1.52 thorpej 1498: * Used by pool_get() when nitems drops below the low water mark, to
1.88 chs     1499: * catch pr_nitems back up to the low water mark.
1.20 thorpej 1500: *
1.21 thorpej 1501: * Note 1: we never wait for memory here; we let the caller decide what to do.
1.20 thorpej 1502: *
1.73 thorpej 1503: * Note 2: we must be called with the pool already locked, and we return
1.20 thorpej 1504: * with it locked.
1505: */
1506: static int
1.42 thorpej 1507: pool_catchup(struct pool *pp)
1.20 thorpej 1508: {
1509: int error = 0;
1510:
1.54 thorpej 1511: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1512: error = pool_grow(pp, PR_NOWAIT);
1513: if (error) {
1.20 thorpej 1514: break;
1515: }
1516: }
1.113 yamt 1517: return error;
1.20 thorpej 1518: }
1519:
1.88 chs 1520: static void
1521: pool_update_curpage(struct pool *pp)
1522: {
1523:
1524: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1525: if (pp->pr_curpage == NULL) {
1526: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1527: }
1528: }
1529:
1.3 pk 1530: void
1.42 thorpej 1531: pool_setlowat(struct pool *pp, int n)
1.3 pk 1532: {
1.15 pk 1533:
1.134 ad 1534: mutex_enter(&pp->pr_lock);
1.21 thorpej 1535:
1.3 pk 1536: pp->pr_minitems = n;
1.15 pk 1537: pp->pr_minpages = (n == 0)
1538: ? 0
1.18 thorpej 1539: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1540:
1541: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1542: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1543: /*
1544: * XXX: Should we log a warning? Should we set up a timeout
1545: * to try again in a second or so? The latter could break
1546: * a caller's assumptions about interrupt protection, etc.
1547: */
1548: }
1.21 thorpej 1549:
1.134 ad 1550: mutex_exit(&pp->pr_lock);
1.3 pk 1551: }
1552:
1553: void
1.42 thorpej 1554: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1555: {
1.15 pk 1556:
1.134 ad 1557: mutex_enter(&pp->pr_lock);
1.21 thorpej 1558:
1.15 pk 1559: pp->pr_maxpages = (n == 0)
1560: ? 0
1.18 thorpej 1561: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1562:
1.134 ad 1563: mutex_exit(&pp->pr_lock);
1.3 pk 1564: }
1565:
1.20 thorpej 1566: void
1.42 thorpej 1567: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1568: {
1569:
1.134 ad 1570: mutex_enter(&pp->pr_lock);
1.20 thorpej 1571:
1572: pp->pr_hardlimit = n;
1573: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1574: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1575: pp->pr_hardlimit_warning_last.tv_sec = 0;
1576: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1577:
1578: /*
1.21 thorpej 1579: * In-line version of pool_sethiwat(), because we don't want to
1580: * release the lock.
1.20 thorpej 1581: */
1582: pp->pr_maxpages = (n == 0)
1583: ? 0
1584: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1585:
1.134 ad 1586: mutex_exit(&pp->pr_lock);
1.20 thorpej 1587: }
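/*
 * A usage sketch with illustrative figures: cap a hypothetical
 * foo_pool at 1024 outstanding items, logging the warning at most
 * once per 60 seconds when the cap is hit:
 *
 *	pool_sethardlimit(&foo_pool, 1024,
 *	    "WARNING: foo_pool limit reached", 60);
 */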
1.3 pk 1588:
1589: /*
1590: * Release all complete pages that have not been used recently.
1591: */
1.66 thorpej 1592: int
1.59 thorpej 1593: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1594: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1595: #else
1596: pool_reclaim(struct pool *pp)
1597: #endif
1.3 pk 1598: {
1599: struct pool_item_header *ph, *phnext;
1.61 chs 1600: struct pool_pagelist pq;
1.102 chs 1601: struct timeval curtime, diff;
1.134 ad 1602: bool klock;
1603: int rv;
1.3 pk 1604:
1.68 thorpej 1605: if (pp->pr_drain_hook != NULL) {
1606: /*
1607: * The drain hook must be called with the pool unlocked.
1608: */
1609: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1610: }
1611:
1.134 ad 1612: /*
              1613: * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks
              1614: * and we are called from the pagedaemon without kernel_lock,
              1615: * take kernel_lock here.  Does not apply to IPL_SOFTBIO.
1616: */
1617: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1618: pp->pr_ipl == IPL_SOFTSERIAL) {
1619: KERNEL_LOCK(1, NULL);
1620: klock = true;
1621: } else
1622: klock = false;
1623:
1624: /* Reclaim items from the pool's cache (if any). */
1625: if (pp->pr_cache != NULL)
1626: pool_cache_invalidate(pp->pr_cache);
1627:
1628: if (mutex_tryenter(&pp->pr_lock) == 0) {
1629: if (klock) {
1630: KERNEL_UNLOCK_ONE(NULL);
1631: }
1.66 thorpej 1632: return (0);
1.134 ad 1633: }
1.25 thorpej 1634: pr_enter(pp, file, line);
1.68 thorpej 1635:
1.88 chs 1636: LIST_INIT(&pq);
1.43 thorpej 1637:
1.118 kardel 1638: getmicrotime(&curtime);
1.21 thorpej 1639:
1.88 chs 1640: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1641: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1642:
1643: /* Check our minimum page claim */
1644: if (pp->pr_npages <= pp->pr_minpages)
1645: break;
1646:
1.88 chs 1647: KASSERT(ph->ph_nmissing == 0);
1648: timersub(&curtime, &ph->ph_time, &diff);
1.117 yamt 1649: if (diff.tv_sec < pool_inactive_time
1650: && !pa_starved_p(pp->pr_alloc))
1.88 chs 1651: continue;
1.21 thorpej 1652:
1.88 chs 1653: /*
1654: * If freeing this page would put us below
1655: * the low water mark, stop now.
1656: */
1657: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1658: pp->pr_minitems)
1659: break;
1.21 thorpej 1660:
1.88 chs 1661: pr_rmpage(pp, ph, &pq);
1.3 pk 1662: }
1663:
1.25 thorpej 1664: pr_leave(pp);
1.134 ad 1665: mutex_exit(&pp->pr_lock);
1666:
1667: if (LIST_EMPTY(&pq))
1668: rv = 0;
1669: else {
1670: pr_pagelist_free(pp, &pq);
1671: rv = 1;
1672: }
1673:
1674: if (klock) {
1675: KERNEL_UNLOCK_ONE(NULL);
1676: }
1.66 thorpej 1677:
1.134 ad 1678: return (rv);
1.3 pk 1679: }
1680:
1681: /*
1.134 ad 1682: * Drain pools, one at a time. This is a two stage process;
1683: * drain_start kicks off a cross call to drain CPU-level caches
1684: * if the pool has an associated pool_cache. drain_end waits
1685: * for those cross calls to finish, and then drains the cache
1686: * (if any) and pool.
1.131 ad 1687: *
1.134 ad 1688: * Note, must never be called from interrupt context.
1.3 pk 1689: */
1690: void
1.134 ad 1691: pool_drain_start(struct pool **ppp, uint64_t *wp)
1.3 pk 1692: {
1693: struct pool *pp;
1.134 ad 1694:
1695: KASSERT(!LIST_EMPTY(&pool_head));
1.3 pk 1696:
1.61 chs 1697: pp = NULL;
1.134 ad 1698:
1699: /* Find next pool to drain, and add a reference. */
1700: mutex_enter(&pool_head_lock);
1701: do {
1702: if (drainpp == NULL) {
1703: drainpp = LIST_FIRST(&pool_head);
1704: }
1705: if (drainpp != NULL) {
1706: pp = drainpp;
1707: drainpp = LIST_NEXT(pp, pr_poollist);
1708: }
1709: /*
1710: * Skip completely idle pools. We depend on at least
1711: * one pool in the system being active.
1712: */
1713: } while (pp == NULL || pp->pr_npages == 0);
1714: pp->pr_refcnt++;
1715: mutex_exit(&pool_head_lock);
1716:
1717: /* If there is a pool_cache, drain CPU level caches. */
1718: *ppp = pp;
1719: if (pp->pr_cache != NULL) {
1720: *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
1721: pp->pr_cache, NULL);
1722: }
1723: }
1724:
1725: void
1726: pool_drain_end(struct pool *pp, uint64_t where)
1727: {
1728:
1729: if (pp == NULL)
1730: return;
1731:
1732: KASSERT(pp->pr_refcnt > 0);
1733:
1734: /* Wait for remote draining to complete. */
1735: if (pp->pr_cache != NULL)
1736: xc_wait(where);
1737:
              1738: 	/* Drain the cache (if any) and the pool. */
1739: pool_reclaim(pp);
1740:
              1741: 	/* Finally, release our reference on the pool. */
1742: mutex_enter(&pool_head_lock);
1743: pp->pr_refcnt--;
1744: cv_broadcast(&pool_busy);
1745: mutex_exit(&pool_head_lock);
1.3 pk 1746: }
1747:
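/*
 * Example (illustrative sketch): how a pagedaemon-style caller uses
 * the two-stage drain.  Starting the cross call first lets the
 * CPU-level cache transfers run while the caller performs other
 * reclamation work in between.
 */
static void
example_drain_one_pool(void)
{
	struct pool *pp;
	uint64_t where;

	pool_drain_start(&pp, &where);
	/* ... other reclamation work could be done here ... */
	pool_drain_end(pp, where);
}
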
1748: /*
1749: * Diagnostic helpers.
1750: */
1751: void
1.42 thorpej 1752: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1753: {
1754:
1.25 thorpej 1755: pool_print1(pp, modif, printf);
1.21 thorpej 1756: }
1757:
1.25 thorpej 1758: void
1.108 yamt 1759: pool_printall(const char *modif, void (*pr)(const char *, ...))
1760: {
1761: struct pool *pp;
1762:
1763: LIST_FOREACH(pp, &pool_head, pr_poollist) {
1764: pool_printit(pp, modif, pr);
1765: }
1766: }
1767:
1768: void
1.42 thorpej 1769: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1770: {
1771:
1772: if (pp == NULL) {
1773: (*pr)("Must specify a pool to print.\n");
1774: return;
1775: }
1776:
1777: pool_print1(pp, modif, pr);
1778: }
1779:
1.21 thorpej 1780: static void
1.124 yamt 1781: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1782: void (*pr)(const char *, ...))
1.88 chs 1783: {
1784: struct pool_item_header *ph;
1785: #ifdef DIAGNOSTIC
1786: struct pool_item *pi;
1787: #endif
1788:
1789: LIST_FOREACH(ph, pl, ph_pagelist) {
1790: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1791: ph->ph_page, ph->ph_nmissing,
1792: (u_long)ph->ph_time.tv_sec,
1793: (u_long)ph->ph_time.tv_usec);
1794: #ifdef DIAGNOSTIC
1.97 yamt 1795: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1796: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1797: if (pi->pi_magic != PI_MAGIC) {
1798: (*pr)("\t\t\titem %p, magic 0x%x\n",
1799: pi, pi->pi_magic);
1800: }
1.88 chs 1801: }
1802: }
1803: #endif
1804: }
1805: }
1806:
1807: static void
1.42 thorpej 1808: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1809: {
1.25 thorpej 1810: struct pool_item_header *ph;
1.134 ad 1811: pool_cache_t pc;
1812: pcg_t *pcg;
1813: pool_cache_cpu_t *cc;
1814: uint64_t cpuhit, cpumiss;
1.44 thorpej 1815: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1816: char c;
1817:
1818: while ((c = *modif++) != '\0') {
1819: if (c == 'l')
1820: print_log = 1;
1821: if (c == 'p')
1822: print_pagelist = 1;
1.44 thorpej 1823: if (c == 'c')
1824: print_cache = 1;
1.25 thorpej 1825: }
1826:
1.134 ad 1827: if ((pc = pp->pr_cache) != NULL) {
1828: (*pr)("POOL CACHE");
1829: } else {
1830: (*pr)("POOL");
1831: }
1832:
1833: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1834: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1835: pp->pr_roflags);
1.66 thorpej 1836: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1837: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1838: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1839: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1840: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1841:
1.134 ad 1842: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1843: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1844: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1845: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1846:
1847: if (print_pagelist == 0)
1848: goto skip_pagelist;
1849:
1.88 chs 1850: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1851: (*pr)("\n\tempty page list:\n");
1.97 yamt 1852: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1853: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1854: (*pr)("\n\tfull page list:\n");
1.97 yamt 1855: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1856: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1857: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1858: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1859:
1.25 thorpej 1860: if (pp->pr_curpage == NULL)
1861: (*pr)("\tno current page\n");
1862: else
1863: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1864:
1865: skip_pagelist:
1866: if (print_log == 0)
1867: goto skip_log;
1868:
1869: (*pr)("\n");
1870: if ((pp->pr_roflags & PR_LOGGING) == 0)
1871: (*pr)("\tno log\n");
1.122 christos 1872: else {
1.25 thorpej 1873: pr_printlog(pp, NULL, pr);
1.122 christos 1874: }
1.3 pk 1875:
1.25 thorpej 1876: skip_log:
1.44 thorpej 1877:
1.102 chs 1878: #define PR_GROUPLIST(pcg) \
1879: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1880: for (i = 0; i < PCG_NOBJECTS; i++) { \
1881: if (pcg->pcg_objects[i].pcgo_pa != \
1882: POOL_PADDR_INVALID) { \
1883: (*pr)("\t\t\t%p, 0x%llx\n", \
1884: pcg->pcg_objects[i].pcgo_va, \
1885: (unsigned long long) \
1886: pcg->pcg_objects[i].pcgo_pa); \
1887: } else { \
1888: (*pr)("\t\t\t%p\n", \
1889: pcg->pcg_objects[i].pcgo_va); \
1890: } \
1891: }
1892:
1.134 ad 1893: if (pc != NULL) {
1894: cpuhit = 0;
1895: cpumiss = 0;
1896: for (i = 0; i < MAXCPUS; i++) {
1897: if ((cc = pc->pc_cpus[i]) == NULL)
1898: continue;
1899: cpuhit += cc->cc_hits;
1900: cpumiss += cc->cc_misses;
1901: }
1902: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1903: (*pr)("\tcache layer hits %llu misses %llu\n",
1904: pc->pc_hits, pc->pc_misses);
1905: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1906: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1907: pc->pc_contended);
1908: (*pr)("\tcache layer empty groups %u full groups %u\n",
1909: pc->pc_nempty, pc->pc_nfull);
1910: if (print_cache) {
1911: (*pr)("\tfull cache groups:\n");
1912: for (pcg = pc->pc_fullgroups; pcg != NULL;
1913: pcg = pcg->pcg_next) {
1914: PR_GROUPLIST(pcg);
1915: }
1916: (*pr)("\tempty cache groups:\n");
1917: for (pcg = pc->pc_emptygroups; pcg != NULL;
1918: pcg = pcg->pcg_next) {
1919: PR_GROUPLIST(pcg);
1920: }
1.103 chs 1921: }
1.44 thorpej 1922: }
1.102 chs 1923: #undef PR_GROUPLIST
1.44 thorpej 1924:
1.88 chs 1925: pr_enter_check(pp, pr);
1926: }
1927:
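/*
 * Example (illustrative sketch): dump one pool's statistics to the
 * console, including its page lists ('p') and cache groups ('c');
 * 'l' would additionally print the diagnostic log, if compiled in.
 */
static struct pool example_print_pool;	/* assume pool_init() elsewhere */

static void
example_dump_pool(void)
{

	pool_printit(&example_print_pool, "pc", printf);
}
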
1928: static int
1929: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1930: {
1931: struct pool_item *pi;
1.128 christos 1932: void *page;
1.88 chs 1933: int n;
1934:
1.121 yamt 1935: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1936: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1937: if (page != ph->ph_page &&
1938: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1939: if (label != NULL)
1940: printf("%s: ", label);
1941: printf("pool(%p:%s): page inconsistency: page %p;"
1942: " at page head addr %p (p %p)\n", pp,
1943: pp->pr_wchan, ph->ph_page,
1944: ph, page);
1945: return 1;
1946: }
1.88 chs 1947: }
1.3 pk 1948:
1.97 yamt 1949: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1950: return 0;
1951:
1.102 chs 1952: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1953: pi != NULL;
1.102 chs 1954: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1955:
1956: #ifdef DIAGNOSTIC
1957: if (pi->pi_magic != PI_MAGIC) {
1958: if (label != NULL)
1959: printf("%s: ", label);
1960: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1961: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1962: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1963: n, pi);
1.88 chs 1964: panic("pool");
1965: }
1966: #endif
1.121 yamt 1967: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1968: continue;
1969: }
1.128 christos 1970: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1971: if (page == ph->ph_page)
1972: continue;
1973:
1974: if (label != NULL)
1975: printf("%s: ", label);
1976: printf("pool(%p:%s): page inconsistency: page %p;"
1977: " item ordinal %d; addr %p (p %p)\n", pp,
1978: pp->pr_wchan, ph->ph_page,
1979: n, pi, page);
1980: return 1;
1981: }
1982: return 0;
1.3 pk 1983: }
1984:
1.88 chs 1985:
1.3 pk 1986: int
1.42 thorpej 1987: pool_chk(struct pool *pp, const char *label)
1.3 pk 1988: {
1989: struct pool_item_header *ph;
1990: int r = 0;
1991:
1.134 ad 1992: mutex_enter(&pp->pr_lock);
1.88 chs 1993: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1994: r = pool_chk_page(pp, label, ph);
1995: if (r) {
1996: goto out;
1997: }
1998: }
1999: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2000: r = pool_chk_page(pp, label, ph);
2001: if (r) {
1.3 pk 2002: goto out;
2003: }
1.88 chs 2004: }
2005: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2006: r = pool_chk_page(pp, label, ph);
2007: if (r) {
1.3 pk 2008: goto out;
2009: }
2010: }
1.88 chs 2011:
1.3 pk 2012: out:
1.134 ad 2013: mutex_exit(&pp->pr_lock);
1.3 pk 2014: return (r);
1.43 thorpej 2015: }
2016:
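/*
 * Example (illustrative sketch): run a consistency sweep over one
 * pool; the label prefixes any complaint that pool_chk_page() prints.
 */
static struct pool example_chk_pool;	/* assume pool_init() elsewhere */

static void
example_check_pool(void)
{

	if (pool_chk(&example_chk_pool, "example sweep") != 0)
		panic("example_chk_pool: inconsistent");
}
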
2017: /*
2018: * pool_cache_init:
2019: *
2020: * Initialize a pool cache.
1.134 ad 2021: */
2022: pool_cache_t
2023: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
2024: const char *wchan, struct pool_allocator *palloc, int ipl,
2025: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
2026: {
2027: pool_cache_t pc;
2028:
2029: pc = pool_get(&cache_pool, PR_WAITOK);
2030: if (pc == NULL)
2031: return NULL;
2032:
2033: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
2034: palloc, ipl, ctor, dtor, arg);
2035:
2036: return pc;
2037: }
2038:
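/*
 * Example (illustrative sketch): create a cache of 256-byte objects
 * used from IPL_SOFTNET, with a hypothetical constructor/destructor
 * pair.  Passing NULL for palloc selects the standard allocator.
 */
static pool_cache_t example_cache;

static int
example_ctor(void *arg, void *obj, int flags)
{

	/* Bring the object to its constructed state. */
	memset(obj, 0, 256);
	return 0;
}

static void
example_dtor(void *arg, void *obj)
{

	/* Undo whatever example_ctor() set up. */
}

static void
example_cache_create(void)
{

	example_cache = pool_cache_init(256, 0, 0, 0, "explcache",
	    NULL, IPL_SOFTNET, example_ctor, example_dtor, NULL);
}
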
2039: /*
2040: * pool_cache_bootstrap:
1.43 thorpej 2041: *
1.134 ad 2042: * Kernel-private version of pool_cache_init(). The caller
2043: * provides initial storage.
1.43 thorpej 2044: */
2045: void
1.134 ad 2046: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
2047: u_int align_offset, u_int flags, const char *wchan,
2048: struct pool_allocator *palloc, int ipl,
2049: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 2050: void *arg)
2051: {
1.134 ad 2052: CPU_INFO_ITERATOR cii;
2053: struct cpu_info *ci;
2054: struct pool *pp;
2055:
2056: pp = &pc->pc_pool;
2057: if (palloc == NULL && ipl == IPL_NONE)
2058: palloc = &pool_allocator_nointr;
2059: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.43 thorpej 2060:
1.138 ad 2061: /*
2062: * XXXAD hack to prevent IP input processing from blocking.
2063: */
2064: if (ipl == IPL_SOFTNET) {
2065: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
2066: } else {
2067: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
2068: }
1.43 thorpej 2069:
1.134 ad 2070: if (ctor == NULL) {
2071: ctor = (int (*)(void *, void *, int))nullop;
2072: }
2073: if (dtor == NULL) {
2074: dtor = (void (*)(void *, void *))nullop;
2075: }
1.43 thorpej 2076:
1.134 ad 2077: pc->pc_emptygroups = NULL;
2078: pc->pc_fullgroups = NULL;
2079: pc->pc_partgroups = NULL;
1.43 thorpej 2080: pc->pc_ctor = ctor;
2081: pc->pc_dtor = dtor;
2082: pc->pc_arg = arg;
1.134 ad 2083: pc->pc_hits = 0;
1.48 thorpej 2084: pc->pc_misses = 0;
1.134 ad 2085: pc->pc_nempty = 0;
2086: pc->pc_npart = 0;
2087: pc->pc_nfull = 0;
2088: pc->pc_contended = 0;
2089: pc->pc_refcnt = 0;
1.136 yamt 2090: pc->pc_freecheck = NULL;
1.134 ad 2091:
2092: /* Allocate per-CPU caches. */
2093: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
2094: pc->pc_ncpu = 0;
1.139 ad 2095: if (ncpu < 2) {
1.137 ad 2096: /* XXX For sparc: boot CPU is not attached yet. */
2097: pool_cache_cpu_init1(curcpu(), pc);
2098: } else {
2099: for (CPU_INFO_FOREACH(cii, ci)) {
2100: pool_cache_cpu_init1(ci, pc);
2101: }
1.134 ad 2102: }
2103:
2104: if (__predict_true(!cold)) {
2105: mutex_enter(&pp->pr_lock);
2106: pp->pr_cache = pc;
2107: mutex_exit(&pp->pr_lock);
2108: mutex_enter(&pool_head_lock);
2109: LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
2110: mutex_exit(&pool_head_lock);
2111: } else {
2112: pp->pr_cache = pc;
2113: LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
2114: }
1.43 thorpej 2115: }
2116:
2117: /*
2118: * pool_cache_destroy:
2119: *
2120: * Destroy a pool cache.
2121: */
2122: void
1.134 ad 2123: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 2124: {
1.134 ad 2125: struct pool *pp = &pc->pc_pool;
2126: pool_cache_cpu_t *cc;
2127: pcg_t *pcg;
2128: int i;
2129:
2130: /* Remove it from the global list. */
2131: mutex_enter(&pool_head_lock);
2132: while (pc->pc_refcnt != 0)
2133: cv_wait(&pool_busy, &pool_head_lock);
2134: LIST_REMOVE(pc, pc_cachelist);
2135: mutex_exit(&pool_head_lock);
1.43 thorpej 2136:
2137: /* First, invalidate the entire cache. */
2138: pool_cache_invalidate(pc);
2139:
1.134 ad 2140: /* Disassociate it from the pool. */
2141: mutex_enter(&pp->pr_lock);
2142: pp->pr_cache = NULL;
2143: mutex_exit(&pp->pr_lock);
2144:
2145: /* Destroy per-CPU data */
2146: for (i = 0; i < MAXCPUS; i++) {
2147: if ((cc = pc->pc_cpus[i]) == NULL)
2148: continue;
2149: if ((pcg = cc->cc_current) != NULL) {
2150: pcg->pcg_next = NULL;
2151: pool_cache_invalidate_groups(pc, pcg);
2152: }
2153: if ((pcg = cc->cc_previous) != NULL) {
2154: pcg->pcg_next = NULL;
2155: pool_cache_invalidate_groups(pc, pcg);
2156: }
2157: if (cc != &pc->pc_cpu0)
2158: pool_put(&cache_cpu_pool, cc);
2159: }
2160:
2161: /* Finally, destroy it. */
2162: mutex_destroy(&pc->pc_lock);
2163: pool_destroy(pp);
2164: pool_put(&cache_pool, pc);
2165: }
2166:
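/*
 * Example (illustrative sketch): tear down the cache created in the
 * pool_cache_init() sketch above.  All cached objects are destructed
 * and the backing pool is destroyed.
 */
static void
example_cache_destroy(void)
{

	pool_cache_destroy(example_cache);
}
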
2167: /*
2168: * pool_cache_cpu_init1:
2169: *
2170: * Called for each pool_cache whenever a new CPU is attached.
2171: */
2172: static void
2173: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
2174: {
2175: pool_cache_cpu_t *cc;
1.137 ad 2176: int index;
1.134 ad 2177:
1.137 ad 2178: index = ci->ci_index;
2179:
2180: KASSERT(index < MAXCPUS);
1.134 ad 2181: KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
2182:
1.137 ad 2183: if ((cc = pc->pc_cpus[index]) != NULL) {
2184: KASSERT(cc->cc_cpuindex == index);
1.134 ad 2185: return;
2186: }
2187:
2188: /*
              2189: 	 * The first CPU uses the statically allocated pc_cpu0.  This
              2190: 	 * is needed for bootstrap - we may not be able to allocate yet.
2191: */
2192: if (pc->pc_ncpu == 0) {
2193: cc = &pc->pc_cpu0;
2194: pc->pc_ncpu = 1;
2195: } else {
2196: mutex_enter(&pc->pc_lock);
2197: pc->pc_ncpu++;
2198: mutex_exit(&pc->pc_lock);
2199: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
2200: }
2201:
2202: cc->cc_ipl = pc->pc_pool.pr_ipl;
2203: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
2204: cc->cc_cache = pc;
1.137 ad 2205: cc->cc_cpuindex = index;
1.134 ad 2206: cc->cc_hits = 0;
2207: cc->cc_misses = 0;
2208: cc->cc_current = NULL;
2209: cc->cc_previous = NULL;
2210:
1.137 ad 2211: pc->pc_cpus[index] = cc;
1.43 thorpej 2212: }
2213:
1.134 ad 2214: /*
2215: * pool_cache_cpu_init:
2216: *
2217: * Called whenever a new CPU is attached.
2218: */
2219: void
2220: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 2221: {
1.134 ad 2222: pool_cache_t pc;
2223:
2224: mutex_enter(&pool_head_lock);
2225: LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) {
2226: pc->pc_refcnt++;
2227: mutex_exit(&pool_head_lock);
1.43 thorpej 2228:
1.134 ad 2229: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 2230:
1.134 ad 2231: mutex_enter(&pool_head_lock);
2232: pc->pc_refcnt--;
2233: cv_broadcast(&pool_busy);
2234: }
2235: mutex_exit(&pool_head_lock);
1.43 thorpej 2236: }
2237:
1.134 ad 2238: /*
2239: * pool_cache_reclaim:
2240: *
2241: * Reclaim memory from a pool cache.
2242: */
2243: bool
2244: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 2245: {
2246:
1.134 ad 2247: return pool_reclaim(&pc->pc_pool);
2248: }
1.43 thorpej 2249:
1.136 yamt 2250: static void
2251: pool_cache_destruct_object1(pool_cache_t pc, void *object)
2252: {
2253:
2254: (*pc->pc_dtor)(pc->pc_arg, object);
2255: pool_put(&pc->pc_pool, object);
2256: }
2257:
1.134 ad 2258: /*
2259: * pool_cache_destruct_object:
2260: *
2261: * Force destruction of an object and its release back into
2262: * the pool.
2263: */
2264: void
2265: pool_cache_destruct_object(pool_cache_t pc, void *object)
2266: {
2267:
1.136 yamt 2268: FREECHECK_IN(&pc->pc_freecheck, object);
2269:
2270: pool_cache_destruct_object1(pc, object);
1.43 thorpej 2271: }
2272:
1.134 ad 2273: /*
2274: * pool_cache_invalidate_groups:
2275: *
2276: * Invalidate a chain of groups and destruct all objects.
2277: */
1.102 chs 2278: static void
1.134 ad 2279: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 2280: {
1.134 ad 2281: void *object;
2282: pcg_t *next;
2283: int i;
2284:
2285: for (; pcg != NULL; pcg = next) {
2286: next = pcg->pcg_next;
2287:
2288: for (i = 0; i < pcg->pcg_avail; i++) {
2289: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 2290: pool_cache_destruct_object1(pc, object);
1.134 ad 2291: }
1.102 chs 2292:
2293: pool_put(&pcgpool, pcg);
2294: }
2295: }
2296:
1.43 thorpej 2297: /*
1.134 ad 2298: * pool_cache_invalidate:
1.43 thorpej 2299: *
1.134 ad 2300: * Invalidate a pool cache (destruct and release all of the
2301: * cached objects). Does not reclaim objects from the pool.
1.43 thorpej 2302: */
1.134 ad 2303: void
2304: pool_cache_invalidate(pool_cache_t pc)
2305: {
2306: pcg_t *full, *empty, *part;
2307:
2308: mutex_enter(&pc->pc_lock);
2309: full = pc->pc_fullgroups;
2310: empty = pc->pc_emptygroups;
2311: part = pc->pc_partgroups;
2312: pc->pc_fullgroups = NULL;
2313: pc->pc_emptygroups = NULL;
2314: pc->pc_partgroups = NULL;
2315: pc->pc_nfull = 0;
2316: pc->pc_nempty = 0;
2317: pc->pc_npart = 0;
2318: mutex_exit(&pc->pc_lock);
2319:
2320: pool_cache_invalidate_groups(pc, full);
2321: pool_cache_invalidate_groups(pc, empty);
2322: pool_cache_invalidate_groups(pc, part);
2323: }
2324:
2325: void
2326: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2327: {
2328:
2329: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2330: }
2331:
2332: void
2333: pool_cache_setlowat(pool_cache_t pc, int n)
2334: {
2335:
2336: pool_setlowat(&pc->pc_pool, n);
2337: }
2338:
2339: void
2340: pool_cache_sethiwat(pool_cache_t pc, int n)
2341: {
2342:
2343: pool_sethiwat(&pc->pc_pool, n);
2344: }
2345:
2346: void
2347: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2348: {
2349:
2350: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2351: }
2352:
2353: static inline pool_cache_cpu_t *
2354: pool_cache_cpu_enter(pool_cache_t pc, int *s)
2355: {
2356: pool_cache_cpu_t *cc;
2357:
2358: /*
2359: * Prevent other users of the cache from accessing our
2360: * CPU-local data. To avoid touching shared state, we
              2361: 	 * pull the necessary information from CPU-local data.
2362: */
1.137 ad 2363: crit_enter();
2364: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2365: KASSERT(cc->cc_cache == pc);
1.137 ad 2366: if (cc->cc_ipl != IPL_NONE) {
1.134 ad 2367: *s = splraiseipl(cc->cc_iplcookie);
2368: }
2369: KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
2370:
2371: return cc;
2372: }
2373:
2374: static inline void
2375: pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
2376: {
2377:
2378: /* No longer need exclusive access to the per-CPU data. */
1.137 ad 2379: if (cc->cc_ipl != IPL_NONE) {
1.134 ad 2380: splx(*s);
2381: }
1.137 ad 2382: crit_exit();
1.134 ad 2383: }
2384:
2385: #if __GNUC_PREREQ__(3, 0)
2386: __attribute ((noinline))
2387: #endif
2388: pool_cache_cpu_t *
2389: pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
2390: paddr_t *pap, int flags)
1.43 thorpej 2391: {
1.134 ad 2392: pcg_t *pcg, *cur;
2393: uint64_t ncsw;
2394: pool_cache_t pc;
1.43 thorpej 2395: void *object;
1.58 thorpej 2396:
1.134 ad 2397: pc = cc->cc_cache;
2398: cc->cc_misses++;
1.43 thorpej 2399:
1.134 ad 2400: /*
2401: * Nothing was available locally. Try and grab a group
2402: * from the cache.
2403: */
2404: if (!mutex_tryenter(&pc->pc_lock)) {
2405: ncsw = curlwp->l_ncsw;
2406: mutex_enter(&pc->pc_lock);
2407: pc->pc_contended++;
1.43 thorpej 2408:
1.134 ad 2409: /*
2410: * If we context switched while locking, then
2411: * our view of the per-CPU data is invalid:
2412: * retry.
2413: */
2414: if (curlwp->l_ncsw != ncsw) {
2415: mutex_exit(&pc->pc_lock);
2416: pool_cache_cpu_exit(cc, s);
2417: return pool_cache_cpu_enter(pc, s);
1.43 thorpej 2418: }
1.102 chs 2419: }
1.43 thorpej 2420:
1.134 ad 2421: if ((pcg = pc->pc_fullgroups) != NULL) {
1.43 thorpej 2422: /*
1.134 ad 2423: * If there's a full group, release our empty
2424: * group back to the cache. Install the full
2425: * group as cc_current and return.
1.43 thorpej 2426: */
1.134 ad 2427: if ((cur = cc->cc_current) != NULL) {
2428: KASSERT(cur->pcg_avail == 0);
2429: cur->pcg_next = pc->pc_emptygroups;
2430: pc->pc_emptygroups = cur;
2431: pc->pc_nempty++;
1.87 thorpej 2432: }
1.134 ad 2433: KASSERT(pcg->pcg_avail == PCG_NOBJECTS);
2434: cc->cc_current = pcg;
2435: pc->pc_fullgroups = pcg->pcg_next;
2436: pc->pc_hits++;
2437: pc->pc_nfull--;
2438: mutex_exit(&pc->pc_lock);
2439: return cc;
2440: }
2441:
2442: /*
2443: * Nothing available locally or in cache. Take the slow
2444: * path: fetch a new object from the pool and construct
2445: * it.
2446: */
2447: pc->pc_misses++;
2448: mutex_exit(&pc->pc_lock);
2449: pool_cache_cpu_exit(cc, s);
2450:
2451: object = pool_get(&pc->pc_pool, flags);
2452: *objectp = object;
2453: if (object == NULL)
2454: return NULL;
1.125 ad 2455:
1.134 ad 2456: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
2457: pool_put(&pc->pc_pool, object);
2458: *objectp = NULL;
2459: return NULL;
1.43 thorpej 2460: }
2461:
1.134 ad 2462: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2463: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2464:
1.134 ad 2465: if (pap != NULL) {
2466: #ifdef POOL_VTOPHYS
2467: *pap = POOL_VTOPHYS(object);
2468: #else
2469: *pap = POOL_PADDR_INVALID;
2470: #endif
1.102 chs 2471: }
1.43 thorpej 2472:
1.125 ad 2473: FREECHECK_OUT(&pc->pc_freecheck, object);
1.134 ad 2474: return NULL;
1.43 thorpej 2475: }
2476:
2477: /*
1.134 ad 2478: * pool_cache_get{,_paddr}:
1.43 thorpej 2479: *
1.134 ad 2480: * Get an object from a pool cache (optionally returning
2481: * the physical address of the object).
1.43 thorpej 2482: */
1.134 ad 2483: void *
2484: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2485: {
1.134 ad 2486: pool_cache_cpu_t *cc;
2487: pcg_t *pcg;
2488: void *object;
1.60 thorpej 2489: int s;
1.43 thorpej 2490:
1.134 ad 2491: #ifdef LOCKDEBUG
2492: if (flags & PR_WAITOK)
2493: ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
2494: #endif
1.125 ad 2495:
1.134 ad 2496: cc = pool_cache_cpu_enter(pc, &s);
2497: do {
2498: /* Try and allocate an object from the current group. */
2499: pcg = cc->cc_current;
2500: if (pcg != NULL && pcg->pcg_avail > 0) {
2501: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2502: if (pap != NULL)
2503: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2504: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2505: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
2506: KASSERT(object != NULL);
2507: cc->cc_hits++;
2508: pool_cache_cpu_exit(cc, &s);
2509: FREECHECK_OUT(&pc->pc_freecheck, object);
2510: return object;
1.43 thorpej 2511: }
2512:
2513: /*
1.134 ad 2514: * That failed. If the previous group isn't empty, swap
2515: * it with the current group and allocate from there.
1.43 thorpej 2516: */
1.134 ad 2517: pcg = cc->cc_previous;
2518: if (pcg != NULL && pcg->pcg_avail > 0) {
2519: cc->cc_previous = cc->cc_current;
2520: cc->cc_current = pcg;
2521: continue;
1.43 thorpej 2522: }
2523:
1.134 ad 2524: /*
2525: * Can't allocate from either group: try the slow path.
2526: * If get_slow() allocated an object for us, or if
2527: * no more objects are available, it will return NULL.
2528: * Otherwise, we need to retry.
2529: */
2530: cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
2531: } while (cc != NULL);
1.43 thorpej 2532:
1.134 ad 2533: return object;
1.51 thorpej 2534: }
2535:
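/*
 * Example (illustrative sketch): allocate an object together with its
 * physical address, e.g. for loading into a DMA descriptor.  The cache
 * here is the hypothetical one from the pool_cache_init() sketch above;
 * pool_cache_get() is the non-paddr wrapper declared in <sys/pool.h>.
 */
static void *
example_dma_get(paddr_t *pap)
{

	/* PR_WAITOK: may sleep until memory is available. */
	return pool_cache_get_paddr(example_cache, PR_WAITOK, pap);
}
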
1.134 ad 2536: #if __GNUC_PREREQ__(3, 0)
2537: __attribute ((noinline))
2538: #endif
2539: pool_cache_cpu_t *
2540: pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
1.51 thorpej 2541: {
1.134 ad 2542: pcg_t *pcg, *cur;
2543: uint64_t ncsw;
2544: pool_cache_t pc;
1.51 thorpej 2545:
1.134 ad 2546: pc = cc->cc_cache;
2547: cc->cc_misses++;
1.43 thorpej 2548:
1.134 ad 2549: /*
2550: * No free slots locally. Try to grab an empty, unused
2551: * group from the cache.
2552: */
2553: if (!mutex_tryenter(&pc->pc_lock)) {
2554: ncsw = curlwp->l_ncsw;
2555: mutex_enter(&pc->pc_lock);
2556: pc->pc_contended++;
1.102 chs 2557:
1.134 ad 2558: /*
2559: * If we context switched while locking, then
2560: * our view of the per-CPU data is invalid:
2561: * retry.
2562: */
2563: if (curlwp->l_ncsw != ncsw) {
2564: mutex_exit(&pc->pc_lock);
2565: pool_cache_cpu_exit(cc, s);
2566: return pool_cache_cpu_enter(pc, s);
2567: }
2568: }
1.130 ad 2569:
1.134 ad 2570: if ((pcg = pc->pc_emptygroups) != NULL) {
2571: /*
              2572: 	 * If there's an empty group, release our full
2573: * group back to the cache. Install the empty
2574: * group as cc_current and return.
2575: */
2576: if ((cur = cc->cc_current) != NULL) {
2577: KASSERT(cur->pcg_avail == PCG_NOBJECTS);
2578: cur->pcg_next = pc->pc_fullgroups;
2579: pc->pc_fullgroups = cur;
2580: pc->pc_nfull++;
1.102 chs 2581: }
1.134 ad 2582: KASSERT(pcg->pcg_avail == 0);
2583: cc->cc_current = pcg;
2584: pc->pc_emptygroups = pcg->pcg_next;
2585: pc->pc_hits++;
2586: pc->pc_nempty--;
2587: mutex_exit(&pc->pc_lock);
2588: return cc;
1.102 chs 2589: }
1.105 christos 2590:
1.134 ad 2591: /*
2592: * Nothing available locally or in cache. Take the
2593: * slow path and try to allocate a new group that we
2594: * can release to.
2595: */
2596: pc->pc_misses++;
2597: mutex_exit(&pc->pc_lock);
2598: pool_cache_cpu_exit(cc, s);
1.105 christos 2599:
1.134 ad 2600: /*
2601: * If we can't allocate a new group, just throw the
2602: * object away.
2603: */
2604: pcg = pool_get(&pcgpool, PR_NOWAIT);
2605: if (pcg == NULL) {
2606: pool_cache_destruct_object(pc, object);
2607: return NULL;
2608: }
2609: #ifdef DIAGNOSTIC
2610: memset(pcg, 0, sizeof(*pcg));
2611: #else
2612: pcg->pcg_avail = 0;
2613: #endif
1.105 christos 2614:
1.134 ad 2615: /*
2616: * Add the empty group to the cache and try again.
2617: */
2618: mutex_enter(&pc->pc_lock);
2619: pcg->pcg_next = pc->pc_emptygroups;
2620: pc->pc_emptygroups = pcg;
2621: pc->pc_nempty++;
2622: mutex_exit(&pc->pc_lock);
1.103 chs 2623:
1.134 ad 2624: return pool_cache_cpu_enter(pc, s);
2625: }
1.102 chs 2626:
1.43 thorpej 2627: /*
1.134 ad 2628: * pool_cache_put{,_paddr}:
1.43 thorpej 2629: *
1.134 ad 2630: * Put an object back to the pool cache (optionally caching the
2631: * physical address of the object).
1.43 thorpej 2632: */
1.101 thorpej 2633: void
1.134 ad 2634: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2635: {
1.134 ad 2636: pool_cache_cpu_t *cc;
2637: pcg_t *pcg;
2638: int s;
1.101 thorpej 2639:
1.134 ad 2640: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2641:
1.134 ad 2642: cc = pool_cache_cpu_enter(pc, &s);
2643: do {
2644: /* If the current group isn't full, release it there. */
2645: pcg = cc->cc_current;
2646: if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) {
2647: KASSERT(pcg->pcg_objects[pcg->pcg_avail].pcgo_va
2648: == NULL);
2649: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2650: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2651: pcg->pcg_avail++;
2652: cc->cc_hits++;
2653: pool_cache_cpu_exit(cc, &s);
2654: return;
2655: }
1.43 thorpej 2656:
1.134 ad 2657: /*
2658: * That failed. If the previous group is empty, swap
2659: * it with the current group and try again.
2660: */
2661: pcg = cc->cc_previous;
2662: if (pcg != NULL && pcg->pcg_avail == 0) {
2663: cc->cc_previous = cc->cc_current;
2664: cc->cc_current = pcg;
2665: continue;
2666: }
1.43 thorpej 2667:
1.134 ad 2668: /*
2669: * Can't free to either group: try the slow path.
2670: * If put_slow() releases the object for us, it
2671: * will return NULL. Otherwise we need to retry.
2672: */
2673: cc = pool_cache_put_slow(cc, &s, object, pa);
2674: } while (cc != NULL);
1.43 thorpej 2675: }
2676:
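/*
 * Example (illustrative sketch): return the object from the sketch
 * above, caching its physical address alongside the virtual address
 * so that a later pool_cache_get_paddr() can hand both back without
 * a translation.
 */
static void
example_dma_put(void *obj, paddr_t pa)
{

	pool_cache_put_paddr(example_cache, obj, pa);
}
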
2677: /*
1.134 ad 2678: * pool_cache_xcall:
1.43 thorpej 2679: *
1.134 ad 2680: * Transfer objects from the per-CPU cache to the global cache.
2681: * Run within a cross-call thread.
1.43 thorpej 2682: */
2683: static void
1.134 ad 2684: pool_cache_xcall(pool_cache_t pc)
1.43 thorpej 2685: {
1.134 ad 2686: pool_cache_cpu_t *cc;
2687: pcg_t *prev, *cur, **list;
2688: int s = 0; /* XXXgcc */
2689:
2690: cc = pool_cache_cpu_enter(pc, &s);
2691: cur = cc->cc_current;
2692: cc->cc_current = NULL;
2693: prev = cc->cc_previous;
2694: cc->cc_previous = NULL;
2695: pool_cache_cpu_exit(cc, &s);
2696:
2697: /*
2698: * XXXSMP Go to splvm to prevent kernel_lock from being taken,
2699: * because locks at IPL_SOFTXXX are still spinlocks. Does not
2700: * apply to IPL_SOFTBIO. Cross-call threads do not take the
2701: * kernel_lock.
1.101 thorpej 2702: */
1.134 ad 2703: s = splvm();
2704: mutex_enter(&pc->pc_lock);
2705: if (cur != NULL) {
2706: if (cur->pcg_avail == PCG_NOBJECTS) {
2707: list = &pc->pc_fullgroups;
2708: pc->pc_nfull++;
2709: } else if (cur->pcg_avail == 0) {
2710: list = &pc->pc_emptygroups;
2711: pc->pc_nempty++;
2712: } else {
2713: list = &pc->pc_partgroups;
2714: pc->pc_npart++;
2715: }
2716: cur->pcg_next = *list;
2717: *list = cur;
2718: }
2719: if (prev != NULL) {
2720: if (prev->pcg_avail == PCG_NOBJECTS) {
2721: list = &pc->pc_fullgroups;
2722: pc->pc_nfull++;
2723: } else if (prev->pcg_avail == 0) {
2724: list = &pc->pc_emptygroups;
2725: pc->pc_nempty++;
2726: } else {
2727: list = &pc->pc_partgroups;
2728: pc->pc_npart++;
2729: }
2730: prev->pcg_next = *list;
2731: *list = prev;
2732: }
2733: mutex_exit(&pc->pc_lock);
2734: splx(s);
1.3 pk 2735: }
1.66 thorpej 2736:
2737: /*
2738: * Pool backend allocators.
2739: *
2740: * Each pool has a backend allocator that handles allocation, deallocation,
2741: * and any additional draining that might be needed.
2742: *
2743: * We provide two standard allocators:
2744: *
2745: * pool_allocator_kmem - the default when no allocator is specified
2746: *
2747: * pool_allocator_nointr - used for pools that will not be accessed
2748: * in interrupt context.
2749: */
2750: void *pool_page_alloc(struct pool *, int);
2751: void pool_page_free(struct pool *, void *);
2752:
1.112 bjh21 2753: #ifdef POOL_SUBPAGE
2754: struct pool_allocator pool_allocator_kmem_fullpage = {
2755: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2756: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2757: };
2758: #else
1.66 thorpej 2759: struct pool_allocator pool_allocator_kmem = {
2760: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2761: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2762: };
1.112 bjh21 2763: #endif
1.66 thorpej 2764:
2765: void *pool_page_alloc_nointr(struct pool *, int);
2766: void pool_page_free_nointr(struct pool *, void *);
2767:
1.112 bjh21 2768: #ifdef POOL_SUBPAGE
2769: struct pool_allocator pool_allocator_nointr_fullpage = {
2770: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2771: .pa_backingmapptr = &kernel_map,
1.112 bjh21 2772: };
2773: #else
1.66 thorpej 2774: struct pool_allocator pool_allocator_nointr = {
2775: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2776: .pa_backingmapptr = &kernel_map,
1.66 thorpej 2777: };
1.112 bjh21 2778: #endif
1.66 thorpej 2779:
2780: #ifdef POOL_SUBPAGE
2781: void *pool_subpage_alloc(struct pool *, int);
2782: void pool_subpage_free(struct pool *, void *);
2783:
1.112 bjh21 2784: struct pool_allocator pool_allocator_kmem = {
2785: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2786: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2787: };
2788:
2789: void *pool_subpage_alloc_nointr(struct pool *, int);
2790: void pool_subpage_free_nointr(struct pool *, void *);
2791:
2792: struct pool_allocator pool_allocator_nointr = {
2793: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2794: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2795: };
2796: #endif /* POOL_SUBPAGE */
2797:
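/*
 * Example (illustrative sketch): a minimal custom backend allocator.
 * Only pa_alloc/pa_free need to be supplied; a pa_pagesz of 0 selects
 * the default page size.  These hypothetical callbacks simply wrap the
 * stock page allocator; a real backend might carve pages out of a
 * private submap instead.
 */
static void *
example_backend_alloc(struct pool *pp, int flags)
{

	return pool_page_alloc(pp, flags);
}

static void
example_backend_free(struct pool *pp, void *v)
{

	pool_page_free(pp, v);
}

struct pool_allocator example_allocator = {
	example_backend_alloc, example_backend_free, 0,
};
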
1.117 yamt 2798: static void *
2799: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2800: {
1.117 yamt 2801: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2802: void *res;
2803:
1.117 yamt 2804: res = (*pa->pa_alloc)(pp, flags);
2805: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2806: /*
1.117 yamt 2807: * We only run the drain hook here if PR_NOWAIT.
2808: * In other cases, the hook will be run in
2809: * pool_reclaim().
1.66 thorpej 2810: */
1.117 yamt 2811: if (pp->pr_drain_hook != NULL) {
2812: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2813: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2814: }
1.117 yamt 2815: }
2816: return res;
1.66 thorpej 2817: }
2818:
1.117 yamt 2819: static void
1.66 thorpej 2820: pool_allocator_free(struct pool *pp, void *v)
2821: {
2822: struct pool_allocator *pa = pp->pr_alloc;
2823:
2824: (*pa->pa_free)(pp, v);
2825: }
2826:
2827: void *
1.124 yamt 2828: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2829: {
1.127 thorpej 2830: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2831:
1.100 yamt 2832: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2833: }
2834:
2835: void
1.124 yamt 2836: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2837: {
2838:
1.98 yamt 2839: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2840: }
2841:
2842: static void *
1.124 yamt 2843: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2844: {
1.127 thorpej 2845: bool waitok = (flags & PR_WAITOK) ? true : false;
1.98 yamt 2846:
1.100 yamt 2847: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2848: }
2849:
2850: static void
1.124 yamt 2851: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2852: {
2853:
1.100 yamt 2854: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2855: }
2856:
2857: #ifdef POOL_SUBPAGE
2858: /* Sub-page allocator, for machines with large hardware pages. */
2859: void *
2860: pool_subpage_alloc(struct pool *pp, int flags)
2861: {
1.134 ad 2862: return pool_get(&psppool, flags);
1.66 thorpej 2863: }
2864:
2865: void
2866: pool_subpage_free(struct pool *pp, void *v)
2867: {
2868: pool_put(&psppool, v);
2869: }
2870:
2871: /* We don't provide a real nointr allocator. Maybe later. */
2872: void *
1.112 bjh21 2873: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2874: {
2875:
2876: return (pool_subpage_alloc(pp, flags));
2877: }
2878:
2879: void
1.112 bjh21 2880: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2881: {
2882:
2883: pool_subpage_free(pp, v);
2884: }
1.112 bjh21 2885: #endif /* POOL_SUBPAGE */
1.66 thorpej 2886: void *
1.124 yamt 2887: pool_page_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2888: {
1.127 thorpej 2889: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2890:
1.100 yamt 2891: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2892: }
2893:
2894: void
1.124 yamt 2895: pool_page_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2896: {
2897:
1.98 yamt 2898: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2899: }
1.141 ! yamt 2900:
! 2901: #if defined(DDB)
! 2902: static bool
! 2903: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
! 2904: {
! 2905:
! 2906: return (uintptr_t)ph->ph_page <= addr &&
! 2907: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
! 2908: }
! 2909:
! 2910: void
! 2911: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
! 2912: {
! 2913: struct pool *pp;
! 2914:
! 2915: LIST_FOREACH(pp, &pool_head, pr_poollist) {
! 2916: struct pool_item_header *ph;
! 2917: uintptr_t item;
! 2918:
! 2919: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
! 2920: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
! 2921: if (pool_in_page(pp, ph, addr)) {
! 2922: goto found;
! 2923: }
! 2924: }
! 2925: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
! 2926: if (pool_in_page(pp, ph, addr)) {
! 2927: goto found;
! 2928: }
! 2929: }
! 2930: continue;
! 2931: } else {
! 2932: ph = pr_find_pagehead_noalign(pp, (void *)addr);
! 2933: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
! 2934: continue;
! 2935: }
! 2936: }
! 2937: found:
! 2938: item = (uintptr_t)ph->ph_page + ph->ph_off;
! 2939: item = item + rounddown(addr - item, pp->pr_size);
! 2940: (*pr)("%p is %p+%zu from POOL '%s'\n",
        !     2941: 		    (void *)addr, (void *)item, (size_t)(addr - item),
! 2942: pp->pr_wchan);
! 2943: }
! 2944: }
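
/*
 * Example (illustrative sketch): from DDB, "whatis <addr>" ends up
 * here; for a pool-backed address it prints a line in the format
 * above, e.g. (values hypothetical):
 *
 *	0xcb3fe108 is 0xcb3fe100+8 from POOL 'mbpl'
 */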
! 2945: #endif /* defined(DDB) */