Annotation of src/sys/kern/subr_pool.c, Revision 1.137
1.137 ! ad 1: /* $NetBSD: subr_pool.c,v 1.136 2007/11/14 11:14:13 yamt Exp $ */
1.1 pk 2:
3: /*-
1.134 ad 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.134 ad 9: * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.137 ! ad 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.136 2007/11/14 11:14:13 yamt Exp $");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
1.135 yamt 49: #include <sys/bitops.h>
1.1 pk 50: #include <sys/proc.h>
51: #include <sys/errno.h>
52: #include <sys/kernel.h>
53: #include <sys/malloc.h>
54: #include <sys/lock.h>
55: #include <sys/pool.h>
1.20 thorpej 56: #include <sys/syslog.h>
1.125 ad 57: #include <sys/debug.h>
1.134 ad 58: #include <sys/lockdebug.h>
59: #include <sys/xcall.h>
60: #include <sys/cpu.h>
1.3 pk 61:
62: #include <uvm/uvm.h>
63:
1.1 pk 64: /*
65: * Pool resource management utility.
1.3 pk 66: *
1.88 chs 67: * Memory is allocated in pages which are split into pieces according to
68: * the pool item size. Each page is kept on one of three lists in the
69: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
70: * for empty, full and partially-full pages respectively. The individual
71: * pool items are on a linked list headed by `ph_itemlist' in each page
72: * header. The memory for building the page list is either taken from
73: * the allocated pages themselves (for small pool items) or taken from
74: * an internal pool of page headers (`phpool').
1.1 pk 75: */
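/*
 * A minimal usage sketch: a consumer declares a pool, initializes it
 * once, and thereafter gets and puts items.  "foo_pool" and
 * "struct foo" are hypothetical names.
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &pool_allocator_nointr, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */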
76:
1.3 pk 77: /* List of all pools */
1.102 chs 78: LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
1.3 pk 79:
1.134 ad 80: /* List of all caches. */
81: LIST_HEAD(,pool_cache) pool_cache_head =
82: LIST_HEAD_INITIALIZER(pool_cache_head);
83:
1.3 pk 84: /* Private pool for page header structures */
1.97 yamt 85: #define PHPOOL_MAX 8
86: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 87: #define PHPOOL_FREELIST_NELEM(idx) \
88: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
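/*
 * Worked values, given the 32-bit pool_item_bitmap_t defined below
 * (BITMAP_SIZE == 32): index 0 is the plain header pool with no
 * bitmap, and indices 1..7 serve up to 64, 128, 256, 512, 1024, 2048
 * and 4096 items per page respectively.
 */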
1.3 pk 89:
1.62 bjh21 90: #ifdef POOL_SUBPAGE
91: /* Pool of subpages for use by normal pools. */
92: static struct pool psppool;
93: #endif
94:
1.117 yamt 95: static SLIST_HEAD(, pool_allocator) pa_deferinitq =
96: SLIST_HEAD_INITIALIZER(pa_deferinitq);
97:
1.98 yamt 98: static void *pool_page_alloc_meta(struct pool *, int);
99: static void pool_page_free_meta(struct pool *, void *);
100:
101: /* allocator for pool metadata */
1.134 ad 102: struct pool_allocator pool_allocator_meta = {
1.117 yamt 103: pool_page_alloc_meta, pool_page_free_meta,
104: .pa_backingmapptr = &kmem_map,
1.98 yamt 105: };
106:
1.3 pk 107: /* # of seconds to retain page after last use */
108: int pool_inactive_time = 10;
109:
110: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 111: static struct pool *drainpp;
112:
1.134 ad 113: /* This lock protects both pool_head and drainpp. */
114: static kmutex_t pool_head_lock;
115: static kcondvar_t pool_busy;
1.3 pk 116:
1.135 yamt 117: typedef uint32_t pool_item_bitmap_t;
118: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
119: #define BITMAP_MASK (BITMAP_SIZE - 1)
1.99 yamt 120:
1.3 pk 121: struct pool_item_header {
122: /* Page headers */
1.88 chs 123: LIST_ENTRY(pool_item_header)
1.3 pk 124: ph_pagelist; /* pool page list */
1.88 chs 125: SPLAY_ENTRY(pool_item_header)
126: ph_node; /* Off-page page headers */
1.128 christos 127: void * ph_page; /* this page's address */
1.3 pk 128: struct timeval ph_time; /* last referenced */
1.135 yamt 129: uint16_t ph_nmissing; /* # of chunks in use */
1.97 yamt 130: union {
131: /* !PR_NOTOUCH */
132: struct {
1.102 chs 133: LIST_HEAD(, pool_item)
1.97 yamt 134: phu_itemlist; /* chunk list for this page */
135: } phu_normal;
136: /* PR_NOTOUCH */
137: struct {
1.135 yamt 138: uint16_t phu_off; /* start offset in page */
139: pool_item_bitmap_t phu_bitmap[];
1.97 yamt 140: } phu_notouch;
141: } ph_u;
1.3 pk 142: };
1.97 yamt 143: #define ph_itemlist ph_u.phu_normal.phu_itemlist
144: #define ph_off ph_u.phu_notouch.phu_off
1.135 yamt 145: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 146:
1.1 pk 147: struct pool_item {
1.3 pk 148: #ifdef DIAGNOSTIC
1.82 thorpej 149: u_int pi_magic;
1.33 chs 150: #endif
1.134 ad 151: #define PI_MAGIC 0xdeaddeadU
1.3 pk 152: /* Other entries use only this list entry */
1.102 chs 153: LIST_ENTRY(pool_item) pi_list;
1.3 pk 154: };
155:
1.53 thorpej 156: #define POOL_NEEDS_CATCHUP(pp) \
157: ((pp)->pr_nitems < (pp)->pr_minitems)
158:
1.43 thorpej 159: /*
160: * Pool cache management.
161: *
162: * Pool caches provide a way for constructed objects to be cached by the
163: * pool subsystem. This can lead to performance improvements by avoiding
 164: * needless object construction/destruction; construction and
 165: * destruction are deferred until absolutely necessary.
166: *
1.134 ad 167: * Caches are grouped into cache groups. Each cache group references up
168: * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
169: * object from the pool, it calls the object's constructor and places it
170: * into a cache group. When a cache group frees an object back to the
171: * pool, it first calls the object's destructor. This allows the object
172: * to persist in constructed form while freed to the cache.
173: *
174: * The pool references each cache, so that when a pool is drained by the
175: * pagedaemon, it can drain each individual cache as well. Each time a
176: * cache is drained, the most idle cache group is freed to the pool in
177: * its entirety.
1.43 thorpej 178: *
 179: * Pool caches are laid on top of pools. By layering them, we can avoid
180: * the complexity of cache management for pools which would not benefit
181: * from it.
182: */
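/*
 * A minimal cache-usage sketch, assuming the pool_cache_init()/
 * pool_cache_get()/pool_cache_put() interface that accompanies this
 * rework; "foo_cache", "struct foo", foo_ctor and foo_dtor are
 * hypothetical:
 *
 *	pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
 *	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	... use f; it arrives constructed ...
 *	pool_cache_put(foo_cache, f);
 *
 * The object returns to the cache still constructed; foo_dtor runs
 * only when it is eventually freed back to the pool.
 */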
183:
184: static struct pool pcgpool;
1.134 ad 185: static struct pool cache_pool;
186: static struct pool cache_cpu_pool;
1.3 pk 187:
1.134 ad 188: static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
189: void *, paddr_t);
190: static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
191: void **, paddr_t *, int);
192: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
193: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
194: static void pool_cache_xcall(pool_cache_t);
1.3 pk 195:
1.42 thorpej 196: static int pool_catchup(struct pool *);
1.128 christos 197: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 198: struct pool_item_header *);
1.88 chs 199: static void pool_update_curpage(struct pool *);
1.66 thorpej 200:
1.113 yamt 201: static int pool_grow(struct pool *, int);
1.117 yamt 202: static void *pool_allocator_alloc(struct pool *, int);
203: static void pool_allocator_free(struct pool *, void *);
1.3 pk 204:
1.97 yamt 205: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 206: void (*)(const char *, ...));
1.42 thorpej 207: static void pool_print1(struct pool *, const char *,
208: void (*)(const char *, ...));
1.3 pk 209:
1.88 chs 210: static int pool_chk_page(struct pool *, const char *,
211: struct pool_item_header *);
212:
1.3 pk 213: /*
1.52 thorpej 214: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 215: */
216: struct pool_log {
217: const char *pl_file;
218: long pl_line;
219: int pl_action;
1.25 thorpej 220: #define PRLOG_GET 1
221: #define PRLOG_PUT 2
1.3 pk 222: void *pl_addr;
1.1 pk 223: };
224:
1.86 matt 225: #ifdef POOL_DIAGNOSTIC
1.3 pk 226: /* Number of entries in pool log buffers */
1.17 thorpej 227: #ifndef POOL_LOGSIZE
228: #define POOL_LOGSIZE 10
229: #endif
230:
231: int pool_logsize = POOL_LOGSIZE;
1.1 pk 232:
1.110 perry 233: static inline void
1.42 thorpej 234: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 235: {
236: int n = pp->pr_curlogentry;
237: struct pool_log *pl;
238:
1.20 thorpej 239: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 240: return;
241:
242: /*
243: * Fill in the current entry. Wrap around and overwrite
244: * the oldest entry if necessary.
245: */
246: pl = &pp->pr_log[n];
247: pl->pl_file = file;
248: pl->pl_line = line;
249: pl->pl_action = action;
250: pl->pl_addr = v;
251: if (++n >= pp->pr_logsize)
252: n = 0;
253: pp->pr_curlogentry = n;
254: }
255:
256: static void
1.42 thorpej 257: pr_printlog(struct pool *pp, struct pool_item *pi,
258: void (*pr)(const char *, ...))
1.3 pk 259: {
260: int i = pp->pr_logsize;
261: int n = pp->pr_curlogentry;
262:
1.20 thorpej 263: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 264: return;
265:
266: /*
267: * Print all entries in this pool's log.
268: */
269: while (i-- > 0) {
270: struct pool_log *pl = &pp->pr_log[n];
271: if (pl->pl_action != 0) {
1.25 thorpej 272: if (pi == NULL || pi == pl->pl_addr) {
273: (*pr)("\tlog entry %d:\n", i);
274: (*pr)("\t\taction = %s, addr = %p\n",
275: pl->pl_action == PRLOG_GET ? "get" : "put",
276: pl->pl_addr);
277: (*pr)("\t\tfile: %s at line %lu\n",
278: pl->pl_file, pl->pl_line);
279: }
1.3 pk 280: }
281: if (++n >= pp->pr_logsize)
282: n = 0;
283: }
284: }
1.25 thorpej 285:
1.110 perry 286: static inline void
1.42 thorpej 287: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 288: {
289:
1.34 thorpej 290: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 291: printf("pool %s: reentrancy at file %s line %ld\n",
292: pp->pr_wchan, file, line);
293: printf(" previous entry at file %s line %ld\n",
294: pp->pr_entered_file, pp->pr_entered_line);
295: panic("pr_enter");
296: }
297:
298: pp->pr_entered_file = file;
299: pp->pr_entered_line = line;
300: }
301:
1.110 perry 302: static inline void
1.42 thorpej 303: pr_leave(struct pool *pp)
1.25 thorpej 304: {
305:
1.34 thorpej 306: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 307: printf("pool %s not entered?\n", pp->pr_wchan);
308: panic("pr_leave");
309: }
310:
311: pp->pr_entered_file = NULL;
312: pp->pr_entered_line = 0;
313: }
314:
1.110 perry 315: static inline void
1.42 thorpej 316: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 317: {
318:
319: if (pp->pr_entered_file != NULL)
320: (*pr)("\n\tcurrently entered from file %s line %ld\n",
321: pp->pr_entered_file, pp->pr_entered_line);
322: }
1.3 pk 323: #else
1.25 thorpej 324: #define pr_log(pp, v, action, file, line)
325: #define pr_printlog(pp, pi, pr)
326: #define pr_enter(pp, file, line)
327: #define pr_leave(pp)
328: #define pr_enter_check(pp, pr)
1.59 thorpej 329: #endif /* POOL_DIAGNOSTIC */
1.3 pk 330:
1.135 yamt 331: static inline unsigned int
1.97 yamt 332: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
333: const void *v)
334: {
335: const char *cp = v;
1.135 yamt 336: unsigned int idx;
1.97 yamt 337:
338: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 339: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 340: KASSERT(idx < pp->pr_itemsperpage);
341: return idx;
342: }
343:
1.110 perry 344: static inline void
1.97 yamt 345: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
346: void *obj)
347: {
1.135 yamt 348: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
349: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
350: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 351:
1.135 yamt 352: KASSERT((*bitmap & mask) == 0);
353: *bitmap |= mask;
1.97 yamt 354: }
355:
1.110 perry 356: static inline void *
1.97 yamt 357: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
358: {
1.135 yamt 359: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
360: unsigned int idx;
361: int i;
1.97 yamt 362:
1.135 yamt 363: for (i = 0; ; i++) {
364: int bit;
1.97 yamt 365:
1.135 yamt 366: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
367: bit = ffs32(bitmap[i]);
368: if (bit) {
369: pool_item_bitmap_t mask;
370:
371: bit--;
372: idx = (i * BITMAP_SIZE) + bit;
373: mask = 1 << bit;
374: KASSERT((bitmap[i] & mask) != 0);
375: bitmap[i] &= ~mask;
376: break;
377: }
378: }
379: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 380: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 381: }
382:
1.135 yamt 383: static inline void
384: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
385: {
386: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
387: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
388: int i;
389:
390: for (i = 0; i < n; i++) {
391: bitmap[i] = (pool_item_bitmap_t)-1;
392: }
393: }
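/*
 * Worked example for the bitmap scheme above: with the 32-bit
 * pool_item_bitmap_t, item index 37 lives in ph_bitmap[37 / 32] ==
 * ph_bitmap[1] under mask 1 << (37 & 31) == 1 << 5.  A set bit marks
 * a free item: pr_item_notouch_init() sets every bit,
 * pr_item_notouch_get() clears the first set bit found by ffs32(),
 * and pr_item_notouch_put() sets it again.
 */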
394:
1.110 perry 395: static inline int
1.88 chs 396: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
397: {
1.121 yamt 398:
399: /*
 400: * We consider a pool_item_header with a smaller ph_page to be bigger.
 401: * (This unnatural ordering is for the benefit of pr_find_pagehead.)
402: */
403:
1.88 chs 404: if (a->ph_page < b->ph_page)
1.121 yamt 405: return (1);
406: else if (a->ph_page > b->ph_page)
1.88 chs 407: return (-1);
408: else
409: return (0);
410: }
411:
412: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
413: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
414:
1.3 pk 415: /*
1.121 yamt 416: * Return the pool page header based on item address.
1.3 pk 417: */
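/*
 * For page-aligned pools this is plain address arithmetic: with a
 * 4096-byte backing page, an item at 0xc1234567 masked with
 * pa_pagemask (~0xfff) yields page 0xc1234000; the header then either
 * sits at page + pr_phoffset (PR_PHINPAGE) or is looked up in the
 * splay tree.  PR_NOALIGN pools cannot mask, so the tree is searched
 * for the page whose range contains the address.
 */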
1.110 perry 418: static inline struct pool_item_header *
1.121 yamt 419: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 420: {
1.88 chs 421: struct pool_item_header *ph, tmp;
1.3 pk 422:
1.121 yamt 423: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.128 christos 424: tmp.ph_page = (void *)(uintptr_t)v;
1.121 yamt 425: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
426: if (ph == NULL) {
427: ph = SPLAY_ROOT(&pp->pr_phtree);
428: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
429: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
430: }
431: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
432: }
433: } else {
1.128 christos 434: void *page =
435: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 436:
437: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 438: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 439: } else {
440: tmp.ph_page = page;
441: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
442: }
443: }
1.3 pk 444:
1.121 yamt 445: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 446: ((char *)ph->ph_page <= (char *)v &&
447: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 448: return ph;
1.3 pk 449: }
450:
1.101 thorpej 451: static void
452: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
453: {
454: struct pool_item_header *ph;
455:
456: while ((ph = LIST_FIRST(pq)) != NULL) {
457: LIST_REMOVE(ph, ph_pagelist);
458: pool_allocator_free(pp, ph->ph_page);
1.134 ad 459: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 460: pool_put(pp->pr_phpool, ph);
461: }
462: }
463:
1.3 pk 464: /*
465: * Remove a page from the pool.
466: */
1.110 perry 467: static inline void
1.61 chs 468: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
469: struct pool_pagelist *pq)
1.3 pk 470: {
471:
1.134 ad 472: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 473:
1.3 pk 474: /*
1.7 thorpej 475: * If the page was idle, decrement the idle page count.
1.3 pk 476: */
1.6 thorpej 477: if (ph->ph_nmissing == 0) {
478: #ifdef DIAGNOSTIC
479: if (pp->pr_nidle == 0)
480: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 481: if (pp->pr_nitems < pp->pr_itemsperpage)
482: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 483: #endif
484: pp->pr_nidle--;
485: }
1.7 thorpej 486:
1.20 thorpej 487: pp->pr_nitems -= pp->pr_itemsperpage;
488:
1.7 thorpej 489: /*
1.101 thorpej 490: * Unlink the page from the pool and queue it for release.
1.7 thorpej 491: */
1.88 chs 492: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 493: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
494: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 495: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
496:
1.7 thorpej 497: pp->pr_npages--;
498: pp->pr_npagefree++;
1.6 thorpej 499:
1.88 chs 500: pool_update_curpage(pp);
1.3 pk 501: }
502:
1.126 thorpej 503: static bool
1.117 yamt 504: pa_starved_p(struct pool_allocator *pa)
505: {
506:
507: if (pa->pa_backingmap != NULL) {
508: return vm_map_starved_p(pa->pa_backingmap);
509: }
1.127 thorpej 510: return false;
1.117 yamt 511: }
512:
513: static int
1.124 yamt 514: pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
1.117 yamt 515: {
516: struct pool *pp = obj;
517: struct pool_allocator *pa = pp->pr_alloc;
518:
519: KASSERT(&pp->pr_reclaimerentry == ce);
520: pool_reclaim(pp);
521: if (!pa_starved_p(pa)) {
522: return CALLBACK_CHAIN_ABORT;
523: }
524: return CALLBACK_CHAIN_CONTINUE;
525: }
526:
527: static void
528: pool_reclaim_register(struct pool *pp)
529: {
530: struct vm_map *map = pp->pr_alloc->pa_backingmap;
531: int s;
532:
533: if (map == NULL) {
534: return;
535: }
536:
537: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
538: callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
539: &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
540: splx(s);
541: }
542:
543: static void
544: pool_reclaim_unregister(struct pool *pp)
545: {
546: struct vm_map *map = pp->pr_alloc->pa_backingmap;
547: int s;
548:
549: if (map == NULL) {
550: return;
551: }
552:
553: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
554: callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
555: &pp->pr_reclaimerentry);
556: splx(s);
557: }
558:
559: static void
560: pa_reclaim_register(struct pool_allocator *pa)
561: {
562: struct vm_map *map = *pa->pa_backingmapptr;
563: struct pool *pp;
564:
565: KASSERT(pa->pa_backingmap == NULL);
566: if (map == NULL) {
567: SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
568: return;
569: }
570: pa->pa_backingmap = map;
571: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
572: pool_reclaim_register(pp);
573: }
574: }
575:
1.3 pk 576: /*
1.94 simonb 577: * Initialize all the pools listed in the "pools" link set.
578: */
579: void
1.117 yamt 580: pool_subsystem_init(void)
1.94 simonb 581: {
1.117 yamt 582: struct pool_allocator *pa;
1.94 simonb 583: __link_set_decl(pools, struct link_pool_init);
584: struct link_pool_init * const *pi;
585:
1.134 ad 586: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
587: cv_init(&pool_busy, "poolbusy");
588:
1.94 simonb 589: __link_set_foreach(pi, pools)
590: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
591: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
1.129 ad 592: (*pi)->palloc, (*pi)->ipl);
1.117 yamt 593:
594: while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
595: KASSERT(pa->pa_backingmapptr != NULL);
596: KASSERT(*pa->pa_backingmapptr != NULL);
597: SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
598: pa_reclaim_register(pa);
599: }
1.134 ad 600:
601: pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
602: 0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
603:
604: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
605: 0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
1.94 simonb 606: }
607:
608: /*
1.3 pk 609: * Initialize the given pool resource structure.
610: *
611: * We export this routine to allow other kernel parts to declare
612: * static pools that must be initialized before malloc() is available.
613: */
614: void
1.42 thorpej 615: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 616: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 617: {
1.116 simonb 618: #ifdef DEBUG
619: struct pool *pp1;
620: #endif
1.92 enami 621: size_t trysize, phsize;
1.134 ad 622: int off, slack;
1.3 pk 623:
1.116 simonb 624: #ifdef DEBUG
625: /*
626: * Check that the pool hasn't already been initialised and
627: * added to the list of all pools.
628: */
629: LIST_FOREACH(pp1, &pool_head, pr_poollist) {
630: if (pp == pp1)
631: panic("pool_init: pool %s already initialised",
632: wchan);
633: }
634: #endif
635:
1.25 thorpej 636: #ifdef POOL_DIAGNOSTIC
637: /*
638: * Always log if POOL_DIAGNOSTIC is defined.
639: */
640: if (pool_logsize != 0)
641: flags |= PR_LOGGING;
642: #endif
643:
1.66 thorpej 644: if (palloc == NULL)
645: palloc = &pool_allocator_kmem;
1.112 bjh21 646: #ifdef POOL_SUBPAGE
647: if (size > palloc->pa_pagesz) {
648: if (palloc == &pool_allocator_kmem)
649: palloc = &pool_allocator_kmem_fullpage;
650: else if (palloc == &pool_allocator_nointr)
651: palloc = &pool_allocator_nointr_fullpage;
652: }
1.66 thorpej 653: #endif /* POOL_SUBPAGE */
654: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.112 bjh21 655: if (palloc->pa_pagesz == 0)
1.66 thorpej 656: palloc->pa_pagesz = PAGE_SIZE;
657:
658: TAILQ_INIT(&palloc->pa_list);
659:
1.134 ad 660: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 661: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
662: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.117 yamt 663:
664: if (palloc->pa_backingmapptr != NULL) {
665: pa_reclaim_register(palloc);
666: }
1.66 thorpej 667: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 668: }
1.3 pk 669:
670: if (align == 0)
671: align = ALIGN(1);
1.14 thorpej 672:
1.120 yamt 673: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 674: size = sizeof(struct pool_item);
1.3 pk 675:
1.78 thorpej 676: size = roundup(size, align);
1.66 thorpej 677: #ifdef DIAGNOSTIC
678: if (size > palloc->pa_pagesz)
1.121 yamt 679: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 680: #endif
1.35 pk 681:
1.3 pk 682: /*
683: * Initialize the pool structure.
684: */
1.88 chs 685: LIST_INIT(&pp->pr_emptypages);
686: LIST_INIT(&pp->pr_fullpages);
687: LIST_INIT(&pp->pr_partpages);
1.134 ad 688: pp->pr_cache = NULL;
1.3 pk 689: pp->pr_curpage = NULL;
690: pp->pr_npages = 0;
691: pp->pr_minitems = 0;
692: pp->pr_minpages = 0;
693: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 694: pp->pr_roflags = flags;
695: pp->pr_flags = 0;
1.35 pk 696: pp->pr_size = size;
1.3 pk 697: pp->pr_align = align;
698: pp->pr_wchan = wchan;
1.66 thorpej 699: pp->pr_alloc = palloc;
1.20 thorpej 700: pp->pr_nitems = 0;
701: pp->pr_nout = 0;
702: pp->pr_hardlimit = UINT_MAX;
703: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 704: pp->pr_hardlimit_ratecap.tv_sec = 0;
705: pp->pr_hardlimit_ratecap.tv_usec = 0;
706: pp->pr_hardlimit_warning_last.tv_sec = 0;
707: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 708: pp->pr_drain_hook = NULL;
709: pp->pr_drain_hook_arg = NULL;
1.125 ad 710: pp->pr_freecheck = NULL;
1.3 pk 711:
712: /*
 713: * Decide whether to put the page header off-page, to avoid
 1.92 enami 714: * wasting too large a part of the page or too big an item.
 715: * Off-page page headers go into a splay tree, so we can match
 716: * a returned item with its header based on the page address.
 717: * We use 1/16 of the page size and about 8 times the item
 718: * size as the threshold (XXX: tune).
719: *
720: * However, we'll put the header into the page if we can put
721: * it without wasting any items.
722: *
723: * Silently enforce `0 <= ioff < align'.
1.3 pk 724: */
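	/*
	 * Worked example, assuming a 4096-byte page and a 32-byte
	 * (aligned) page header: the threshold is MIN(4096 / 16, 32 * 8)
	 * == 256, so a 128-byte item keeps its header in the page (the
	 * header occupies the last 32 bytes), while a 1024-byte item gets
	 * an off-page header -- unless the header happens to fit in the
	 * slack anyway, i.e. trysize / size == (trysize - phsize) / size.
	 */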
1.92 enami 725: pp->pr_itemoffset = ioff %= align;
726: /* See the comment below about reserved bytes. */
727: trysize = palloc->pa_pagesz - ((align - ioff) % align);
728: phsize = ALIGN(sizeof(struct pool_item_header));
1.121 yamt 729: if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 730: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
731: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 732: /* Use the end of the page for the page header */
1.20 thorpej 733: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 734: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 735: } else {
1.3 pk 736: /* The page header will be taken from our page header pool */
737: pp->pr_phoffset = 0;
1.66 thorpej 738: off = palloc->pa_pagesz;
1.88 chs 739: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 740: }
1.1 pk 741:
1.3 pk 742: /*
743: * Alignment is to take place at `ioff' within the item. This means
744: * we must reserve up to `align - 1' bytes on the page to allow
745: * appropriate positioning of each item.
746: */
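	/*
	 * For example, with align == 64 and ioff == 16, byte 16 of each
	 * item must land on a 64-byte boundary, so (64 - 16) % 64 == 48
	 * bytes of the page are reserved and pr_itemsperpage becomes
	 * (off - 48) / pr_size.
	 */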
747: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 748: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 749: if ((pp->pr_roflags & PR_NOTOUCH)) {
750: int idx;
751:
752: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
753: idx++) {
754: /* nothing */
755: }
756: if (idx >= PHPOOL_MAX) {
757: /*
 758: * if you see this panic, consider tweaking
759: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
760: */
761: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
762: pp->pr_wchan, pp->pr_itemsperpage);
763: }
764: pp->pr_phpool = &phpool[idx];
765: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
766: pp->pr_phpool = &phpool[0];
767: }
768: #if defined(DIAGNOSTIC)
769: else {
770: pp->pr_phpool = NULL;
771: }
772: #endif
1.3 pk 773:
774: /*
775: * Use the slack between the chunks and the page header
776: * for "cache coloring".
777: */
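	/*
	 * For example, with 96 bytes of slack and align == 32,
	 * pr_maxcolor is 96 and successive pages start their items at
	 * offsets 0, 32, 64, 96, 0, ... so that items at equal indices
	 * in different pages do not all fall on the same cache lines.
	 */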
778: slack = off - pp->pr_itemsperpage * pp->pr_size;
779: pp->pr_maxcolor = (slack / align) * align;
780: pp->pr_curcolor = 0;
781:
782: pp->pr_nget = 0;
783: pp->pr_nfail = 0;
784: pp->pr_nput = 0;
785: pp->pr_npagealloc = 0;
786: pp->pr_npagefree = 0;
1.1 pk 787: pp->pr_hiwat = 0;
1.8 thorpej 788: pp->pr_nidle = 0;
1.134 ad 789: pp->pr_refcnt = 0;
1.3 pk 790:
1.59 thorpej 791: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 792: if (flags & PR_LOGGING) {
793: if (kmem_map == NULL ||
794: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
795: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 796: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 797: pp->pr_curlogentry = 0;
798: pp->pr_logsize = pool_logsize;
799: }
1.59 thorpej 800: #endif
1.25 thorpej 801:
802: pp->pr_entered_file = NULL;
803: pp->pr_entered_line = 0;
1.3 pk 804:
1.134 ad 805: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
806: cv_init(&pp->pr_cv, wchan);
807: pp->pr_ipl = ipl;
1.1 pk 808:
1.3 pk 809: /*
1.43 thorpej 810: * Initialize private page header pool and cache magazine pool if we
811: * haven't done so yet.
1.23 thorpej 812: * XXX LOCKING.
1.3 pk 813: */
1.97 yamt 814: if (phpool[0].pr_size == 0) {
815: int idx;
816: for (idx = 0; idx < PHPOOL_MAX; idx++) {
817: static char phpool_names[PHPOOL_MAX][6+1+6+1];
818: int nelem;
819: size_t sz;
820:
821: nelem = PHPOOL_FREELIST_NELEM(idx);
822: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
823: "phpool-%d", nelem);
824: sz = sizeof(struct pool_item_header);
825: if (nelem) {
1.135 yamt 826: sz = offsetof(struct pool_item_header,
827: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
1.97 yamt 828: }
829: pool_init(&phpool[idx], sz, 0, 0, 0,
1.129 ad 830: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.97 yamt 831: }
1.62 bjh21 832: #ifdef POOL_SUBPAGE
833: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.129 ad 834: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
1.62 bjh21 835: #endif
1.134 ad 836: pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0,
837: "cachegrp", &pool_allocator_meta, IPL_VM);
1.1 pk 838: }
839:
1.134 ad 840: if (__predict_true(!cold)) {
841: /* Insert into the list of all pools. */
842: mutex_enter(&pool_head_lock);
843: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
844: mutex_exit(&pool_head_lock);
845:
846: /* Insert this into the list of pools using this allocator. */
847: mutex_enter(&palloc->pa_lock);
848: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
849: mutex_exit(&palloc->pa_lock);
850: } else {
851: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
852: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
853: }
1.66 thorpej 854:
1.117 yamt 855: pool_reclaim_register(pp);
1.1 pk 856: }
857:
858: /*
 859: * De-commission a pool resource.
860: */
861: void
1.42 thorpej 862: pool_destroy(struct pool *pp)
1.1 pk 863: {
1.101 thorpej 864: struct pool_pagelist pq;
1.3 pk 865: struct pool_item_header *ph;
1.43 thorpej 866:
1.101 thorpej 867: /* Remove from global pool list */
1.134 ad 868: mutex_enter(&pool_head_lock);
869: while (pp->pr_refcnt != 0)
870: cv_wait(&pool_busy, &pool_head_lock);
1.102 chs 871: LIST_REMOVE(pp, pr_poollist);
1.101 thorpej 872: if (drainpp == pp)
873: drainpp = NULL;
1.134 ad 874: mutex_exit(&pool_head_lock);
1.101 thorpej 875:
876: /* Remove this pool from its allocator's list of pools. */
1.117 yamt 877: pool_reclaim_unregister(pp);
1.134 ad 878: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 879: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 880: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 881:
1.134 ad 882: mutex_enter(&pp->pr_lock);
1.101 thorpej 883:
1.134 ad 884: KASSERT(pp->pr_cache == NULL);
1.3 pk 885:
886: #ifdef DIAGNOSTIC
1.20 thorpej 887: if (pp->pr_nout != 0) {
1.25 thorpej 888: pr_printlog(pp, NULL, printf);
1.80 provos 889: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 890: pp->pr_nout);
1.3 pk 891: }
892: #endif
1.1 pk 893:
1.101 thorpej 894: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
895: KASSERT(LIST_EMPTY(&pp->pr_partpages));
896:
1.3 pk 897: /* Remove all pages */
1.101 thorpej 898: LIST_INIT(&pq);
1.88 chs 899: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 900: pr_rmpage(pp, ph, &pq);
901:
1.134 ad 902: mutex_exit(&pp->pr_lock);
1.3 pk 903:
1.101 thorpej 904: pr_pagelist_free(pp, &pq);
1.3 pk 905:
1.59 thorpej 906: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 907: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 908: free(pp->pr_log, M_TEMP);
1.59 thorpej 909: #endif
1.134 ad 910:
911: cv_destroy(&pp->pr_cv);
912: mutex_destroy(&pp->pr_lock);
1.1 pk 913: }
914:
1.68 thorpej 915: void
916: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
917: {
918:
919: /* XXX no locking -- must be used just after pool_init() */
920: #ifdef DIAGNOSTIC
921: if (pp->pr_drain_hook != NULL)
922: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
923: #endif
924: pp->pr_drain_hook = fn;
925: pp->pr_drain_hook_arg = arg;
926: }
927:
1.88 chs 928: static struct pool_item_header *
1.128 christos 929: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 930: {
931: struct pool_item_header *ph;
932:
933: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 934: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.134 ad 935: else
1.97 yamt 936: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 937:
938: return (ph);
939: }
1.1 pk 940:
941: /*
1.134 ad 942: * Grab an item from the pool.
1.1 pk 943: */
1.3 pk 944: void *
1.59 thorpej 945: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 946: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 947: #else
948: pool_get(struct pool *pp, int flags)
949: #endif
1.1 pk 950: {
951: struct pool_item *pi;
1.3 pk 952: struct pool_item_header *ph;
1.55 thorpej 953: void *v;
1.1 pk 954:
1.2 pk 955: #ifdef DIAGNOSTIC
1.95 atatat 956: if (__predict_false(pp->pr_itemsperpage == 0))
957: panic("pool_get: pool %p: pr_itemsperpage is zero, "
958: "pool not initialized?", pp);
1.84 thorpej 959: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 960: (flags & PR_WAITOK) != 0))
1.77 matt 961: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 962:
1.102 chs 963: #endif /* DIAGNOSTIC */
1.58 thorpej 964: #ifdef LOCKDEBUG
965: if (flags & PR_WAITOK)
1.119 yamt 966: ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 967: #endif
1.1 pk 968:
1.134 ad 969: mutex_enter(&pp->pr_lock);
1.25 thorpej 970: pr_enter(pp, file, line);
1.20 thorpej 971:
972: startover:
973: /*
974: * Check to see if we've reached the hard limit. If we have,
975: * and we can wait, then wait until an item has been returned to
976: * the pool.
977: */
978: #ifdef DIAGNOSTIC
1.34 thorpej 979: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 980: pr_leave(pp);
1.134 ad 981: mutex_exit(&pp->pr_lock);
1.20 thorpej 982: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
983: }
984: #endif
1.34 thorpej 985: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 986: if (pp->pr_drain_hook != NULL) {
987: /*
988: * Since the drain hook is going to free things
989: * back to the pool, unlock, call the hook, re-lock,
990: * and check the hardlimit condition again.
991: */
992: pr_leave(pp);
1.134 ad 993: mutex_exit(&pp->pr_lock);
1.68 thorpej 994: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 995: mutex_enter(&pp->pr_lock);
1.68 thorpej 996: pr_enter(pp, file, line);
997: if (pp->pr_nout < pp->pr_hardlimit)
998: goto startover;
999: }
1000:
1.29 sommerfe 1001: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 1002: /*
1003: * XXX: A warning isn't logged in this case. Should
1004: * it be?
1005: */
1006: pp->pr_flags |= PR_WANTED;
1.25 thorpej 1007: pr_leave(pp);
1.134 ad 1008: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.25 thorpej 1009: pr_enter(pp, file, line);
1.20 thorpej 1010: goto startover;
1011: }
1.31 thorpej 1012:
1013: /*
1014: * Log a message that the hard limit has been hit.
1015: */
1016: if (pp->pr_hardlimit_warning != NULL &&
1017: ratecheck(&pp->pr_hardlimit_warning_last,
1018: &pp->pr_hardlimit_ratecap))
1019: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 1020:
1021: pp->pr_nfail++;
1022:
1.25 thorpej 1023: pr_leave(pp);
1.134 ad 1024: mutex_exit(&pp->pr_lock);
1.20 thorpej 1025: return (NULL);
1026: }
1027:
1.3 pk 1028: /*
1029: * The convention we use is that if `curpage' is not NULL, then
1030: * it points at a non-empty bucket. In particular, `curpage'
1031: * never points at a page header which has PR_PHINPAGE set and
1032: * has no items in its bucket.
1033: */
1.20 thorpej 1034: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 1035: int error;
1036:
1.20 thorpej 1037: #ifdef DIAGNOSTIC
1038: if (pp->pr_nitems != 0) {
1.134 ad 1039: mutex_exit(&pp->pr_lock);
1.20 thorpej 1040: printf("pool_get: %s: curpage NULL, nitems %u\n",
1041: pp->pr_wchan, pp->pr_nitems);
1.80 provos 1042: panic("pool_get: nitems inconsistent");
1.20 thorpej 1043: }
1044: #endif
1045:
1.21 thorpej 1046: /*
1047: * Call the back-end page allocator for more memory.
1048: * Release the pool lock, as the back-end page allocator
1049: * may block.
1050: */
1.25 thorpej 1051: pr_leave(pp);
1.113 yamt 1052: error = pool_grow(pp, flags);
1053: pr_enter(pp, file, line);
1054: if (error != 0) {
1.21 thorpej 1055: /*
1.55 thorpej 1056: * We were unable to allocate a page or item
1057: * header, but we released the lock during
1058: * allocation, so perhaps items were freed
1059: * back to the pool. Check for this case.
1.21 thorpej 1060: */
1061: if (pp->pr_curpage != NULL)
1062: goto startover;
1.15 pk 1063:
1.117 yamt 1064: pp->pr_nfail++;
1.25 thorpej 1065: pr_leave(pp);
1.134 ad 1066: mutex_exit(&pp->pr_lock);
1.117 yamt 1067: return (NULL);
1.1 pk 1068: }
1.3 pk 1069:
1.20 thorpej 1070: /* Start the allocation process over. */
1071: goto startover;
1.3 pk 1072: }
1.97 yamt 1073: if (pp->pr_roflags & PR_NOTOUCH) {
1074: #ifdef DIAGNOSTIC
1075: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1076: pr_leave(pp);
1.134 ad 1077: mutex_exit(&pp->pr_lock);
1.97 yamt 1078: panic("pool_get: %s: page empty", pp->pr_wchan);
1079: }
1080: #endif
1081: v = pr_item_notouch_get(pp, ph);
1082: #ifdef POOL_DIAGNOSTIC
1083: pr_log(pp, v, PRLOG_GET, file, line);
1084: #endif
1085: } else {
1.102 chs 1086: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 1087: if (__predict_false(v == NULL)) {
1088: pr_leave(pp);
1.134 ad 1089: mutex_exit(&pp->pr_lock);
1.97 yamt 1090: panic("pool_get: %s: page empty", pp->pr_wchan);
1091: }
1.20 thorpej 1092: #ifdef DIAGNOSTIC
1.97 yamt 1093: if (__predict_false(pp->pr_nitems == 0)) {
1094: pr_leave(pp);
1.134 ad 1095: mutex_exit(&pp->pr_lock);
1.97 yamt 1096: printf("pool_get: %s: items on itemlist, nitems %u\n",
1097: pp->pr_wchan, pp->pr_nitems);
1098: panic("pool_get: nitems inconsistent");
1099: }
1.65 enami 1100: #endif
1.56 sommerfe 1101:
1.65 enami 1102: #ifdef POOL_DIAGNOSTIC
1.97 yamt 1103: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 1104: #endif
1.3 pk 1105:
1.65 enami 1106: #ifdef DIAGNOSTIC
1.97 yamt 1107: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1108: pr_printlog(pp, pi, printf);
1109: panic("pool_get(%s): free list modified: "
1110: "magic=%x; page %p; item addr %p\n",
1111: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1112: }
1.3 pk 1113: #endif
1114:
1.97 yamt 1115: /*
1116: * Remove from item list.
1117: */
1.102 chs 1118: LIST_REMOVE(pi, pi_list);
1.97 yamt 1119: }
1.20 thorpej 1120: pp->pr_nitems--;
1121: pp->pr_nout++;
1.6 thorpej 1122: if (ph->ph_nmissing == 0) {
1123: #ifdef DIAGNOSTIC
1.34 thorpej 1124: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 1125: panic("pool_get: nidle inconsistent");
1126: #endif
1127: pp->pr_nidle--;
1.88 chs 1128:
1129: /*
1130: * This page was previously empty. Move it to the list of
1131: * partially-full pages. This page is already curpage.
1132: */
1133: LIST_REMOVE(ph, ph_pagelist);
1134: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 1135: }
1.3 pk 1136: ph->ph_nmissing++;
1.97 yamt 1137: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1138: #ifdef DIAGNOSTIC
1.97 yamt 1139: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 1140: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1141: pr_leave(pp);
1.134 ad 1142: mutex_exit(&pp->pr_lock);
1.21 thorpej 1143: panic("pool_get: %s: nmissing inconsistent",
1144: pp->pr_wchan);
1145: }
1146: #endif
1.3 pk 1147: /*
1.88 chs 1148: * This page is now full. Move it to the full list
1149: * and select a new current page.
1.3 pk 1150: */
1.88 chs 1151: LIST_REMOVE(ph, ph_pagelist);
1152: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1153: pool_update_curpage(pp);
1.1 pk 1154: }
1.3 pk 1155:
1156: pp->pr_nget++;
1.111 christos 1157: pr_leave(pp);
1.20 thorpej 1158:
1159: /*
1160: * If we have a low water mark and we are now below that low
1161: * water mark, add more items to the pool.
1162: */
1.53 thorpej 1163: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1164: /*
1165: * XXX: Should we log a warning? Should we set up a timeout
1166: * to try again in a second or so? The latter could break
1167: * a caller's assumptions about interrupt protection, etc.
1168: */
1169: }
1170:
1.134 ad 1171: mutex_exit(&pp->pr_lock);
1.125 ad 1172: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
1173: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 1174: return (v);
1175: }
1176:
1177: /*
1.43 thorpej 1178: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1179: */
1.43 thorpej 1180: static void
1.101 thorpej 1181: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1182: {
1183: struct pool_item *pi = v;
1.3 pk 1184: struct pool_item_header *ph;
1185:
1.134 ad 1186: KASSERT(mutex_owned(&pp->pr_lock));
1.125 ad 1187: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 1188: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 1189:
1.30 thorpej 1190: #ifdef DIAGNOSTIC
1.34 thorpej 1191: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1192: printf("pool %s: putting with none out\n",
1193: pp->pr_wchan);
1194: panic("pool_put");
1195: }
1196: #endif
1.3 pk 1197:
1.121 yamt 1198: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.25 thorpej 1199: pr_printlog(pp, NULL, printf);
1.3 pk 1200: panic("pool_put: %s: page header missing", pp->pr_wchan);
1201: }
1.28 thorpej 1202:
1.3 pk 1203: /*
1204: * Return to item list.
1205: */
1.97 yamt 1206: if (pp->pr_roflags & PR_NOTOUCH) {
1207: pr_item_notouch_put(pp, ph, v);
1208: } else {
1.2 pk 1209: #ifdef DIAGNOSTIC
1.97 yamt 1210: pi->pi_magic = PI_MAGIC;
1.3 pk 1211: #endif
1.32 chs 1212: #ifdef DEBUG
1.97 yamt 1213: {
1214: int i, *ip = v;
1.32 chs 1215:
1.97 yamt 1216: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1217: *ip++ = PI_MAGIC;
1218: }
1.32 chs 1219: }
1220: #endif
1221:
1.102 chs 1222: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1223: }
1.79 thorpej 1224: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1225: ph->ph_nmissing--;
1226: pp->pr_nput++;
1.20 thorpej 1227: pp->pr_nitems++;
1228: pp->pr_nout--;
1.3 pk 1229:
1230: /* Cancel "pool empty" condition if it exists */
1231: if (pp->pr_curpage == NULL)
1232: pp->pr_curpage = ph;
1233:
1234: if (pp->pr_flags & PR_WANTED) {
1235: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1236: if (ph->ph_nmissing == 0)
1237: pp->pr_nidle++;
1.134 ad 1238: cv_broadcast(&pp->pr_cv);
1.3 pk 1239: return;
1240: }
1241:
1242: /*
1.88 chs 1243: * If this page is now empty, do one of two things:
1.21 thorpej 1244: *
1.88 chs 1245: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1246: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1247: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1248: * CLAIM.
1.21 thorpej 1249: *
1.88 chs 1250: * (2) Otherwise, move the page to the empty page list.
1251: *
1252: * Either way, select a new current page (so we use a partially-full
1253: * page if one is available).
1.3 pk 1254: */
1255: if (ph->ph_nmissing == 0) {
1.6 thorpej 1256: pp->pr_nidle++;
1.90 thorpej 1257: if (pp->pr_npages > pp->pr_minpages &&
1258: (pp->pr_npages > pp->pr_maxpages ||
1.117 yamt 1259: pa_starved_p(pp->pr_alloc))) {
1.101 thorpej 1260: pr_rmpage(pp, ph, pq);
1.3 pk 1261: } else {
1.88 chs 1262: LIST_REMOVE(ph, ph_pagelist);
1263: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1264:
1.21 thorpej 1265: /*
1266: * Update the timestamp on the page. A page must
1267: * be idle for some period of time before it can
1268: * be reclaimed by the pagedaemon. This minimizes
1269: * ping-pong'ing for memory.
1270: */
1.118 kardel 1271: getmicrotime(&ph->ph_time);
1.1 pk 1272: }
1.88 chs 1273: pool_update_curpage(pp);
1.1 pk 1274: }
1.88 chs 1275:
1.21 thorpej 1276: /*
1.88 chs 1277: * If the page was previously completely full, move it to the
1278: * partially-full list and make it the current page. The next
1279: * allocation will get the item from this page, instead of
1280: * further fragmenting the pool.
1.21 thorpej 1281: */
1282: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1283: LIST_REMOVE(ph, ph_pagelist);
1284: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1285: pp->pr_curpage = ph;
1286: }
1.43 thorpej 1287: }
1288:
1289: /*
1.134 ad 1290: * Return resource to the pool.
1.43 thorpej 1291: */
1.59 thorpej 1292: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1293: void
1294: _pool_put(struct pool *pp, void *v, const char *file, long line)
1295: {
1.101 thorpej 1296: struct pool_pagelist pq;
1297:
1298: LIST_INIT(&pq);
1.43 thorpej 1299:
1.134 ad 1300: mutex_enter(&pp->pr_lock);
1.43 thorpej 1301: pr_enter(pp, file, line);
1302:
1.56 sommerfe 1303: pr_log(pp, v, PRLOG_PUT, file, line);
1304:
1.101 thorpej 1305: pool_do_put(pp, v, &pq);
1.21 thorpej 1306:
1.25 thorpej 1307: pr_leave(pp);
1.134 ad 1308: mutex_exit(&pp->pr_lock);
1.101 thorpej 1309:
1.102 chs 1310: pr_pagelist_free(pp, &pq);
1.1 pk 1311: }
1.57 sommerfe 1312: #undef pool_put
1.59 thorpej 1313: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1314:
1.56 sommerfe 1315: void
1316: pool_put(struct pool *pp, void *v)
1317: {
1.101 thorpej 1318: struct pool_pagelist pq;
1319:
1320: LIST_INIT(&pq);
1.56 sommerfe 1321:
1.134 ad 1322: mutex_enter(&pp->pr_lock);
1.101 thorpej 1323: pool_do_put(pp, v, &pq);
1.134 ad 1324: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1325:
1.102 chs 1326: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1327: }
1.57 sommerfe 1328:
1.59 thorpej 1329: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1330: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1331: #endif
1.74 thorpej 1332:
1333: /*
1.113 yamt 1334: * pool_grow: grow a pool by a page.
1335: *
1336: * => called with pool locked.
1337: * => unlock and relock the pool.
1338: * => return with pool locked.
1339: */
1340:
1341: static int
1342: pool_grow(struct pool *pp, int flags)
1343: {
1344: struct pool_item_header *ph = NULL;
1345: char *cp;
1346:
1.134 ad 1347: mutex_exit(&pp->pr_lock);
1.113 yamt 1348: cp = pool_allocator_alloc(pp, flags);
1349: if (__predict_true(cp != NULL)) {
1350: ph = pool_alloc_item_header(pp, cp, flags);
1351: }
1352: if (__predict_false(cp == NULL || ph == NULL)) {
1353: if (cp != NULL) {
1354: pool_allocator_free(pp, cp);
1355: }
1.134 ad 1356: mutex_enter(&pp->pr_lock);
1.113 yamt 1357: return ENOMEM;
1358: }
1359:
1.134 ad 1360: mutex_enter(&pp->pr_lock);
1.113 yamt 1361: pool_prime_page(pp, cp, ph);
1362: pp->pr_npagealloc++;
1363: return 0;
1364: }
1365:
1366: /*
1.74 thorpej 1367: * Add N items to the pool.
1368: */
1369: int
1370: pool_prime(struct pool *pp, int n)
1371: {
1.75 simonb 1372: int newpages;
1.113 yamt 1373: int error = 0;
1.74 thorpej 1374:
1.134 ad 1375: mutex_enter(&pp->pr_lock);
1.74 thorpej 1376:
1377: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1378:
1379: while (newpages-- > 0) {
1.113 yamt 1380: error = pool_grow(pp, PR_NOWAIT);
1381: if (error) {
1.74 thorpej 1382: break;
1383: }
1384: pp->pr_minpages++;
1385: }
1386:
1387: if (pp->pr_minpages >= pp->pr_maxpages)
1388: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1389:
1.134 ad 1390: mutex_exit(&pp->pr_lock);
1.113 yamt 1391: return error;
1.74 thorpej 1392: }
1.55 thorpej 1393:
1394: /*
1.3 pk 1395: * Add a page worth of items to the pool.
1.21 thorpej 1396: *
1397: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1398: */
1.55 thorpej 1399: static void
1.128 christos 1400: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1401: {
1402: struct pool_item *pi;
1.128 christos 1403: void *cp = storage;
1.125 ad 1404: const unsigned int align = pp->pr_align;
1405: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1406: int n;
1.36 pk 1407:
1.134 ad 1408: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1409:
1.66 thorpej 1410: #ifdef DIAGNOSTIC
1.121 yamt 1411: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1412: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1413: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1414: #endif
1.3 pk 1415:
1416: /*
1417: * Insert page header.
1418: */
1.88 chs 1419: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1420: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1421: ph->ph_page = storage;
1422: ph->ph_nmissing = 0;
1.118 kardel 1423: getmicrotime(&ph->ph_time);
1.88 chs 1424: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1425: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1426:
1.6 thorpej 1427: pp->pr_nidle++;
1428:
1.3 pk 1429: /*
1430: * Color this page.
1431: */
1.128 christos 1432: cp = (char *)cp + pp->pr_curcolor;
1.3 pk 1433: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1434: pp->pr_curcolor = 0;
1435:
1436: /*
 1437: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1438: */
1439: if (ioff != 0)
1.128 christos 1440: cp = (char *)cp + align - ioff;
1.3 pk 1441:
1.125 ad 1442: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1443:
1.3 pk 1444: /*
1445: * Insert remaining chunks on the bucket list.
1446: */
1447: n = pp->pr_itemsperpage;
1.20 thorpej 1448: pp->pr_nitems += n;
1.3 pk 1449:
1.97 yamt 1450: if (pp->pr_roflags & PR_NOTOUCH) {
1.135 yamt 1451: pr_item_notouch_init(pp, ph);
1.97 yamt 1452: } else {
1453: while (n--) {
1454: pi = (struct pool_item *)cp;
1.78 thorpej 1455:
1.97 yamt 1456: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1457:
1.97 yamt 1458: /* Insert on page list */
1.102 chs 1459: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1460: #ifdef DIAGNOSTIC
1.97 yamt 1461: pi->pi_magic = PI_MAGIC;
1.3 pk 1462: #endif
1.128 christos 1463: cp = (char *)cp + pp->pr_size;
1.125 ad 1464:
1465: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1466: }
1.3 pk 1467: }
1468:
1469: /*
1470: * If the pool was depleted, point at the new page.
1471: */
1472: if (pp->pr_curpage == NULL)
1473: pp->pr_curpage = ph;
1474:
1475: if (++pp->pr_npages > pp->pr_hiwat)
1476: pp->pr_hiwat = pp->pr_npages;
1477: }
1478:
1.20 thorpej 1479: /*
1.52 thorpej 1480: * Used by pool_get() when nitems drops below the low water mark, to
1.88 chs 1481: * catch pr_nitems up with the low water mark.
1.20 thorpej 1482: *
1.21 thorpej 1483: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1484: *
1.73 thorpej 1485: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1486: * with it locked.
1487: */
1488: static int
1.42 thorpej 1489: pool_catchup(struct pool *pp)
1.20 thorpej 1490: {
1491: int error = 0;
1492:
1.54 thorpej 1493: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1494: error = pool_grow(pp, PR_NOWAIT);
1495: if (error) {
1.20 thorpej 1496: break;
1497: }
1498: }
1.113 yamt 1499: return error;
1.20 thorpej 1500: }
1501:
1.88 chs 1502: static void
1503: pool_update_curpage(struct pool *pp)
1504: {
1505:
1506: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1507: if (pp->pr_curpage == NULL) {
1508: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1509: }
1510: }
1511:
1.3 pk 1512: void
1.42 thorpej 1513: pool_setlowat(struct pool *pp, int n)
1.3 pk 1514: {
1.15 pk 1515:
1.134 ad 1516: mutex_enter(&pp->pr_lock);
1.21 thorpej 1517:
1.3 pk 1518: pp->pr_minitems = n;
1.15 pk 1519: pp->pr_minpages = (n == 0)
1520: ? 0
1.18 thorpej 1521: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1522:
1523: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1524: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1525: /*
1526: * XXX: Should we log a warning? Should we set up a timeout
1527: * to try again in a second or so? The latter could break
1528: * a caller's assumptions about interrupt protection, etc.
1529: */
1530: }
1.21 thorpej 1531:
1.134 ad 1532: mutex_exit(&pp->pr_lock);
1.3 pk 1533: }
1534:
1535: void
1.42 thorpej 1536: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1537: {
1.15 pk 1538:
1.134 ad 1539: mutex_enter(&pp->pr_lock);
1.21 thorpej 1540:
1.15 pk 1541: pp->pr_maxpages = (n == 0)
1542: ? 0
1.18 thorpej 1543: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1544:
1.134 ad 1545: mutex_exit(&pp->pr_lock);
1.3 pk 1546: }
1547:
1.20 thorpej 1548: void
1.42 thorpej 1549: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1550: {
1551:
1.134 ad 1552: mutex_enter(&pp->pr_lock);
1.20 thorpej 1553:
1554: pp->pr_hardlimit = n;
1555: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1556: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1557: pp->pr_hardlimit_warning_last.tv_sec = 0;
1558: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1559:
1560: /*
1.21 thorpej 1561: * In-line version of pool_sethiwat(), because we don't want to
1562: * release the lock.
1.20 thorpej 1563: */
1564: pp->pr_maxpages = (n == 0)
1565: ? 0
1566: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1567:
1.134 ad 1568: mutex_exit(&pp->pr_lock);
1.20 thorpej 1569: }
1.3 pk 1570:
1571: /*
1572: * Release all complete pages that have not been used recently.
1573: */
1.66 thorpej 1574: int
1.59 thorpej 1575: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1576: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1577: #else
1578: pool_reclaim(struct pool *pp)
1579: #endif
1.3 pk 1580: {
1581: struct pool_item_header *ph, *phnext;
1.61 chs 1582: struct pool_pagelist pq;
1.102 chs 1583: struct timeval curtime, diff;
1.134 ad 1584: bool klock;
1585: int rv;
1.3 pk 1586:
1.68 thorpej 1587: if (pp->pr_drain_hook != NULL) {
1588: /*
1589: * The drain hook must be called with the pool unlocked.
1590: */
1591: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1592: }
1593:
1.134 ad 1594: /*
 1595: * XXXSMP We take kernel_lock because mutexes at IPL_SOFTXXX are
 1596: * still spinlocks and we are called from the pagedaemon without
 1597: * kernel_lock.  This does not apply to IPL_SOFTBIO.
1598: */
1599: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1600: pp->pr_ipl == IPL_SOFTSERIAL) {
1601: KERNEL_LOCK(1, NULL);
1602: klock = true;
1603: } else
1604: klock = false;
1605:
1606: /* Reclaim items from the pool's cache (if any). */
1607: if (pp->pr_cache != NULL)
1608: pool_cache_invalidate(pp->pr_cache);
1609:
1610: if (mutex_tryenter(&pp->pr_lock) == 0) {
1611: if (klock) {
1612: KERNEL_UNLOCK_ONE(NULL);
1613: }
1.66 thorpej 1614: return (0);
1.134 ad 1615: }
1.25 thorpej 1616: pr_enter(pp, file, line);
1.68 thorpej 1617:
1.88 chs 1618: LIST_INIT(&pq);
1.43 thorpej 1619:
1.118 kardel 1620: getmicrotime(&curtime);
1.21 thorpej 1621:
1.88 chs 1622: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1623: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1624:
1625: /* Check our minimum page claim */
1626: if (pp->pr_npages <= pp->pr_minpages)
1627: break;
1628:
1.88 chs 1629: KASSERT(ph->ph_nmissing == 0);
1630: timersub(&curtime, &ph->ph_time, &diff);
1.117 yamt 1631: if (diff.tv_sec < pool_inactive_time
1632: && !pa_starved_p(pp->pr_alloc))
1.88 chs 1633: continue;
1.21 thorpej 1634:
1.88 chs 1635: /*
1636: * If freeing this page would put us below
1637: * the low water mark, stop now.
1638: */
1639: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1640: pp->pr_minitems)
1641: break;
1.21 thorpej 1642:
1.88 chs 1643: pr_rmpage(pp, ph, &pq);
1.3 pk 1644: }
1645:
1.25 thorpej 1646: pr_leave(pp);
1.134 ad 1647: mutex_exit(&pp->pr_lock);
1648:
1649: if (LIST_EMPTY(&pq))
1650: rv = 0;
1651: else {
1652: pr_pagelist_free(pp, &pq);
1653: rv = 1;
1654: }
1655:
1656: if (klock) {
1657: KERNEL_UNLOCK_ONE(NULL);
1658: }
1.66 thorpej 1659:
1.134 ad 1660: return (rv);
1.3 pk 1661: }
1662:
1663: /*
1.134 ad 1664: * Drain pools, one at a time. This is a two-stage process:
1665: * drain_start kicks off a cross call to drain CPU-level caches
1666: * if the pool has an associated pool_cache. drain_end waits
1667: * for those cross calls to finish, and then drains the cache
1668: * (if any) and pool.
1.131 ad 1669: *
1.134 ad 1670: * Note, must never be called from interrupt context.
1.3 pk 1671: */
1672: void
1.134 ad 1673: pool_drain_start(struct pool **ppp, uint64_t *wp)
1.3 pk 1674: {
1675: struct pool *pp;
1.134 ad 1676:
1677: KASSERT(!LIST_EMPTY(&pool_head));
1.3 pk 1678:
1.61 chs 1679: pp = NULL;
1.134 ad 1680:
1681: /* Find next pool to drain, and add a reference. */
1682: mutex_enter(&pool_head_lock);
1683: do {
1684: if (drainpp == NULL) {
1685: drainpp = LIST_FIRST(&pool_head);
1686: }
1687: if (drainpp != NULL) {
1688: pp = drainpp;
1689: drainpp = LIST_NEXT(pp, pr_poollist);
1690: }
1691: /*
1692: * Skip completely idle pools. We depend on at least
1693: * one pool in the system being active.
1694: */
1695: } while (pp == NULL || pp->pr_npages == 0);
1696: pp->pr_refcnt++;
1697: mutex_exit(&pool_head_lock);
1698:
1699: /* If there is a pool_cache, drain CPU level caches. */
1700: *ppp = pp;
1701: if (pp->pr_cache != NULL) {
1702: *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
1703: pp->pr_cache, NULL);
1704: }
1705: }
1706:
1707: void
1708: pool_drain_end(struct pool *pp, uint64_t where)
1709: {
1710:
1711: if (pp == NULL)
1712: return;
1713:
1714: KASSERT(pp->pr_refcnt > 0);
1715:
1716: /* Wait for remote draining to complete. */
1717: if (pp->pr_cache != NULL)
1718: xc_wait(where);
1719:
             1720: 	/* Drain the cache (if any) and pool. */
1721: pool_reclaim(pp);
1722:
             1723: 	/* Finally, drop the reference taken in pool_drain_start(). */
1724: mutex_enter(&pool_head_lock);
1725: pp->pr_refcnt--;
1726: cv_broadcast(&pool_busy);
1727: mutex_exit(&pool_head_lock);
1.3 pk 1728: }
1729:
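/*
 * A minimal sketch of the two-stage drain described above, as a
 * pagedaemon-style caller might drive it.  The uint64_t token filled
 * in by pool_drain_start() identifies the cross call and is handed
 * back to pool_drain_end() to wait for it.
 */
static void
example_drain_one_pool(void)
{
	struct pool *pp;
	uint64_t where;

	pool_drain_start(&pp, &where);	/* choose a pool, begin xcalls */
	pool_drain_end(pp, where);	/* wait for xcalls, then reclaim */
}
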
1730: /*
1731: * Diagnostic helpers.
1732: */
1733: void
1.42 thorpej 1734: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1735: {
1736:
1.25 thorpej 1737: pool_print1(pp, modif, printf);
1.21 thorpej 1738: }
1739:
1.25 thorpej 1740: void
1.108 yamt 1741: pool_printall(const char *modif, void (*pr)(const char *, ...))
1742: {
1743: struct pool *pp;
1744:
1745: LIST_FOREACH(pp, &pool_head, pr_poollist) {
1746: pool_printit(pp, modif, pr);
1747: }
1748: }
1749:
1750: void
1.42 thorpej 1751: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1752: {
1753:
1754: if (pp == NULL) {
1755: (*pr)("Must specify a pool to print.\n");
1756: return;
1757: }
1758:
1759: pool_print1(pp, modif, pr);
1760: }
1761:
1.21 thorpej 1762: static void
1.124 yamt 1763: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1764: void (*pr)(const char *, ...))
1.88 chs 1765: {
1766: struct pool_item_header *ph;
1767: #ifdef DIAGNOSTIC
1768: struct pool_item *pi;
1769: #endif
1770:
1771: LIST_FOREACH(ph, pl, ph_pagelist) {
1772: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1773: ph->ph_page, ph->ph_nmissing,
1774: (u_long)ph->ph_time.tv_sec,
1775: (u_long)ph->ph_time.tv_usec);
1776: #ifdef DIAGNOSTIC
1.97 yamt 1777: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1778: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1779: if (pi->pi_magic != PI_MAGIC) {
1780: (*pr)("\t\t\titem %p, magic 0x%x\n",
1781: pi, pi->pi_magic);
1782: }
1.88 chs 1783: }
1784: }
1785: #endif
1786: }
1787: }
1788:
1789: static void
1.42 thorpej 1790: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1791: {
1.25 thorpej 1792: struct pool_item_header *ph;
1.134 ad 1793: pool_cache_t pc;
1794: pcg_t *pcg;
1795: pool_cache_cpu_t *cc;
1796: uint64_t cpuhit, cpumiss;
1.44 thorpej 1797: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1798: char c;
1799:
1800: while ((c = *modif++) != '\0') {
1801: if (c == 'l')
1802: print_log = 1;
1803: if (c == 'p')
1804: print_pagelist = 1;
1.44 thorpej 1805: if (c == 'c')
1806: print_cache = 1;
1.25 thorpej 1807: }
1808:
1.134 ad 1809: if ((pc = pp->pr_cache) != NULL) {
1810: (*pr)("POOL CACHE");
1811: } else {
1812: (*pr)("POOL");
1813: }
1814:
1815: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1816: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1817: pp->pr_roflags);
1.66 thorpej 1818: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1819: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1820: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1821: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1822: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1823:
1.134 ad 1824: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1825: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1826: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1827: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1828:
1829: if (print_pagelist == 0)
1830: goto skip_pagelist;
1831:
1.88 chs 1832: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1833: (*pr)("\n\tempty page list:\n");
1.97 yamt 1834: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1835: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1836: (*pr)("\n\tfull page list:\n");
1.97 yamt 1837: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1838: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1839: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1840: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1841:
1.25 thorpej 1842: if (pp->pr_curpage == NULL)
1843: (*pr)("\tno current page\n");
1844: else
1845: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1846:
1847: skip_pagelist:
1848: if (print_log == 0)
1849: goto skip_log;
1850:
1851: (*pr)("\n");
1852: if ((pp->pr_roflags & PR_LOGGING) == 0)
1853: (*pr)("\tno log\n");
1.122 christos 1854: else {
1.25 thorpej 1855: pr_printlog(pp, NULL, pr);
1.122 christos 1856: }
1.3 pk 1857:
1.25 thorpej 1858: skip_log:
1.44 thorpej 1859:
1.102 chs 1860: #define PR_GROUPLIST(pcg) \
1861: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1862: for (i = 0; i < PCG_NOBJECTS; i++) { \
1863: if (pcg->pcg_objects[i].pcgo_pa != \
1864: POOL_PADDR_INVALID) { \
1865: (*pr)("\t\t\t%p, 0x%llx\n", \
1866: pcg->pcg_objects[i].pcgo_va, \
1867: (unsigned long long) \
1868: pcg->pcg_objects[i].pcgo_pa); \
1869: } else { \
1870: (*pr)("\t\t\t%p\n", \
1871: pcg->pcg_objects[i].pcgo_va); \
1872: } \
1873: }
1874:
1.134 ad 1875: if (pc != NULL) {
1876: cpuhit = 0;
1877: cpumiss = 0;
1878: for (i = 0; i < MAXCPUS; i++) {
1879: if ((cc = pc->pc_cpus[i]) == NULL)
1880: continue;
1881: cpuhit += cc->cc_hits;
1882: cpumiss += cc->cc_misses;
1883: }
1884: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1885: (*pr)("\tcache layer hits %llu misses %llu\n",
1886: pc->pc_hits, pc->pc_misses);
1887: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1888: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1889: pc->pc_contended);
1890: (*pr)("\tcache layer empty groups %u full groups %u\n",
1891: pc->pc_nempty, pc->pc_nfull);
1892: if (print_cache) {
1893: (*pr)("\tfull cache groups:\n");
1894: for (pcg = pc->pc_fullgroups; pcg != NULL;
1895: pcg = pcg->pcg_next) {
1896: PR_GROUPLIST(pcg);
1897: }
1898: (*pr)("\tempty cache groups:\n");
1899: for (pcg = pc->pc_emptygroups; pcg != NULL;
1900: pcg = pcg->pcg_next) {
1901: PR_GROUPLIST(pcg);
1902: }
1.103 chs 1903: }
1.44 thorpej 1904: }
1.102 chs 1905: #undef PR_GROUPLIST
1.44 thorpej 1906:
1.88 chs 1907: pr_enter_check(pp, pr);
1908: }
1909:
1910: static int
1911: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1912: {
1913: struct pool_item *pi;
1.128 christos 1914: void *page;
1.88 chs 1915: int n;
1916:
1.121 yamt 1917: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1918: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1919: if (page != ph->ph_page &&
1920: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1921: if (label != NULL)
1922: printf("%s: ", label);
1923: printf("pool(%p:%s): page inconsistency: page %p;"
1924: " at page head addr %p (p %p)\n", pp,
1925: pp->pr_wchan, ph->ph_page,
1926: ph, page);
1927: return 1;
1928: }
1.88 chs 1929: }
1.3 pk 1930:
1.97 yamt 1931: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1932: return 0;
1933:
1.102 chs 1934: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1935: pi != NULL;
1.102 chs 1936: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1937:
1938: #ifdef DIAGNOSTIC
1939: if (pi->pi_magic != PI_MAGIC) {
1940: if (label != NULL)
1941: printf("%s: ", label);
1942: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1943: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1944: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1945: n, pi);
1.88 chs 1946: panic("pool");
1947: }
1948: #endif
1.121 yamt 1949: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1950: continue;
1951: }
1.128 christos 1952: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1953: if (page == ph->ph_page)
1954: continue;
1955:
1956: if (label != NULL)
1957: printf("%s: ", label);
1958: printf("pool(%p:%s): page inconsistency: page %p;"
1959: " item ordinal %d; addr %p (p %p)\n", pp,
1960: pp->pr_wchan, ph->ph_page,
1961: n, pi, page);
1962: return 1;
1963: }
1964: return 0;
1.3 pk 1965: }
1966:
1.88 chs 1967:
1.3 pk 1968: int
1.42 thorpej 1969: pool_chk(struct pool *pp, const char *label)
1.3 pk 1970: {
1971: struct pool_item_header *ph;
1972: int r = 0;
1973:
1.134 ad 1974: mutex_enter(&pp->pr_lock);
1.88 chs 1975: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1976: r = pool_chk_page(pp, label, ph);
1977: if (r) {
1978: goto out;
1979: }
1980: }
1981: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1982: r = pool_chk_page(pp, label, ph);
1983: if (r) {
1.3 pk 1984: goto out;
1985: }
1.88 chs 1986: }
1987: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1988: r = pool_chk_page(pp, label, ph);
1989: if (r) {
1.3 pk 1990: goto out;
1991: }
1992: }
1.88 chs 1993:
1.3 pk 1994: out:
1.134 ad 1995: mutex_exit(&pp->pr_lock);
1.3 pk 1996: return (r);
1.43 thorpej 1997: }
1998:
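/*
 * A minimal sketch: pool_chk() is a diagnostic entry point that walks
 * all three page lists and returns nonzero on the first inconsistency
 * found.  "examplepool" is the hypothetical pool from the sketch above;
 * the label is prefixed to any diagnostic output.
 */
static void
examplepool_check(void)
{

	if (pool_chk(&examplepool, "examplepool") != 0)
		panic("examplepool: page list inconsistency");
}
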
1999: /*
2000: * pool_cache_init:
2001: *
2002: * Initialize a pool cache.
1.134 ad 2003: */
2004: pool_cache_t
2005: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
2006: const char *wchan, struct pool_allocator *palloc, int ipl,
2007: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
2008: {
2009: pool_cache_t pc;
2010:
2011: pc = pool_get(&cache_pool, PR_WAITOK);
2012: if (pc == NULL)
2013: return NULL;
2014:
2015: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
2016: palloc, ipl, ctor, dtor, arg);
2017:
2018: return pc;
2019: }
2020:
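/*
 * A minimal sketch of creating a cache for a hypothetical object type.
 * Passing a NULL allocator with IPL_NONE selects pool_allocator_nointr
 * (see pool_cache_bootstrap() below); NULL ctor/dtor are replaced with
 * no-ops.
 */
struct example_obj {
	int	eo_state;
};

static pool_cache_t example_cache;

static void
example_cache_attach(void)
{

	example_cache = pool_cache_init(sizeof(struct example_obj),
	    0, 0, 0, "examplecache", NULL, IPL_NONE, NULL, NULL, NULL);
}
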
2021: /*
2022: * pool_cache_bootstrap:
1.43 thorpej 2023: *
1.134 ad 2024: * Kernel-private version of pool_cache_init(). The caller
2025: * provides initial storage.
1.43 thorpej 2026: */
2027: void
1.134 ad 2028: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
2029: u_int align_offset, u_int flags, const char *wchan,
2030: struct pool_allocator *palloc, int ipl,
2031: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 2032: void *arg)
2033: {
1.134 ad 2034: CPU_INFO_ITERATOR cii;
2035: struct cpu_info *ci;
2036: struct pool *pp;
2037:
2038: pp = &pc->pc_pool;
2039: if (palloc == NULL && ipl == IPL_NONE)
2040: palloc = &pool_allocator_nointr;
2041: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.43 thorpej 2042:
1.134 ad 2043: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, pp->pr_ipl);
1.43 thorpej 2044:
1.134 ad 2045: if (ctor == NULL) {
2046: ctor = (int (*)(void *, void *, int))nullop;
2047: }
2048: if (dtor == NULL) {
2049: dtor = (void (*)(void *, void *))nullop;
2050: }
1.43 thorpej 2051:
1.134 ad 2052: pc->pc_emptygroups = NULL;
2053: pc->pc_fullgroups = NULL;
2054: pc->pc_partgroups = NULL;
1.43 thorpej 2055: pc->pc_ctor = ctor;
2056: pc->pc_dtor = dtor;
2057: pc->pc_arg = arg;
1.134 ad 2058: pc->pc_hits = 0;
1.48 thorpej 2059: pc->pc_misses = 0;
1.134 ad 2060: pc->pc_nempty = 0;
2061: pc->pc_npart = 0;
2062: pc->pc_nfull = 0;
2063: pc->pc_contended = 0;
2064: pc->pc_refcnt = 0;
1.136 yamt 2065: pc->pc_freecheck = NULL;
1.134 ad 2066:
2067: /* Allocate per-CPU caches. */
2068: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
2069: pc->pc_ncpu = 0;
1.137 ! ad 2070: if (ncpu == 0) {
! 2071: /* XXX For sparc: boot CPU is not attached yet. */
! 2072: pool_cache_cpu_init1(curcpu(), pc);
! 2073: } else {
! 2074: for (CPU_INFO_FOREACH(cii, ci)) {
! 2075: pool_cache_cpu_init1(ci, pc);
! 2076: }
1.134 ad 2077: }
2078:
2079: if (__predict_true(!cold)) {
2080: mutex_enter(&pp->pr_lock);
2081: pp->pr_cache = pc;
2082: mutex_exit(&pp->pr_lock);
2083: mutex_enter(&pool_head_lock);
2084: LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
2085: mutex_exit(&pool_head_lock);
2086: } else {
2087: pp->pr_cache = pc;
2088: LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
2089: }
1.43 thorpej 2090: }
2091:
2092: /*
2093: * pool_cache_destroy:
2094: *
2095: * Destroy a pool cache.
2096: */
2097: void
1.134 ad 2098: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 2099: {
1.134 ad 2100: struct pool *pp = &pc->pc_pool;
2101: pool_cache_cpu_t *cc;
2102: pcg_t *pcg;
2103: int i;
2104:
2105: /* Remove it from the global list. */
2106: mutex_enter(&pool_head_lock);
2107: while (pc->pc_refcnt != 0)
2108: cv_wait(&pool_busy, &pool_head_lock);
2109: LIST_REMOVE(pc, pc_cachelist);
2110: mutex_exit(&pool_head_lock);
1.43 thorpej 2111:
2112: /* First, invalidate the entire cache. */
2113: pool_cache_invalidate(pc);
2114:
1.134 ad 2115: /* Disassociate it from the pool. */
2116: mutex_enter(&pp->pr_lock);
2117: pp->pr_cache = NULL;
2118: mutex_exit(&pp->pr_lock);
2119:
2120: /* Destroy per-CPU data */
2121: for (i = 0; i < MAXCPUS; i++) {
2122: if ((cc = pc->pc_cpus[i]) == NULL)
2123: continue;
2124: if ((pcg = cc->cc_current) != NULL) {
2125: pcg->pcg_next = NULL;
2126: pool_cache_invalidate_groups(pc, pcg);
2127: }
2128: if ((pcg = cc->cc_previous) != NULL) {
2129: pcg->pcg_next = NULL;
2130: pool_cache_invalidate_groups(pc, pcg);
2131: }
2132: if (cc != &pc->pc_cpu0)
2133: pool_put(&cache_cpu_pool, cc);
2134: }
2135:
2136: /* Finally, destroy it. */
2137: mutex_destroy(&pc->pc_lock);
2138: pool_destroy(pp);
2139: pool_put(&cache_pool, pc);
2140: }
2141:
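/*
 * A minimal sketch: tearing down the hypothetical cache created in
 * the sketch after pool_cache_init() above.  All cached objects are
 * destructed and the backing pool is destroyed.
 */
static void
example_cache_detach(void)
{

	pool_cache_destroy(example_cache);
}
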
2142: /*
2143: * pool_cache_cpu_init1:
2144: *
2145: * Called for each pool_cache whenever a new CPU is attached.
2146: */
2147: static void
2148: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
2149: {
2150: pool_cache_cpu_t *cc;
1.137 ! ad 2151: int index;
1.134 ad 2152:
1.137 ! ad 2153: index = ci->ci_index;
! 2154:
! 2155: KASSERT(index < MAXCPUS);
1.134 ad 2156: KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
2157:
1.137 ! ad 2158: if ((cc = pc->pc_cpus[index]) != NULL) {
! 2159: KASSERT(cc->cc_cpuindex == index);
1.134 ad 2160: return;
2161: }
2162:
2163: /*
2164: * The first CPU is 'free'. This needs to be the case for
2165: * bootstrap - we may not be able to allocate yet.
2166: */
2167: if (pc->pc_ncpu == 0) {
2168: cc = &pc->pc_cpu0;
2169: pc->pc_ncpu = 1;
2170: } else {
2171: mutex_enter(&pc->pc_lock);
2172: pc->pc_ncpu++;
2173: mutex_exit(&pc->pc_lock);
2174: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
2175: }
2176:
2177: cc->cc_ipl = pc->pc_pool.pr_ipl;
2178: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
2179: cc->cc_cache = pc;
1.137 ! ad 2180: cc->cc_cpuindex = index;
1.134 ad 2181: cc->cc_hits = 0;
2182: cc->cc_misses = 0;
2183: cc->cc_current = NULL;
2184: cc->cc_previous = NULL;
2185:
1.137 ! ad 2186: pc->pc_cpus[index] = cc;
1.43 thorpej 2187: }
2188:
1.134 ad 2189: /*
2190: * pool_cache_cpu_init:
2191: *
2192: * Called whenever a new CPU is attached.
2193: */
2194: void
2195: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 2196: {
1.134 ad 2197: pool_cache_t pc;
2198:
2199: mutex_enter(&pool_head_lock);
2200: LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) {
2201: pc->pc_refcnt++;
2202: mutex_exit(&pool_head_lock);
1.43 thorpej 2203:
1.134 ad 2204: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 2205:
1.134 ad 2206: mutex_enter(&pool_head_lock);
2207: pc->pc_refcnt--;
2208: cv_broadcast(&pool_busy);
2209: }
2210: mutex_exit(&pool_head_lock);
1.43 thorpej 2211: }
2212:
1.134 ad 2213: /*
2214: * pool_cache_reclaim:
2215: *
2216: * Reclaim memory from a pool cache.
2217: */
2218: bool
2219: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 2220: {
2221:
1.134 ad 2222: return pool_reclaim(&pc->pc_pool);
2223: }
1.43 thorpej 2224:
1.136 yamt 2225: static void
2226: pool_cache_destruct_object1(pool_cache_t pc, void *object)
2227: {
2228:
2229: (*pc->pc_dtor)(pc->pc_arg, object);
2230: pool_put(&pc->pc_pool, object);
2231: }
2232:
1.134 ad 2233: /*
2234: * pool_cache_destruct_object:
2235: *
2236: * Force destruction of an object and its release back into
2237: * the pool.
2238: */
2239: void
2240: pool_cache_destruct_object(pool_cache_t pc, void *object)
2241: {
2242:
1.136 yamt 2243: FREECHECK_IN(&pc->pc_freecheck, object);
2244:
2245: pool_cache_destruct_object1(pc, object);
1.43 thorpej 2246: }
2247:
1.134 ad 2248: /*
2249: * pool_cache_invalidate_groups:
2250: *
2251: * Invalidate a chain of groups and destruct all objects.
2252: */
1.102 chs 2253: static void
1.134 ad 2254: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 2255: {
1.134 ad 2256: void *object;
2257: pcg_t *next;
2258: int i;
2259:
2260: for (; pcg != NULL; pcg = next) {
2261: next = pcg->pcg_next;
2262:
2263: for (i = 0; i < pcg->pcg_avail; i++) {
2264: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 2265: pool_cache_destruct_object1(pc, object);
1.134 ad 2266: }
1.102 chs 2267:
2268: pool_put(&pcgpool, pcg);
2269: }
2270: }
2271:
1.43 thorpej 2272: /*
1.134 ad 2273: * pool_cache_invalidate:
1.43 thorpej 2274: *
1.134 ad 2275: * Invalidate a pool cache (destruct and release all of the
2276: * cached objects). Does not reclaim objects from the pool.
1.43 thorpej 2277: */
1.134 ad 2278: void
2279: pool_cache_invalidate(pool_cache_t pc)
2280: {
2281: pcg_t *full, *empty, *part;
2282:
2283: mutex_enter(&pc->pc_lock);
2284: full = pc->pc_fullgroups;
2285: empty = pc->pc_emptygroups;
2286: part = pc->pc_partgroups;
2287: pc->pc_fullgroups = NULL;
2288: pc->pc_emptygroups = NULL;
2289: pc->pc_partgroups = NULL;
2290: pc->pc_nfull = 0;
2291: pc->pc_nempty = 0;
2292: pc->pc_npart = 0;
2293: mutex_exit(&pc->pc_lock);
2294:
2295: pool_cache_invalidate_groups(pc, full);
2296: pool_cache_invalidate_groups(pc, empty);
2297: pool_cache_invalidate_groups(pc, part);
2298: }
2299:
2300: void
2301: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2302: {
2303:
2304: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2305: }
2306:
2307: void
2308: pool_cache_setlowat(pool_cache_t pc, int n)
2309: {
2310:
2311: pool_setlowat(&pc->pc_pool, n);
2312: }
2313:
2314: void
2315: pool_cache_sethiwat(pool_cache_t pc, int n)
2316: {
2317:
2318: pool_sethiwat(&pc->pc_pool, n);
2319: }
2320:
2321: void
2322: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2323: {
2324:
2325: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2326: }
2327:
2328: static inline pool_cache_cpu_t *
2329: pool_cache_cpu_enter(pool_cache_t pc, int *s)
2330: {
2331: pool_cache_cpu_t *cc;
2332:
2333: /*
2334: * Prevent other users of the cache from accessing our
2335: * CPU-local data. To avoid touching shared state, we
             2336: 	 * pull the necessary information from CPU-local data.
2337: */
1.137 ! ad 2338: crit_enter();
! 2339: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2340: KASSERT(cc->cc_cache == pc);
1.137 ! ad 2341: if (cc->cc_ipl != IPL_NONE) {
1.134 ad 2342: *s = splraiseipl(cc->cc_iplcookie);
2343: }
2344: KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
2345:
2346: return cc;
2347: }
2348:
2349: static inline void
2350: pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
2351: {
2352:
2353: /* No longer need exclusive access to the per-CPU data. */
1.137 ! ad 2354: if (cc->cc_ipl != IPL_NONE) {
1.134 ad 2355: splx(*s);
2356: }
1.137 ! ad 2357: crit_exit();
1.134 ad 2358: }
2359:
2360: #if __GNUC_PREREQ__(3, 0)
2361: __attribute ((noinline))
2362: #endif
2363: pool_cache_cpu_t *
2364: pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
2365: paddr_t *pap, int flags)
1.43 thorpej 2366: {
1.134 ad 2367: pcg_t *pcg, *cur;
2368: uint64_t ncsw;
2369: pool_cache_t pc;
1.43 thorpej 2370: void *object;
1.58 thorpej 2371:
1.134 ad 2372: pc = cc->cc_cache;
2373: cc->cc_misses++;
1.43 thorpej 2374:
1.134 ad 2375: /*
2376: * Nothing was available locally. Try and grab a group
2377: * from the cache.
2378: */
2379: if (!mutex_tryenter(&pc->pc_lock)) {
2380: ncsw = curlwp->l_ncsw;
2381: mutex_enter(&pc->pc_lock);
2382: pc->pc_contended++;
1.43 thorpej 2383:
1.134 ad 2384: /*
2385: * If we context switched while locking, then
2386: * our view of the per-CPU data is invalid:
2387: * retry.
2388: */
2389: if (curlwp->l_ncsw != ncsw) {
2390: mutex_exit(&pc->pc_lock);
2391: pool_cache_cpu_exit(cc, s);
2392: return pool_cache_cpu_enter(pc, s);
1.43 thorpej 2393: }
1.102 chs 2394: }
1.43 thorpej 2395:
1.134 ad 2396: if ((pcg = pc->pc_fullgroups) != NULL) {
1.43 thorpej 2397: /*
1.134 ad 2398: * If there's a full group, release our empty
2399: * group back to the cache. Install the full
2400: * group as cc_current and return.
1.43 thorpej 2401: */
1.134 ad 2402: if ((cur = cc->cc_current) != NULL) {
2403: KASSERT(cur->pcg_avail == 0);
2404: cur->pcg_next = pc->pc_emptygroups;
2405: pc->pc_emptygroups = cur;
2406: pc->pc_nempty++;
1.87 thorpej 2407: }
1.134 ad 2408: KASSERT(pcg->pcg_avail == PCG_NOBJECTS);
2409: cc->cc_current = pcg;
2410: pc->pc_fullgroups = pcg->pcg_next;
2411: pc->pc_hits++;
2412: pc->pc_nfull--;
2413: mutex_exit(&pc->pc_lock);
2414: return cc;
2415: }
2416:
2417: /*
2418: * Nothing available locally or in cache. Take the slow
2419: * path: fetch a new object from the pool and construct
2420: * it.
2421: */
2422: pc->pc_misses++;
2423: mutex_exit(&pc->pc_lock);
2424: pool_cache_cpu_exit(cc, s);
2425:
2426: object = pool_get(&pc->pc_pool, flags);
2427: *objectp = object;
2428: if (object == NULL)
2429: return NULL;
1.125 ad 2430:
1.134 ad 2431: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
2432: pool_put(&pc->pc_pool, object);
2433: *objectp = NULL;
2434: return NULL;
1.43 thorpej 2435: }
2436:
1.134 ad 2437: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2438: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2439:
1.134 ad 2440: if (pap != NULL) {
2441: #ifdef POOL_VTOPHYS
2442: *pap = POOL_VTOPHYS(object);
2443: #else
2444: *pap = POOL_PADDR_INVALID;
2445: #endif
1.102 chs 2446: }
1.43 thorpej 2447:
1.125 ad 2448: FREECHECK_OUT(&pc->pc_freecheck, object);
1.134 ad 2449: return NULL;
1.43 thorpej 2450: }
2451:
2452: /*
1.134 ad 2453: * pool_cache_get{,_paddr}:
1.43 thorpej 2454: *
1.134 ad 2455: * Get an object from a pool cache (optionally returning
2456: * the physical address of the object).
1.43 thorpej 2457: */
1.134 ad 2458: void *
2459: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2460: {
1.134 ad 2461: pool_cache_cpu_t *cc;
2462: pcg_t *pcg;
2463: void *object;
1.60 thorpej 2464: int s;
1.43 thorpej 2465:
1.134 ad 2466: #ifdef LOCKDEBUG
2467: if (flags & PR_WAITOK)
2468: ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
2469: #endif
1.125 ad 2470:
1.134 ad 2471: cc = pool_cache_cpu_enter(pc, &s);
2472: do {
2473: /* Try and allocate an object from the current group. */
2474: pcg = cc->cc_current;
2475: if (pcg != NULL && pcg->pcg_avail > 0) {
2476: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2477: if (pap != NULL)
2478: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2479: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2480: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
2481: KASSERT(object != NULL);
2482: cc->cc_hits++;
2483: pool_cache_cpu_exit(cc, &s);
2484: FREECHECK_OUT(&pc->pc_freecheck, object);
2485: return object;
1.43 thorpej 2486: }
2487:
2488: /*
1.134 ad 2489: * That failed. If the previous group isn't empty, swap
2490: * it with the current group and allocate from there.
1.43 thorpej 2491: */
1.134 ad 2492: pcg = cc->cc_previous;
2493: if (pcg != NULL && pcg->pcg_avail > 0) {
2494: cc->cc_previous = cc->cc_current;
2495: cc->cc_current = pcg;
2496: continue;
1.43 thorpej 2497: }
2498:
1.134 ad 2499: /*
2500: * Can't allocate from either group: try the slow path.
2501: * If get_slow() allocated an object for us, or if
2502: * no more objects are available, it will return NULL.
2503: * Otherwise, we need to retry.
2504: */
2505: cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
2506: } while (cc != NULL);
1.43 thorpej 2507:
1.134 ad 2508: return object;
1.51 thorpej 2509: }
2510:
1.134 ad 2511: #if __GNUC_PREREQ__(3, 0)
2512: __attribute ((noinline))
2513: #endif
2514: pool_cache_cpu_t *
2515: pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
1.51 thorpej 2516: {
1.134 ad 2517: pcg_t *pcg, *cur;
2518: uint64_t ncsw;
2519: pool_cache_t pc;
1.51 thorpej 2520:
1.134 ad 2521: pc = cc->cc_cache;
2522: cc->cc_misses++;
1.43 thorpej 2523:
1.134 ad 2524: /*
2525: * No free slots locally. Try to grab an empty, unused
2526: * group from the cache.
2527: */
2528: if (!mutex_tryenter(&pc->pc_lock)) {
2529: ncsw = curlwp->l_ncsw;
2530: mutex_enter(&pc->pc_lock);
2531: pc->pc_contended++;
1.102 chs 2532:
1.134 ad 2533: /*
2534: * If we context switched while locking, then
2535: * our view of the per-CPU data is invalid:
2536: * retry.
2537: */
2538: if (curlwp->l_ncsw != ncsw) {
2539: mutex_exit(&pc->pc_lock);
2540: pool_cache_cpu_exit(cc, s);
2541: return pool_cache_cpu_enter(pc, s);
2542: }
2543: }
1.130 ad 2544:
1.134 ad 2545: if ((pcg = pc->pc_emptygroups) != NULL) {
2546: /*
             2547: 		 * If there's an empty group, release our full
2548: * group back to the cache. Install the empty
2549: * group as cc_current and return.
2550: */
2551: if ((cur = cc->cc_current) != NULL) {
2552: KASSERT(cur->pcg_avail == PCG_NOBJECTS);
2553: cur->pcg_next = pc->pc_fullgroups;
2554: pc->pc_fullgroups = cur;
2555: pc->pc_nfull++;
1.102 chs 2556: }
1.134 ad 2557: KASSERT(pcg->pcg_avail == 0);
2558: cc->cc_current = pcg;
2559: pc->pc_emptygroups = pcg->pcg_next;
2560: pc->pc_hits++;
2561: pc->pc_nempty--;
2562: mutex_exit(&pc->pc_lock);
2563: return cc;
1.102 chs 2564: }
1.105 christos 2565:
1.134 ad 2566: /*
2567: * Nothing available locally or in cache. Take the
2568: * slow path and try to allocate a new group that we
2569: * can release to.
2570: */
2571: pc->pc_misses++;
2572: mutex_exit(&pc->pc_lock);
2573: pool_cache_cpu_exit(cc, s);
1.105 christos 2574:
1.134 ad 2575: /*
2576: * If we can't allocate a new group, just throw the
2577: * object away.
2578: */
2579: pcg = pool_get(&pcgpool, PR_NOWAIT);
2580: if (pcg == NULL) {
2581: pool_cache_destruct_object(pc, object);
2582: return NULL;
2583: }
2584: #ifdef DIAGNOSTIC
2585: memset(pcg, 0, sizeof(*pcg));
2586: #else
2587: pcg->pcg_avail = 0;
2588: #endif
1.105 christos 2589:
1.134 ad 2590: /*
2591: * Add the empty group to the cache and try again.
2592: */
2593: mutex_enter(&pc->pc_lock);
2594: pcg->pcg_next = pc->pc_emptygroups;
2595: pc->pc_emptygroups = pcg;
2596: pc->pc_nempty++;
2597: mutex_exit(&pc->pc_lock);
1.103 chs 2598:
1.134 ad 2599: return pool_cache_cpu_enter(pc, s);
2600: }
1.102 chs 2601:
1.43 thorpej 2602: /*
1.134 ad 2603: * pool_cache_put{,_paddr}:
1.43 thorpej 2604: *
1.134 ad 2605: * Put an object back to the pool cache (optionally caching the
2606: * physical address of the object).
1.43 thorpej 2607: */
1.101 thorpej 2608: void
1.134 ad 2609: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2610: {
1.134 ad 2611: pool_cache_cpu_t *cc;
2612: pcg_t *pcg;
2613: int s;
1.101 thorpej 2614:
1.134 ad 2615: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2616:
1.134 ad 2617: cc = pool_cache_cpu_enter(pc, &s);
2618: do {
2619: /* If the current group isn't full, release it there. */
2620: pcg = cc->cc_current;
2621: if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) {
2622: KASSERT(pcg->pcg_objects[pcg->pcg_avail].pcgo_va
2623: == NULL);
2624: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2625: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2626: pcg->pcg_avail++;
2627: cc->cc_hits++;
2628: pool_cache_cpu_exit(cc, &s);
2629: return;
2630: }
1.43 thorpej 2631:
1.134 ad 2632: /*
2633: * That failed. If the previous group is empty, swap
2634: * it with the current group and try again.
2635: */
2636: pcg = cc->cc_previous;
2637: if (pcg != NULL && pcg->pcg_avail == 0) {
2638: cc->cc_previous = cc->cc_current;
2639: cc->cc_current = pcg;
2640: continue;
2641: }
1.43 thorpej 2642:
1.134 ad 2643: /*
2644: * Can't free to either group: try the slow path.
2645: * If put_slow() releases the object for us, it
2646: * will return NULL. Otherwise we need to retry.
2647: */
2648: cc = pool_cache_put_slow(cc, &s, object, pa);
2649: } while (cc != NULL);
1.43 thorpej 2650: }
2651:
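/*
 * A minimal sketch using the hypothetical cache from the earlier
 * pool_cache_init() sketch.  A NULL paddr_t pointer on the get side
 * means the physical address is not wanted; POOL_PADDR_INVALID on the
 * put side means none is supplied.
 */
static void
example_cache_use(void)
{
	struct example_obj *eo;

	eo = pool_cache_get_paddr(example_cache, PR_WAITOK, NULL);
	eo->eo_state = 1;
	pool_cache_put_paddr(example_cache, eo, POOL_PADDR_INVALID);
}
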
2652: /*
1.134 ad 2653: * pool_cache_xcall:
1.43 thorpej 2654: *
1.134 ad 2655: * Transfer objects from the per-CPU cache to the global cache.
2656: * Run within a cross-call thread.
1.43 thorpej 2657: */
2658: static void
1.134 ad 2659: pool_cache_xcall(pool_cache_t pc)
1.43 thorpej 2660: {
1.134 ad 2661: pool_cache_cpu_t *cc;
2662: pcg_t *prev, *cur, **list;
2663: int s = 0; /* XXXgcc */
2664:
2665: cc = pool_cache_cpu_enter(pc, &s);
2666: cur = cc->cc_current;
2667: cc->cc_current = NULL;
2668: prev = cc->cc_previous;
2669: cc->cc_previous = NULL;
2670: pool_cache_cpu_exit(cc, &s);
2671:
2672: /*
2673: * XXXSMP Go to splvm to prevent kernel_lock from being taken,
2674: * because locks at IPL_SOFTXXX are still spinlocks. Does not
2675: * apply to IPL_SOFTBIO. Cross-call threads do not take the
2676: * kernel_lock.
1.101 thorpej 2677: */
1.134 ad 2678: s = splvm();
2679: mutex_enter(&pc->pc_lock);
2680: if (cur != NULL) {
2681: if (cur->pcg_avail == PCG_NOBJECTS) {
2682: list = &pc->pc_fullgroups;
2683: pc->pc_nfull++;
2684: } else if (cur->pcg_avail == 0) {
2685: list = &pc->pc_emptygroups;
2686: pc->pc_nempty++;
2687: } else {
2688: list = &pc->pc_partgroups;
2689: pc->pc_npart++;
2690: }
2691: cur->pcg_next = *list;
2692: *list = cur;
2693: }
2694: if (prev != NULL) {
2695: if (prev->pcg_avail == PCG_NOBJECTS) {
2696: list = &pc->pc_fullgroups;
2697: pc->pc_nfull++;
2698: } else if (prev->pcg_avail == 0) {
2699: list = &pc->pc_emptygroups;
2700: pc->pc_nempty++;
2701: } else {
2702: list = &pc->pc_partgroups;
2703: pc->pc_npart++;
2704: }
2705: prev->pcg_next = *list;
2706: *list = prev;
2707: }
2708: mutex_exit(&pc->pc_lock);
2709: splx(s);
1.3 pk 2710: }
1.66 thorpej 2711:
2712: /*
2713: * Pool backend allocators.
2714: *
2715: * Each pool has a backend allocator that handles allocation, deallocation,
2716: * and any additional draining that might be needed.
2717: *
2718: * We provide two standard allocators:
2719: *
2720: * pool_allocator_kmem - the default when no allocator is specified
2721: *
2722: * pool_allocator_nointr - used for pools that will not be accessed
2723: * in interrupt context.
2724: */
2725: void *pool_page_alloc(struct pool *, int);
2726: void pool_page_free(struct pool *, void *);
2727:
1.112 bjh21 2728: #ifdef POOL_SUBPAGE
2729: struct pool_allocator pool_allocator_kmem_fullpage = {
2730: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2731: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2732: };
2733: #else
1.66 thorpej 2734: struct pool_allocator pool_allocator_kmem = {
2735: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2736: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2737: };
1.112 bjh21 2738: #endif
1.66 thorpej 2739:
2740: void *pool_page_alloc_nointr(struct pool *, int);
2741: void pool_page_free_nointr(struct pool *, void *);
2742:
1.112 bjh21 2743: #ifdef POOL_SUBPAGE
2744: struct pool_allocator pool_allocator_nointr_fullpage = {
2745: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2746: .pa_backingmapptr = &kernel_map,
1.112 bjh21 2747: };
2748: #else
1.66 thorpej 2749: struct pool_allocator pool_allocator_nointr = {
2750: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2751: .pa_backingmapptr = &kernel_map,
1.66 thorpej 2752: };
1.112 bjh21 2753: #endif
1.66 thorpej 2754:
2755: #ifdef POOL_SUBPAGE
2756: void *pool_subpage_alloc(struct pool *, int);
2757: void pool_subpage_free(struct pool *, void *);
2758:
1.112 bjh21 2759: struct pool_allocator pool_allocator_kmem = {
2760: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2761: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2762: };
2763:
2764: void *pool_subpage_alloc_nointr(struct pool *, int);
2765: void pool_subpage_free_nointr(struct pool *, void *);
2766:
2767: struct pool_allocator pool_allocator_nointr = {
2768: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2769: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2770: };
2771: #endif /* POOL_SUBPAGE */
2772:
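/*
 * A minimal sketch of a pool supplying its own backend allocator.
 * The first three members are the alloc hook, the free hook and the
 * page size (0 selects the platform default), mirroring the standard
 * allocators above.  "example_page_alloc"/"example_page_free" are
 * hypothetical; here they simply delegate to the default page hooks.
 */
static void *
example_page_alloc(struct pool *pp, int flags)
{

	return pool_page_alloc(pp, flags);
}

static void
example_page_free(struct pool *pp, void *v)
{

	pool_page_free(pp, v);
}

static struct pool_allocator example_allocator = {
	example_page_alloc, example_page_free, 0,
};
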
1.117 yamt 2773: static void *
2774: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2775: {
1.117 yamt 2776: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2777: void *res;
2778:
1.117 yamt 2779: res = (*pa->pa_alloc)(pp, flags);
2780: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2781: /*
1.117 yamt 2782: * We only run the drain hook here if PR_NOWAIT.
2783: * In other cases, the hook will be run in
2784: * pool_reclaim().
1.66 thorpej 2785: */
1.117 yamt 2786: if (pp->pr_drain_hook != NULL) {
2787: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2788: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2789: }
1.117 yamt 2790: }
2791: return res;
1.66 thorpej 2792: }
2793:
1.117 yamt 2794: static void
1.66 thorpej 2795: pool_allocator_free(struct pool *pp, void *v)
2796: {
2797: struct pool_allocator *pa = pp->pr_alloc;
2798:
2799: (*pa->pa_free)(pp, v);
2800: }
2801:
2802: void *
1.124 yamt 2803: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2804: {
1.127 thorpej 2805: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2806:
1.100 yamt 2807: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2808: }
2809:
2810: void
1.124 yamt 2811: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2812: {
2813:
1.98 yamt 2814: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2815: }
2816:
2817: static void *
1.124 yamt 2818: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2819: {
1.127 thorpej 2820: bool waitok = (flags & PR_WAITOK) ? true : false;
1.98 yamt 2821:
1.100 yamt 2822: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2823: }
2824:
2825: static void
1.124 yamt 2826: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2827: {
2828:
1.100 yamt 2829: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2830: }
2831:
2832: #ifdef POOL_SUBPAGE
2833: /* Sub-page allocator, for machines with large hardware pages. */
2834: void *
2835: pool_subpage_alloc(struct pool *pp, int flags)
2836: {
1.134 ad 2837: return pool_get(&psppool, flags);
1.66 thorpej 2838: }
2839:
2840: void
2841: pool_subpage_free(struct pool *pp, void *v)
2842: {
2843: pool_put(&psppool, v);
2844: }
2845:
2846: /* We don't provide a real nointr allocator. Maybe later. */
2847: void *
1.112 bjh21 2848: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2849: {
2850:
2851: return (pool_subpage_alloc(pp, flags));
2852: }
2853:
2854: void
1.112 bjh21 2855: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2856: {
2857:
2858: pool_subpage_free(pp, v);
2859: }
1.112 bjh21 2860: #endif /* POOL_SUBPAGE */
1.66 thorpej 2861: void *
1.124 yamt 2862: pool_page_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2863: {
1.127 thorpej 2864: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2865:
1.100 yamt 2866: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2867: }
2868:
2869: void
1.124 yamt 2870: pool_page_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2871: {
2872:
1.98 yamt 2873: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2874: }