Annotation of src/sys/kern/subr_pool.c, Revision 1.128.2.7
1.128.2.7! ad 1: /* $NetBSD: subr_pool.c,v 1.128.2.6 2007/08/20 21:27:37 ad Exp $ */
1.1 pk 2:
3: /*-
1.128.2.2 ad 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.128.2.7! ad 9: * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.128.2.7! ad 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.128.2.6 2007/08/20 21:27:37 ad Exp $");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.125 ad 56: #include <sys/debug.h>
1.128.2.5 ad 57: #include <sys/lockdebug.h>
1.3 pk 58:
59: #include <uvm/uvm.h>
60:
1.1 pk 61: /*
62: * Pool resource management utility.
1.3 pk 63: *
1.88 chs 64: * Memory is allocated in pages which are split into pieces according to
65: * the pool item size. Each page is kept on one of three lists in the
66: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
67: * for empty, full and partially-full pages respectively. The individual
68: * pool items are on a linked list headed by `ph_itemlist' in each page
69: * header. The memory for building the page list is either taken from
70: * the allocated pages themselves (for small pool items) or taken from
71: * an internal pool of page headers (`phpool').
1.1 pk 72: */
73:
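/*
 * Illustrative usage sketch (not part of this file; "struct foo" and
 * the names below are hypothetical).  A typical consumer initializes
 * a pool once and thereafter gets and puts items:
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopool", &pool_allocator_nointr, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */
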
1.3 pk 74: /* List of all pools */
1.102 chs 75: LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
1.3 pk 76:
1.128.2.7! ad 77: /* List of all caches. */
! 78: LIST_HEAD(,pool_cache) pool_cache_head =
! 79: LIST_HEAD_INITIALIZER(pool_cache_head);
! 80:
1.3 pk 81: /* Private pool for page header structures */
1.97 yamt 82: #define PHPOOL_MAX 8
83: static struct pool phpool[PHPOOL_MAX];
84: #define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx)))
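
/*
 * For example, PHPOOL_FREELIST_NELEM(0) == 0: phpool[0] provides
 * plain page headers with no embedded freelist, for ordinary pools
 * with off-page headers.  For idx >= 1 it yields 1 << idx, so e.g.
 * phpool[3] serves PR_NOTOUCH pools with up to 8 items per page.
 */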
1.3 pk 85:
1.62 bjh21 86: #ifdef POOL_SUBPAGE
87: /* Pool of subpages for use by normal pools. */
88: static struct pool psppool;
89: #endif
90:
1.117 yamt 91: static SLIST_HEAD(, pool_allocator) pa_deferinitq =
92: SLIST_HEAD_INITIALIZER(pa_deferinitq);
93:
1.98 yamt 94: static void *pool_page_alloc_meta(struct pool *, int);
95: static void pool_page_free_meta(struct pool *, void *);
96:
97: /* allocator for pool metadata */
1.128.2.7! ad 98: struct pool_allocator pool_allocator_meta = {
1.117 yamt 99: pool_page_alloc_meta, pool_page_free_meta,
100: .pa_backingmapptr = &kmem_map,
1.98 yamt 101: };
102:
1.3 pk 103: /* # of seconds to retain page after last use */
104: int pool_inactive_time = 10;
105:
106: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 107: static struct pool *drainpp;
108:
1.128.2.2 ad 109: /* This lock protects both pool_head and drainpp. */
110: static kmutex_t pool_head_lock;
1.128.2.7! ad 111: static kcondvar_t pool_busy;
1.3 pk 112:
1.99 yamt 113: typedef uint8_t pool_item_freelist_t;
114:
1.3 pk 115: struct pool_item_header {
116: /* Page headers */
1.88 chs 117: LIST_ENTRY(pool_item_header)
1.3 pk 118: ph_pagelist; /* pool page list */
1.88 chs 119: SPLAY_ENTRY(pool_item_header)
120: ph_node; /* Off-page page headers */
1.128 christos 121: void * ph_page; /* this page's address */
1.3 pk 122: struct timeval ph_time; /* last referenced */
1.97 yamt 123: union {
124: /* !PR_NOTOUCH */
125: struct {
1.102 chs 126: LIST_HEAD(, pool_item)
1.97 yamt 127: phu_itemlist; /* chunk list for this page */
128: } phu_normal;
129: /* PR_NOTOUCH */
130: struct {
131: uint16_t
132: phu_off; /* start offset in page */
1.99 yamt 133: pool_item_freelist_t
1.97 yamt 134: phu_firstfree; /* first free item */
1.99 yamt 135: /*
136: * XXX it might be better to use
137: * a simple bitmap and ffs(3)
138: */
1.97 yamt 139: } phu_notouch;
140: } ph_u;
141: uint16_t ph_nmissing; /* # of chunks in use */
1.3 pk 142: };
1.97 yamt 143: #define ph_itemlist ph_u.phu_normal.phu_itemlist
144: #define ph_off ph_u.phu_notouch.phu_off
145: #define ph_firstfree ph_u.phu_notouch.phu_firstfree
1.3 pk 146:
1.1 pk 147: struct pool_item {
1.3 pk 148: #ifdef DIAGNOSTIC
1.82 thorpej 149: u_int pi_magic;
1.33 chs 150: #endif
1.82 thorpej 151: #define PI_MAGIC 0xdeadbeefU
1.3 pk 152: /* Other entries use only this list entry */
1.102 chs 153: LIST_ENTRY(pool_item) pi_list;
1.3 pk 154: };
155:
1.53 thorpej 156: #define POOL_NEEDS_CATCHUP(pp) \
157: ((pp)->pr_nitems < (pp)->pr_minitems)
158:
1.43 thorpej 159: /*
160: * Pool cache management.
161: *
162: * Pool caches provide a way for constructed objects to be cached by the
163: * pool subsystem. This can lead to performance improvements by avoiding
 164: * needless object construction/destruction; destruction is deferred
 165: * until absolutely necessary.
166: *
1.128.2.7! ad 167: * Caches are grouped into cache groups. Each cache group references up
! 168: * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
! 169: * object from the pool, it calls the object's constructor and places it
! 170: * into a cache group. When a cache group frees an object back to the
! 171: * pool, it first calls the object's destructor. This allows the object
! 172: * to persist in constructed form while freed to the cache.
! 173: *
! 174: * The pool references each cache, so that when a pool is drained by the
! 175: * pagedaemon, it can drain each individual cache as well. Each time a
! 176: * cache is drained, the most idle cache group is freed to the pool in
! 177: * its entirety.
1.43 thorpej 178: *
 179: * Pool caches are laid on top of pools. By layering them, we can avoid
180: * the complexity of cache management for pools which would not benefit
181: * from it.
182: */
183:
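/*
 * Illustrative cache usage (a minimal sketch of the pool_cache
 * interface declared in <sys/pool.h>; all "foo" names are
 * hypothetical):
 *
 *	pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
 *	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(foo_cache, f);
 */
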
184: static struct pool pcgpool;
1.128.2.7! ad 185: static struct pool cache_pool;
! 186: static struct pool cache_cpu_pool;
1.3 pk 187:
1.128.2.7! ad 188: static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
! 189: void *, paddr_t);
! 190: static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
! 191: void **, paddr_t *, int);
! 192: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
! 193: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
1.3 pk 194:
1.42 thorpej 195: static int pool_catchup(struct pool *);
1.128 christos 196: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 197: struct pool_item_header *);
1.88 chs 198: static void pool_update_curpage(struct pool *);
1.66 thorpej 199:
1.113 yamt 200: static int pool_grow(struct pool *, int);
1.117 yamt 201: static void *pool_allocator_alloc(struct pool *, int);
202: static void pool_allocator_free(struct pool *, void *);
1.3 pk 203:
1.97 yamt 204: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 205: void (*)(const char *, ...));
1.42 thorpej 206: static void pool_print1(struct pool *, const char *,
207: void (*)(const char *, ...));
1.3 pk 208:
1.88 chs 209: static int pool_chk_page(struct pool *, const char *,
210: struct pool_item_header *);
211:
1.3 pk 212: /*
1.52 thorpej 213: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 214: */
215: struct pool_log {
216: const char *pl_file;
217: long pl_line;
218: int pl_action;
1.25 thorpej 219: #define PRLOG_GET 1
220: #define PRLOG_PUT 2
1.3 pk 221: void *pl_addr;
1.1 pk 222: };
223:
1.86 matt 224: #ifdef POOL_DIAGNOSTIC
1.3 pk 225: /* Number of entries in pool log buffers */
1.17 thorpej 226: #ifndef POOL_LOGSIZE
227: #define POOL_LOGSIZE 10
228: #endif
229:
230: int pool_logsize = POOL_LOGSIZE;
1.1 pk 231:
1.110 perry 232: static inline void
1.42 thorpej 233: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 234: {
235: int n = pp->pr_curlogentry;
236: struct pool_log *pl;
237:
1.20 thorpej 238: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 239: return;
240:
241: /*
242: * Fill in the current entry. Wrap around and overwrite
243: * the oldest entry if necessary.
244: */
245: pl = &pp->pr_log[n];
246: pl->pl_file = file;
247: pl->pl_line = line;
248: pl->pl_action = action;
249: pl->pl_addr = v;
250: if (++n >= pp->pr_logsize)
251: n = 0;
252: pp->pr_curlogentry = n;
253: }
254:
255: static void
1.42 thorpej 256: pr_printlog(struct pool *pp, struct pool_item *pi,
257: void (*pr)(const char *, ...))
1.3 pk 258: {
259: int i = pp->pr_logsize;
260: int n = pp->pr_curlogentry;
261:
1.20 thorpej 262: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 263: return;
264:
265: /*
266: * Print all entries in this pool's log.
267: */
268: while (i-- > 0) {
269: struct pool_log *pl = &pp->pr_log[n];
270: if (pl->pl_action != 0) {
1.25 thorpej 271: if (pi == NULL || pi == pl->pl_addr) {
272: (*pr)("\tlog entry %d:\n", i);
273: (*pr)("\t\taction = %s, addr = %p\n",
274: pl->pl_action == PRLOG_GET ? "get" : "put",
275: pl->pl_addr);
276: (*pr)("\t\tfile: %s at line %lu\n",
277: pl->pl_file, pl->pl_line);
278: }
1.3 pk 279: }
280: if (++n >= pp->pr_logsize)
281: n = 0;
282: }
283: }
1.25 thorpej 284:
1.110 perry 285: static inline void
1.42 thorpej 286: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 287: {
288:
1.34 thorpej 289: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 290: printf("pool %s: reentrancy at file %s line %ld\n",
291: pp->pr_wchan, file, line);
292: printf(" previous entry at file %s line %ld\n",
293: pp->pr_entered_file, pp->pr_entered_line);
294: panic("pr_enter");
295: }
296:
297: pp->pr_entered_file = file;
298: pp->pr_entered_line = line;
299: }
300:
1.110 perry 301: static inline void
1.42 thorpej 302: pr_leave(struct pool *pp)
1.25 thorpej 303: {
304:
1.34 thorpej 305: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 306: printf("pool %s not entered?\n", pp->pr_wchan);
307: panic("pr_leave");
308: }
309:
310: pp->pr_entered_file = NULL;
311: pp->pr_entered_line = 0;
312: }
313:
1.110 perry 314: static inline void
1.42 thorpej 315: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 316: {
317:
318: if (pp->pr_entered_file != NULL)
319: (*pr)("\n\tcurrently entered from file %s line %ld\n",
320: pp->pr_entered_file, pp->pr_entered_line);
321: }
1.3 pk 322: #else
1.25 thorpej 323: #define pr_log(pp, v, action, file, line)
324: #define pr_printlog(pp, pi, pr)
325: #define pr_enter(pp, file, line)
326: #define pr_leave(pp)
327: #define pr_enter_check(pp, pr)
1.59 thorpej 328: #endif /* POOL_DIAGNOSTIC */
1.3 pk 329:
1.110 perry 330: static inline int
1.97 yamt 331: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
332: const void *v)
333: {
334: const char *cp = v;
335: int idx;
336:
337: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 338: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 339: KASSERT(idx < pp->pr_itemsperpage);
340: return idx;
341: }
342:
1.99 yamt 343: #define PR_FREELIST_ALIGN(p) \
344: roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
345: #define PR_FREELIST(ph) ((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
346: #define PR_INDEX_USED ((pool_item_freelist_t)-1)
347: #define PR_INDEX_EOL ((pool_item_freelist_t)-2)
1.97 yamt 348:
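/*
 * Illustrative layout (not real data): a PR_NOTOUCH page holding four
 * items, of which items 0 and 2 are allocated, would have
 *
 *	ph_firstfree = 1
 *	freelist[]   = { PR_INDEX_USED, 3, PR_INDEX_USED, PR_INDEX_EOL }
 *
 * i.e. the free items form the chain 1 -> 3, threaded through the
 * freelist array beside the page header rather than through the items
 * themselves.
 */
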
1.110 perry 349: static inline void
1.97 yamt 350: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
351: void *obj)
352: {
353: int idx = pr_item_notouch_index(pp, ph, obj);
1.99 yamt 354: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 355:
356: KASSERT(freelist[idx] == PR_INDEX_USED);
357: freelist[idx] = ph->ph_firstfree;
358: ph->ph_firstfree = idx;
359: }
360:
1.110 perry 361: static inline void *
1.97 yamt 362: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
363: {
364: int idx = ph->ph_firstfree;
1.99 yamt 365: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 366:
367: KASSERT(freelist[idx] != PR_INDEX_USED);
368: ph->ph_firstfree = freelist[idx];
369: freelist[idx] = PR_INDEX_USED;
370:
1.128 christos 371: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 372: }
373:
1.110 perry 374: static inline int
1.88 chs 375: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
376: {
1.121 yamt 377:
378: /*
 379: * We consider pool_item_headers with smaller ph_page addresses bigger.
 380: * (This unnatural ordering is for the benefit of pr_find_pagehead.)
381: */
382:
1.88 chs 383: if (a->ph_page < b->ph_page)
1.121 yamt 384: return (1);
385: else if (a->ph_page > b->ph_page)
1.88 chs 386: return (-1);
387: else
388: return (0);
389: }
390:
391: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
392: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
393:
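/*
 * Because phtree_compare() sorts headers by descending ph_page, the
 * in-order successor of a bare item address is the header with the
 * greatest ph_page at or below that address.  pr_find_pagehead()
 * below relies on this for PR_NOALIGN pools, where the page base
 * cannot be recovered by simply masking the item address.
 */
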
1.3 pk 394: /*
1.121 yamt 395: * Return the pool page header based on item address.
1.3 pk 396: */
1.110 perry 397: static inline struct pool_item_header *
1.121 yamt 398: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 399: {
1.88 chs 400: struct pool_item_header *ph, tmp;
1.3 pk 401:
1.121 yamt 402: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.128 christos 403: tmp.ph_page = (void *)(uintptr_t)v;
1.121 yamt 404: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
405: if (ph == NULL) {
406: ph = SPLAY_ROOT(&pp->pr_phtree);
407: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
408: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
409: }
410: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
411: }
412: } else {
1.128 christos 413: void *page =
414: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 415:
416: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 417: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 418: } else {
419: tmp.ph_page = page;
420: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
421: }
422: }
1.3 pk 423:
1.121 yamt 424: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 425: ((char *)ph->ph_page <= (char *)v &&
426: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 427: return ph;
1.3 pk 428: }
429:
1.101 thorpej 430: static void
431: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
432: {
433: struct pool_item_header *ph;
434:
435: while ((ph = LIST_FIRST(pq)) != NULL) {
436: LIST_REMOVE(ph, ph_pagelist);
437: pool_allocator_free(pp, ph->ph_page);
1.128.2.2 ad 438: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 439: pool_put(pp->pr_phpool, ph);
440: }
441: }
442:
1.3 pk 443: /*
444: * Remove a page from the pool.
445: */
1.110 perry 446: static inline void
1.61 chs 447: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
448: struct pool_pagelist *pq)
1.3 pk 449: {
450:
1.128.2.2 ad 451: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 452:
1.3 pk 453: /*
1.7 thorpej 454: * If the page was idle, decrement the idle page count.
1.3 pk 455: */
1.6 thorpej 456: if (ph->ph_nmissing == 0) {
457: #ifdef DIAGNOSTIC
458: if (pp->pr_nidle == 0)
459: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 460: if (pp->pr_nitems < pp->pr_itemsperpage)
461: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 462: #endif
463: pp->pr_nidle--;
464: }
1.7 thorpej 465:
1.20 thorpej 466: pp->pr_nitems -= pp->pr_itemsperpage;
467:
1.7 thorpej 468: /*
1.101 thorpej 469: * Unlink the page from the pool and queue it for release.
1.7 thorpej 470: */
1.88 chs 471: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 472: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
473: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 474: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
475:
1.7 thorpej 476: pp->pr_npages--;
477: pp->pr_npagefree++;
1.6 thorpej 478:
1.88 chs 479: pool_update_curpage(pp);
1.3 pk 480: }
481:
1.126 thorpej 482: static bool
1.117 yamt 483: pa_starved_p(struct pool_allocator *pa)
484: {
485:
486: if (pa->pa_backingmap != NULL) {
487: return vm_map_starved_p(pa->pa_backingmap);
488: }
1.127 thorpej 489: return false;
1.117 yamt 490: }
491:
492: static int
1.124 yamt 493: pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
1.117 yamt 494: {
495: struct pool *pp = obj;
496: struct pool_allocator *pa = pp->pr_alloc;
497:
498: KASSERT(&pp->pr_reclaimerentry == ce);
499: pool_reclaim(pp);
500: if (!pa_starved_p(pa)) {
501: return CALLBACK_CHAIN_ABORT;
502: }
503: return CALLBACK_CHAIN_CONTINUE;
504: }
505:
506: static void
507: pool_reclaim_register(struct pool *pp)
508: {
509: struct vm_map *map = pp->pr_alloc->pa_backingmap;
510: int s;
511:
512: if (map == NULL) {
513: return;
514: }
515:
516: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
517: callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
518: &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
519: splx(s);
520: }
521:
522: static void
523: pool_reclaim_unregister(struct pool *pp)
524: {
525: struct vm_map *map = pp->pr_alloc->pa_backingmap;
526: int s;
527:
528: if (map == NULL) {
529: return;
530: }
531:
532: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
533: callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
534: &pp->pr_reclaimerentry);
535: splx(s);
536: }
537:
538: static void
539: pa_reclaim_register(struct pool_allocator *pa)
540: {
541: struct vm_map *map = *pa->pa_backingmapptr;
542: struct pool *pp;
543:
544: KASSERT(pa->pa_backingmap == NULL);
545: if (map == NULL) {
546: SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
547: return;
548: }
549: pa->pa_backingmap = map;
550: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
551: pool_reclaim_register(pp);
552: }
553: }
554:
1.3 pk 555: /*
1.94 simonb 556: * Initialize all the pools listed in the "pools" link set.
557: */
558: void
1.117 yamt 559: pool_subsystem_init(void)
1.94 simonb 560: {
1.117 yamt 561: struct pool_allocator *pa;
1.94 simonb 562: __link_set_decl(pools, struct link_pool_init);
563: struct link_pool_init * const *pi;
564:
1.128.2.2 ad 565: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
1.128.2.7! ad 566: cv_init(&pool_busy, "poolbusy");
1.128.2.2 ad 567:
1.94 simonb 568: __link_set_foreach(pi, pools)
569: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
570: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
1.128.2.1 ad 571: (*pi)->palloc, (*pi)->ipl);
1.117 yamt 572:
573: while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
574: KASSERT(pa->pa_backingmapptr != NULL);
575: KASSERT(*pa->pa_backingmapptr != NULL);
576: SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
577: pa_reclaim_register(pa);
578: }
1.128.2.7! ad 579:
! 580: pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
! 581: 0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
! 582:
! 583: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
! 584: 0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
1.94 simonb 585: }
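
/*
 * Entries normally reach the "pools" link set via the POOL_INIT()
 * macro in <sys/pool.h>, which records the same parameters that
 * pool_init() takes; an illustrative (hypothetical) use:
 *
 *	POOL_INIT(foo_pool, sizeof(struct foo), 0, 0, 0, "foopool",
 *	    &pool_allocator_nointr, IPL_NONE);
 */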
586:
587: /*
1.3 pk 588: * Initialize the given pool resource structure.
589: *
590: * We export this routine to allow other kernel parts to declare
591: * static pools that must be initialized before malloc() is available.
592: */
593: void
1.42 thorpej 594: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.128.2.1 ad 595: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 596: {
1.116 simonb 597: #ifdef DEBUG
598: struct pool *pp1;
599: #endif
1.92 enami 600: size_t trysize, phsize;
1.128.2.2 ad 601: int off, slack;
1.3 pk 602:
1.99 yamt 603: KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
604: PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
605:
1.116 simonb 606: #ifdef DEBUG
607: /*
608: * Check that the pool hasn't already been initialised and
609: * added to the list of all pools.
610: */
611: LIST_FOREACH(pp1, &pool_head, pr_poollist) {
612: if (pp == pp1)
613: panic("pool_init: pool %s already initialised",
614: wchan);
615: }
616: #endif
617:
1.25 thorpej 618: #ifdef POOL_DIAGNOSTIC
619: /*
620: * Always log if POOL_DIAGNOSTIC is defined.
621: */
622: if (pool_logsize != 0)
623: flags |= PR_LOGGING;
624: #endif
625:
1.66 thorpej 626: if (palloc == NULL)
627: palloc = &pool_allocator_kmem;
1.112 bjh21 628: #ifdef POOL_SUBPAGE
629: if (size > palloc->pa_pagesz) {
630: if (palloc == &pool_allocator_kmem)
631: palloc = &pool_allocator_kmem_fullpage;
632: else if (palloc == &pool_allocator_nointr)
633: palloc = &pool_allocator_nointr_fullpage;
634: }
1.66 thorpej 635: #endif /* POOL_SUBPAGE */
636: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.112 bjh21 637: if (palloc->pa_pagesz == 0)
1.66 thorpej 638: palloc->pa_pagesz = PAGE_SIZE;
639:
640: TAILQ_INIT(&palloc->pa_list);
641:
1.128.2.7! ad 642: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 643: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
644: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.117 yamt 645:
646: if (palloc->pa_backingmapptr != NULL) {
647: pa_reclaim_register(palloc);
648: }
1.66 thorpej 649: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 650: }
1.3 pk 651:
652: if (align == 0)
653: align = ALIGN(1);
1.14 thorpej 654:
1.120 yamt 655: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 656: size = sizeof(struct pool_item);
1.3 pk 657:
1.78 thorpej 658: size = roundup(size, align);
1.66 thorpej 659: #ifdef DIAGNOSTIC
660: if (size > palloc->pa_pagesz)
1.121 yamt 661: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 662: #endif
1.35 pk 663:
1.3 pk 664: /*
665: * Initialize the pool structure.
666: */
1.88 chs 667: LIST_INIT(&pp->pr_emptypages);
668: LIST_INIT(&pp->pr_fullpages);
669: LIST_INIT(&pp->pr_partpages);
1.128.2.7! ad 670: pp->pr_cache = NULL;
1.3 pk 671: pp->pr_curpage = NULL;
672: pp->pr_npages = 0;
673: pp->pr_minitems = 0;
674: pp->pr_minpages = 0;
675: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 676: pp->pr_roflags = flags;
677: pp->pr_flags = 0;
1.35 pk 678: pp->pr_size = size;
1.3 pk 679: pp->pr_align = align;
680: pp->pr_wchan = wchan;
1.66 thorpej 681: pp->pr_alloc = palloc;
1.20 thorpej 682: pp->pr_nitems = 0;
683: pp->pr_nout = 0;
684: pp->pr_hardlimit = UINT_MAX;
685: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 686: pp->pr_hardlimit_ratecap.tv_sec = 0;
687: pp->pr_hardlimit_ratecap.tv_usec = 0;
688: pp->pr_hardlimit_warning_last.tv_sec = 0;
689: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 690: pp->pr_drain_hook = NULL;
691: pp->pr_drain_hook_arg = NULL;
1.125 ad 692: pp->pr_freecheck = NULL;
1.3 pk 693:
694: /*
695: * Decide whether to put the page header off page to avoid
1.92 enami  696: * wasting too large a part of the page or for too big an item.
            697: * Off-page page headers go into a splay tree, so we can match
            698: * a returned item with its header based on the page address.
            699: * We use 1/16 of the page size and about 8 times the item
            700: * size as the threshold (XXX: tune).
701: *
702: * However, we'll put the header into the page if we can put
703: * it without wasting any items.
704: *
705: * Silently enforce `0 <= ioff < align'.
1.3 pk 706: */
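/*
 * Worked example, assuming a 4 KB page: 128-byte items pass the first
 * test (128 < 4096 / 16 == 256), so the header lives at the end of
 * the page (PR_PHINPAGE).  1 KB items fail it, and since four of them
 * fill the page exactly, the header cannot ride along for free
 * either; such a pool gets off-page headers from phpool.
 */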
1.92 enami 707: pp->pr_itemoffset = ioff %= align;
708: /* See the comment below about reserved bytes. */
709: trysize = palloc->pa_pagesz - ((align - ioff) % align);
710: phsize = ALIGN(sizeof(struct pool_item_header));
1.121 yamt 711: if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 712: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
713: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 714: /* Use the end of the page for the page header */
1.20 thorpej 715: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 716: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 717: } else {
1.3 pk 718: /* The page header will be taken from our page header pool */
719: pp->pr_phoffset = 0;
1.66 thorpej 720: off = palloc->pa_pagesz;
1.88 chs 721: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 722: }
1.1 pk 723:
1.3 pk 724: /*
725: * Alignment is to take place at `ioff' within the item. This means
726: * we must reserve up to `align - 1' bytes on the page to allow
727: * appropriate positioning of each item.
728: */
729: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 730: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 731: if ((pp->pr_roflags & PR_NOTOUCH)) {
732: int idx;
733:
734: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
735: idx++) {
736: /* nothing */
737: }
738: if (idx >= PHPOOL_MAX) {
739: /*
 740: * if you see this panic, consider tweaking
741: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
742: */
743: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
744: pp->pr_wchan, pp->pr_itemsperpage);
745: }
746: pp->pr_phpool = &phpool[idx];
747: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
748: pp->pr_phpool = &phpool[0];
749: }
750: #if defined(DIAGNOSTIC)
751: else {
752: pp->pr_phpool = NULL;
753: }
754: #endif
1.3 pk 755:
756: /*
757: * Use the slack between the chunks and the page header
758: * for "cache coloring".
759: */
760: slack = off - pp->pr_itemsperpage * pp->pr_size;
761: pp->pr_maxcolor = (slack / align) * align;
762: pp->pr_curcolor = 0;
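
/*
 * Successive pages will thus start their item area at offsets 0,
 * align, 2 * align, ... up to pr_maxcolor before wrapping (see
 * pool_prime_page()), so items at equal offsets on different pages
 * do not all land in the same cache set.
 */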
763:
764: pp->pr_nget = 0;
765: pp->pr_nfail = 0;
766: pp->pr_nput = 0;
767: pp->pr_npagealloc = 0;
768: pp->pr_npagefree = 0;
1.1 pk 769: pp->pr_hiwat = 0;
1.8 thorpej 770: pp->pr_nidle = 0;
1.128.2.7! ad 771: pp->pr_refcnt = 0;
1.3 pk 772:
1.59 thorpej 773: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 774: if (flags & PR_LOGGING) {
775: if (kmem_map == NULL ||
776: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
777: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 778: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 779: pp->pr_curlogentry = 0;
780: pp->pr_logsize = pool_logsize;
781: }
1.59 thorpej 782: #endif
1.25 thorpej 783:
784: pp->pr_entered_file = NULL;
785: pp->pr_entered_line = 0;
1.3 pk 786:
1.128.2.7! ad 787: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
1.128.2.2 ad 788: cv_init(&pp->pr_cv, wchan);
789: pp->pr_ipl = ipl;
790:
1.3 pk 791: /*
1.43 thorpej 792: * Initialize private page header pool and cache magazine pool if we
793: * haven't done so yet.
1.23 thorpej 794: * XXX LOCKING.
1.3 pk 795: */
1.97 yamt 796: if (phpool[0].pr_size == 0) {
797: int idx;
798: for (idx = 0; idx < PHPOOL_MAX; idx++) {
799: static char phpool_names[PHPOOL_MAX][6+1+6+1];
800: int nelem;
801: size_t sz;
802:
803: nelem = PHPOOL_FREELIST_NELEM(idx);
804: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
805: "phpool-%d", nelem);
806: sz = sizeof(struct pool_item_header);
807: if (nelem) {
808: sz = PR_FREELIST_ALIGN(sz)
1.99 yamt 809: + nelem * sizeof(pool_item_freelist_t);
1.97 yamt 810: }
811: pool_init(&phpool[idx], sz, 0, 0, 0,
1.128.2.1 ad 812: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.97 yamt 813: }
1.62 bjh21 814: #ifdef POOL_SUBPAGE
815: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.128.2.1 ad 816: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
1.62 bjh21 817: #endif
1.128.2.7! ad 818: pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0,
! 819: "cachegrp", &pool_allocator_meta, IPL_VM);
1.1 pk 820: }
821:
1.128.2.2 ad 822: if (__predict_true(!cold)) {
823: /* Insert into the list of all pools. */
824: mutex_enter(&pool_head_lock);
825: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
826: mutex_exit(&pool_head_lock);
827:
828: /* Insert this into the list of pools using this allocator. */
829: mutex_enter(&palloc->pa_lock);
830: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
831: mutex_exit(&palloc->pa_lock);
832: } else {
833: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
834: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
835: }
836:
1.117 yamt 837: pool_reclaim_register(pp);
1.1 pk 838: }
839:
840: /*
 841: * De-commission a pool resource.
842: */
843: void
1.42 thorpej 844: pool_destroy(struct pool *pp)
1.1 pk 845: {
1.101 thorpej 846: struct pool_pagelist pq;
1.3 pk 847: struct pool_item_header *ph;
1.43 thorpej 848:
1.101 thorpej 849: /* Remove from global pool list */
1.128.2.2 ad 850: mutex_enter(&pool_head_lock);
1.128.2.7! ad 851: while (pp->pr_refcnt != 0)
! 852: cv_wait(&pool_busy, &pool_head_lock);
1.102 chs 853: LIST_REMOVE(pp, pr_poollist);
1.101 thorpej 854: if (drainpp == pp)
855: drainpp = NULL;
1.128.2.2 ad 856: mutex_exit(&pool_head_lock);
1.101 thorpej 857:
858: /* Remove this pool from its allocator's list of pools. */
1.117 yamt 859: pool_reclaim_unregister(pp);
1.128.2.2 ad 860: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 861: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.128.2.2 ad 862: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 863:
1.128.2.2 ad 864: mutex_enter(&pp->pr_lock);
1.101 thorpej 865:
1.128.2.7! ad 866: KASSERT(pp->pr_cache == NULL);
1.3 pk 867:
868: #ifdef DIAGNOSTIC
1.20 thorpej 869: if (pp->pr_nout != 0) {
1.25 thorpej 870: pr_printlog(pp, NULL, printf);
1.80 provos 871: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 872: pp->pr_nout);
1.3 pk 873: }
874: #endif
1.1 pk 875:
1.101 thorpej 876: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
877: KASSERT(LIST_EMPTY(&pp->pr_partpages));
878:
1.3 pk 879: /* Remove all pages */
1.101 thorpej 880: LIST_INIT(&pq);
1.88 chs 881: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 882: pr_rmpage(pp, ph, &pq);
883:
1.128.2.2 ad 884: mutex_exit(&pp->pr_lock);
1.3 pk 885:
1.101 thorpej 886: pr_pagelist_free(pp, &pq);
1.3 pk 887:
1.59 thorpej 888: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 889: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 890: free(pp->pr_log, M_TEMP);
1.59 thorpej 891: #endif
1.128.2.2 ad 892:
893: cv_destroy(&pp->pr_cv);
894: mutex_destroy(&pp->pr_lock);
1.1 pk 895: }
896:
1.68 thorpej 897: void
898: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
899: {
900:
901: /* XXX no locking -- must be used just after pool_init() */
902: #ifdef DIAGNOSTIC
903: if (pp->pr_drain_hook != NULL)
904: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
905: #endif
906: pp->pr_drain_hook = fn;
907: pp->pr_drain_hook_arg = arg;
908: }
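
/*
 * Illustrative registration (hypothetical names): the drain hook is
 * invoked with the pool unlocked when the pool hits its hard limit or
 * is reclaimed, and should release cached objects back to the pool:
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		... pool_put() idle struct foo items ...
 *	}
 *
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */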
909:
1.88 chs 910: static struct pool_item_header *
1.128 christos 911: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 912: {
913: struct pool_item_header *ph;
914:
915: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 916: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.128.2.2 ad 917: else
1.97 yamt 918: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 919:
920: return (ph);
921: }
1.1 pk 922:
923: /*
1.3 pk 924: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 925: */
1.3 pk 926: void *
1.59 thorpej 927: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 928: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 929: #else
930: pool_get(struct pool *pp, int flags)
931: #endif
1.1 pk 932: {
933: struct pool_item *pi;
1.3 pk 934: struct pool_item_header *ph;
1.55 thorpej 935: void *v;
1.1 pk 936:
1.2 pk 937: #ifdef DIAGNOSTIC
1.95 atatat 938: if (__predict_false(pp->pr_itemsperpage == 0))
939: panic("pool_get: pool %p: pr_itemsperpage is zero, "
940: "pool not initialized?", pp);
1.84 thorpej 941: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 942: (flags & PR_WAITOK) != 0))
1.77 matt 943: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 944:
1.102 chs 945: #endif /* DIAGNOSTIC */
1.58 thorpej 946: #ifdef LOCKDEBUG
947: if (flags & PR_WAITOK)
1.119 yamt 948: ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 949: #endif
1.1 pk 950:
1.128.2.2 ad 951: mutex_enter(&pp->pr_lock);
1.25 thorpej 952: pr_enter(pp, file, line);
1.20 thorpej 953:
954: startover:
955: /*
956: * Check to see if we've reached the hard limit. If we have,
957: * and we can wait, then wait until an item has been returned to
958: * the pool.
959: */
960: #ifdef DIAGNOSTIC
1.34 thorpej 961: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 962: pr_leave(pp);
1.128.2.2 ad 963: mutex_exit(&pp->pr_lock);
1.20 thorpej 964: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
965: }
966: #endif
1.34 thorpej 967: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 968: if (pp->pr_drain_hook != NULL) {
969: /*
970: * Since the drain hook is going to free things
971: * back to the pool, unlock, call the hook, re-lock,
972: * and check the hardlimit condition again.
973: */
974: pr_leave(pp);
1.128.2.2 ad 975: mutex_exit(&pp->pr_lock);
1.68 thorpej 976: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.128.2.2 ad 977: mutex_enter(&pp->pr_lock);
1.68 thorpej 978: pr_enter(pp, file, line);
979: if (pp->pr_nout < pp->pr_hardlimit)
980: goto startover;
981: }
982:
1.29 sommerfe 983: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 984: /*
985: * XXX: A warning isn't logged in this case. Should
986: * it be?
987: */
988: pp->pr_flags |= PR_WANTED;
1.25 thorpej 989: pr_leave(pp);
1.128.2.2 ad 990: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.25 thorpej 991: pr_enter(pp, file, line);
1.20 thorpej 992: goto startover;
993: }
1.31 thorpej 994:
995: /*
996: * Log a message that the hard limit has been hit.
997: */
998: if (pp->pr_hardlimit_warning != NULL &&
999: ratecheck(&pp->pr_hardlimit_warning_last,
1000: &pp->pr_hardlimit_ratecap))
1001: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 1002:
1003: pp->pr_nfail++;
1004:
1.25 thorpej 1005: pr_leave(pp);
1.128.2.2 ad 1006: mutex_exit(&pp->pr_lock);
1.20 thorpej 1007: return (NULL);
1008: }
1009:
1.3 pk 1010: /*
1011: * The convention we use is that if `curpage' is not NULL, then
1012: * it points at a non-empty bucket. In particular, `curpage'
1013: * never points at a page header which has PR_PHINPAGE set and
1014: * has no items in its bucket.
1015: */
1.20 thorpej 1016: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 1017: int error;
1018:
1.20 thorpej 1019: #ifdef DIAGNOSTIC
1020: if (pp->pr_nitems != 0) {
1.128.2.2 ad 1021: mutex_exit(&pp->pr_lock);
1.20 thorpej 1022: printf("pool_get: %s: curpage NULL, nitems %u\n",
1023: pp->pr_wchan, pp->pr_nitems);
1.80 provos 1024: panic("pool_get: nitems inconsistent");
1.20 thorpej 1025: }
1026: #endif
1027:
1.21 thorpej 1028: /*
1029: * Call the back-end page allocator for more memory.
1030: * Release the pool lock, as the back-end page allocator
1031: * may block.
1032: */
1.25 thorpej 1033: pr_leave(pp);
1.113 yamt 1034: error = pool_grow(pp, flags);
1035: pr_enter(pp, file, line);
1036: if (error != 0) {
1.21 thorpej 1037: /*
1.55 thorpej 1038: * We were unable to allocate a page or item
1039: * header, but we released the lock during
1040: * allocation, so perhaps items were freed
1041: * back to the pool. Check for this case.
1.21 thorpej 1042: */
1043: if (pp->pr_curpage != NULL)
1044: goto startover;
1.15 pk 1045:
1.117 yamt 1046: pp->pr_nfail++;
1.25 thorpej 1047: pr_leave(pp);
1.128.2.2 ad 1048: mutex_exit(&pp->pr_lock);
1.117 yamt 1049: return (NULL);
1.1 pk 1050: }
1.3 pk 1051:
1.20 thorpej 1052: /* Start the allocation process over. */
1053: goto startover;
1.3 pk 1054: }
1.97 yamt 1055: if (pp->pr_roflags & PR_NOTOUCH) {
1056: #ifdef DIAGNOSTIC
1057: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1058: pr_leave(pp);
1.128.2.2 ad 1059: mutex_exit(&pp->pr_lock);
1.97 yamt 1060: panic("pool_get: %s: page empty", pp->pr_wchan);
1061: }
1062: #endif
1063: v = pr_item_notouch_get(pp, ph);
1064: #ifdef POOL_DIAGNOSTIC
1065: pr_log(pp, v, PRLOG_GET, file, line);
1066: #endif
1067: } else {
1.102 chs 1068: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 1069: if (__predict_false(v == NULL)) {
1070: pr_leave(pp);
1.128.2.2 ad 1071: mutex_exit(&pp->pr_lock);
1.97 yamt 1072: panic("pool_get: %s: page empty", pp->pr_wchan);
1073: }
1.20 thorpej 1074: #ifdef DIAGNOSTIC
1.97 yamt 1075: if (__predict_false(pp->pr_nitems == 0)) {
1076: pr_leave(pp);
1.128.2.2 ad 1077: mutex_exit(&pp->pr_lock);
1.97 yamt 1078: printf("pool_get: %s: items on itemlist, nitems %u\n",
1079: pp->pr_wchan, pp->pr_nitems);
1080: panic("pool_get: nitems inconsistent");
1081: }
1.65 enami 1082: #endif
1.56 sommerfe 1083:
1.65 enami 1084: #ifdef POOL_DIAGNOSTIC
1.97 yamt 1085: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 1086: #endif
1.3 pk 1087:
1.65 enami 1088: #ifdef DIAGNOSTIC
1.97 yamt 1089: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1090: pr_printlog(pp, pi, printf);
1091: panic("pool_get(%s): free list modified: "
1092: "magic=%x; page %p; item addr %p\n",
1093: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1094: }
1.3 pk 1095: #endif
1096:
1.97 yamt 1097: /*
1098: * Remove from item list.
1099: */
1.102 chs 1100: LIST_REMOVE(pi, pi_list);
1.97 yamt 1101: }
1.20 thorpej 1102: pp->pr_nitems--;
1103: pp->pr_nout++;
1.6 thorpej 1104: if (ph->ph_nmissing == 0) {
1105: #ifdef DIAGNOSTIC
1.34 thorpej 1106: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 1107: panic("pool_get: nidle inconsistent");
1108: #endif
1109: pp->pr_nidle--;
1.88 chs 1110:
1111: /*
1112: * This page was previously empty. Move it to the list of
1113: * partially-full pages. This page is already curpage.
1114: */
1115: LIST_REMOVE(ph, ph_pagelist);
1116: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 1117: }
1.3 pk 1118: ph->ph_nmissing++;
1.97 yamt 1119: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1120: #ifdef DIAGNOSTIC
1.97 yamt 1121: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 1122: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1123: pr_leave(pp);
1.128.2.2 ad 1124: mutex_exit(&pp->pr_lock);
1.21 thorpej 1125: panic("pool_get: %s: nmissing inconsistent",
1126: pp->pr_wchan);
1127: }
1128: #endif
1.3 pk 1129: /*
1.88 chs 1130: * This page is now full. Move it to the full list
1131: * and select a new current page.
1.3 pk 1132: */
1.88 chs 1133: LIST_REMOVE(ph, ph_pagelist);
1134: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1135: pool_update_curpage(pp);
1.1 pk 1136: }
1.3 pk 1137:
1138: pp->pr_nget++;
1.111 christos 1139: pr_leave(pp);
1.20 thorpej 1140:
1141: /*
1142: * If we have a low water mark and we are now below that low
1143: * water mark, add more items to the pool.
1144: */
1.53 thorpej 1145: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1146: /*
1147: * XXX: Should we log a warning? Should we set up a timeout
1148: * to try again in a second or so? The latter could break
1149: * a caller's assumptions about interrupt protection, etc.
1150: */
1151: }
1152:
1.128.2.2 ad 1153: mutex_exit(&pp->pr_lock);
1.125 ad 1154: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
1155: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 1156: return (v);
1157: }
1158:
1159: /*
1.43 thorpej 1160: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1161: */
1.43 thorpej 1162: static void
1.101 thorpej 1163: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1164: {
1165: struct pool_item *pi = v;
1.3 pk 1166: struct pool_item_header *ph;
1167:
1.128.2.2 ad 1168: KASSERT(mutex_owned(&pp->pr_lock));
1.125 ad 1169: FREECHECK_IN(&pp->pr_freecheck, v);
1.128.2.5 ad 1170: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 1171:
1.30 thorpej 1172: #ifdef DIAGNOSTIC
1.34 thorpej 1173: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1174: printf("pool %s: putting with none out\n",
1175: pp->pr_wchan);
1176: panic("pool_put");
1177: }
1178: #endif
1.3 pk 1179:
1.121 yamt 1180: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.25 thorpej 1181: pr_printlog(pp, NULL, printf);
1.3 pk 1182: panic("pool_put: %s: page header missing", pp->pr_wchan);
1183: }
1.28 thorpej 1184:
1.3 pk 1185: /*
1186: * Return to item list.
1187: */
1.97 yamt 1188: if (pp->pr_roflags & PR_NOTOUCH) {
1189: pr_item_notouch_put(pp, ph, v);
1190: } else {
1.2 pk 1191: #ifdef DIAGNOSTIC
1.97 yamt 1192: pi->pi_magic = PI_MAGIC;
1.3 pk 1193: #endif
1.32 chs 1194: #ifdef DEBUG
1.97 yamt 1195: {
1196: int i, *ip = v;
1.32 chs 1197:
1.97 yamt 1198: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1199: *ip++ = PI_MAGIC;
1200: }
1.32 chs 1201: }
1202: #endif
1203:
1.102 chs 1204: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1205: }
1.79 thorpej 1206: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1207: ph->ph_nmissing--;
1208: pp->pr_nput++;
1.20 thorpej 1209: pp->pr_nitems++;
1210: pp->pr_nout--;
1.3 pk 1211:
1212: /* Cancel "pool empty" condition if it exists */
1213: if (pp->pr_curpage == NULL)
1214: pp->pr_curpage = ph;
1215:
1216: if (pp->pr_flags & PR_WANTED) {
1217: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1218: if (ph->ph_nmissing == 0)
1219: pp->pr_nidle++;
1.128.2.4 ad 1220: cv_broadcast(&pp->pr_cv);
1.3 pk 1221: return;
1222: }
1223:
1224: /*
1.88 chs 1225: * If this page is now empty, do one of two things:
1.21 thorpej 1226: *
1.88 chs 1227: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1228: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1229: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1230: * CLAIM.
1.21 thorpej 1231: *
1.88 chs 1232: * (2) Otherwise, move the page to the empty page list.
1233: *
1234: * Either way, select a new current page (so we use a partially-full
1235: * page if one is available).
1.3 pk 1236: */
1237: if (ph->ph_nmissing == 0) {
1.6 thorpej 1238: pp->pr_nidle++;
1.90 thorpej 1239: if (pp->pr_npages > pp->pr_minpages &&
1240: (pp->pr_npages > pp->pr_maxpages ||
1.117 yamt 1241: pa_starved_p(pp->pr_alloc))) {
1.101 thorpej 1242: pr_rmpage(pp, ph, pq);
1.3 pk 1243: } else {
1.88 chs 1244: LIST_REMOVE(ph, ph_pagelist);
1245: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1246:
1.21 thorpej 1247: /*
1248: * Update the timestamp on the page. A page must
1249: * be idle for some period of time before it can
1250: * be reclaimed by the pagedaemon. This minimizes
1251: * ping-pong'ing for memory.
1252: */
1.118 kardel 1253: getmicrotime(&ph->ph_time);
1.1 pk 1254: }
1.88 chs 1255: pool_update_curpage(pp);
1.1 pk 1256: }
1.88 chs 1257:
1.21 thorpej 1258: /*
1.88 chs 1259: * If the page was previously completely full, move it to the
1260: * partially-full list and make it the current page. The next
1261: * allocation will get the item from this page, instead of
1262: * further fragmenting the pool.
1.21 thorpej 1263: */
1264: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1265: LIST_REMOVE(ph, ph_pagelist);
1266: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1267: pp->pr_curpage = ph;
1268: }
1.43 thorpej 1269: }
1270:
1271: /*
1272: * Return resource to the pool; must be called at appropriate spl level
1273: */
1.59 thorpej 1274: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1275: void
1276: _pool_put(struct pool *pp, void *v, const char *file, long line)
1277: {
1.101 thorpej 1278: struct pool_pagelist pq;
1279:
1280: LIST_INIT(&pq);
1.43 thorpej 1281:
1.128.2.2 ad 1282: mutex_enter(&pp->pr_lock);
1.43 thorpej 1283: pr_enter(pp, file, line);
1284:
1.56 sommerfe 1285: pr_log(pp, v, PRLOG_PUT, file, line);
1286:
1.101 thorpej 1287: pool_do_put(pp, v, &pq);
1.21 thorpej 1288:
1.25 thorpej 1289: pr_leave(pp);
1.128.2.2 ad 1290: mutex_exit(&pp->pr_lock);
1.101 thorpej 1291:
1.102 chs 1292: pr_pagelist_free(pp, &pq);
1.1 pk 1293: }
1.57 sommerfe 1294: #undef pool_put
1.59 thorpej 1295: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1296:
1.56 sommerfe 1297: void
1298: pool_put(struct pool *pp, void *v)
1299: {
1.101 thorpej 1300: struct pool_pagelist pq;
1301:
1302: LIST_INIT(&pq);
1.56 sommerfe 1303:
1.128.2.2 ad 1304: mutex_enter(&pp->pr_lock);
1.101 thorpej 1305: pool_do_put(pp, v, &pq);
1.128.2.2 ad 1306: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1307:
1.102 chs 1308: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1309: }
1.57 sommerfe 1310:
1.59 thorpej 1311: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1312: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1313: #endif
1.74 thorpej 1314:
1315: /*
1.113 yamt 1316: * pool_grow: grow a pool by a page.
1317: *
1318: * => called with pool locked.
1319: * => unlock and relock the pool.
1320: * => return with pool locked.
1321: */
1322:
1323: static int
1324: pool_grow(struct pool *pp, int flags)
1325: {
1326: struct pool_item_header *ph = NULL;
1327: char *cp;
1328:
1.128.2.2 ad 1329: mutex_exit(&pp->pr_lock);
1.113 yamt 1330: cp = pool_allocator_alloc(pp, flags);
1331: if (__predict_true(cp != NULL)) {
1332: ph = pool_alloc_item_header(pp, cp, flags);
1333: }
1334: if (__predict_false(cp == NULL || ph == NULL)) {
1335: if (cp != NULL) {
1336: pool_allocator_free(pp, cp);
1337: }
1.128.2.2 ad 1338: mutex_enter(&pp->pr_lock);
1.113 yamt 1339: return ENOMEM;
1340: }
1341:
1.128.2.2 ad 1342: mutex_enter(&pp->pr_lock);
1.113 yamt 1343: pool_prime_page(pp, cp, ph);
1344: pp->pr_npagealloc++;
1345: return 0;
1346: }
1347:
1348: /*
1.74 thorpej 1349: * Add N items to the pool.
1350: */
1351: int
1352: pool_prime(struct pool *pp, int n)
1353: {
1.75 simonb 1354: int newpages;
1.113 yamt 1355: int error = 0;
1.74 thorpej 1356:
1.128.2.2 ad 1357: mutex_enter(&pp->pr_lock);
1.74 thorpej 1358:
1359: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1360:
1361: while (newpages-- > 0) {
1.113 yamt 1362: error = pool_grow(pp, PR_NOWAIT);
1363: if (error) {
1.74 thorpej 1364: break;
1365: }
1366: pp->pr_minpages++;
1367: }
1368:
1369: if (pp->pr_minpages >= pp->pr_maxpages)
1370: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1371:
1.128.2.2 ad 1372: mutex_exit(&pp->pr_lock);
1.113 yamt 1373: return error;
1.74 thorpej 1374: }
1.55 thorpej 1375:
1376: /*
1.3 pk 1377: * Add a page worth of items to the pool.
1.21 thorpej 1378: *
1379: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1380: */
1.55 thorpej 1381: static void
1.128 christos 1382: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1383: {
1384: struct pool_item *pi;
1.128 christos 1385: void *cp = storage;
1.125 ad 1386: const unsigned int align = pp->pr_align;
1387: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1388: int n;
1.36 pk 1389:
1.128.2.2 ad 1390: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1391:
1.66 thorpej 1392: #ifdef DIAGNOSTIC
1.121 yamt 1393: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1394: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1395: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1396: #endif
1.3 pk 1397:
1398: /*
1399: * Insert page header.
1400: */
1.88 chs 1401: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1402: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1403: ph->ph_page = storage;
1404: ph->ph_nmissing = 0;
1.118 kardel 1405: getmicrotime(&ph->ph_time);
1.88 chs 1406: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1407: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1408:
1.6 thorpej 1409: pp->pr_nidle++;
1410:
1.3 pk 1411: /*
1412: * Color this page.
1413: */
1.128 christos 1414: cp = (char *)cp + pp->pr_curcolor;
1.3 pk 1415: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1416: pp->pr_curcolor = 0;
1417:
1418: /*
 1419: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1420: */
1421: if (ioff != 0)
1.128 christos 1422: cp = (char *)cp + align - ioff;
1.3 pk 1423:
1.125 ad 1424: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1425:
1.3 pk 1426: /*
1427: * Insert remaining chunks on the bucket list.
1428: */
1429: n = pp->pr_itemsperpage;
1.20 thorpej 1430: pp->pr_nitems += n;
1.3 pk 1431:
1.97 yamt 1432: if (pp->pr_roflags & PR_NOTOUCH) {
1.99 yamt 1433: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 1434: int i;
1435:
1.128 christos 1436: ph->ph_off = (char *)cp - (char *)storage;
1.97 yamt 1437: ph->ph_firstfree = 0;
1438: for (i = 0; i < n - 1; i++)
1439: freelist[i] = i + 1;
1440: freelist[n - 1] = PR_INDEX_EOL;
1441: } else {
1442: while (n--) {
1443: pi = (struct pool_item *)cp;
1.78 thorpej 1444:
1.97 yamt 1445: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1446:
1.97 yamt 1447: /* Insert on page list */
1.102 chs 1448: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1449: #ifdef DIAGNOSTIC
1.97 yamt 1450: pi->pi_magic = PI_MAGIC;
1.3 pk 1451: #endif
1.128 christos 1452: cp = (char *)cp + pp->pr_size;
1.125 ad 1453:
1454: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1455: }
1.3 pk 1456: }
1457:
1458: /*
1459: * If the pool was depleted, point at the new page.
1460: */
1461: if (pp->pr_curpage == NULL)
1462: pp->pr_curpage = ph;
1463:
1464: if (++pp->pr_npages > pp->pr_hiwat)
1465: pp->pr_hiwat = pp->pr_npages;
1466: }
1467:
1.20 thorpej 1468: /*
1.52 thorpej 1469: * Used by pool_get() when nitems drops below the low water mark. This
1.88 chs   1470: * is used to bring pr_nitems back up to the low water mark.
1.20 thorpej 1471: *
1.21 thorpej 1472: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1473: *
1.73 thorpej 1474: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1475: * with it locked.
1476: */
1477: static int
1.42 thorpej 1478: pool_catchup(struct pool *pp)
1.20 thorpej 1479: {
1480: int error = 0;
1481:
1.54 thorpej 1482: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1483: error = pool_grow(pp, PR_NOWAIT);
1484: if (error) {
1.20 thorpej 1485: break;
1486: }
1487: }
1.113 yamt 1488: return error;
1.20 thorpej 1489: }
1490:
1.88 chs 1491: static void
1492: pool_update_curpage(struct pool *pp)
1493: {
1494:
1495: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1496: if (pp->pr_curpage == NULL) {
1497: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1498: }
1499: }
1500:
1.3 pk 1501: void
1.42 thorpej 1502: pool_setlowat(struct pool *pp, int n)
1.3 pk 1503: {
1.15 pk 1504:
1.128.2.2 ad 1505: mutex_enter(&pp->pr_lock);
1.21 thorpej 1506:
1.3 pk 1507: pp->pr_minitems = n;
1.15 pk 1508: pp->pr_minpages = (n == 0)
1509: ? 0
1.18 thorpej 1510: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1511:
1512: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1513: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1514: /*
1515: * XXX: Should we log a warning? Should we set up a timeout
1516: * to try again in a second or so? The latter could break
1517: * a caller's assumptions about interrupt protection, etc.
1518: */
1519: }
1.21 thorpej 1520:
1.128.2.2 ad 1521: mutex_exit(&pp->pr_lock);
1.3 pk 1522: }
1523:
1524: void
1.42 thorpej 1525: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1526: {
1.15 pk 1527:
1.128.2.2 ad 1528: mutex_enter(&pp->pr_lock);
1.21 thorpej 1529:
1.15 pk 1530: pp->pr_maxpages = (n == 0)
1531: ? 0
1.18 thorpej 1532: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1533:
1.128.2.2 ad 1534: mutex_exit(&pp->pr_lock);
1.3 pk 1535: }
1536:
1.20 thorpej 1537: void
1.42 thorpej 1538: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1539: {
1540:
1.128.2.2 ad 1541: mutex_enter(&pp->pr_lock);
1.20 thorpej 1542:
1543: pp->pr_hardlimit = n;
1544: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1545: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1546: pp->pr_hardlimit_warning_last.tv_sec = 0;
1547: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1548:
1549: /*
1.21 thorpej 1550: * In-line version of pool_sethiwat(), because we don't want to
1551: * release the lock.
1.20 thorpej 1552: */
1553: pp->pr_maxpages = (n == 0)
1554: ? 0
1555: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1556:
1.128.2.2 ad 1557: mutex_exit(&pp->pr_lock);
1.20 thorpej 1558: }
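
/*
 * Illustrative tuning calls (hypothetical pool): keep at least 16
 * items on hand, free idle pages only beyond roughly 64 items, and
 * refuse to have more than 1024 items outstanding, warning at most
 * once per minute:
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 64);
 *	pool_sethardlimit(&foo_pool, 1024, "foo_pool limit reached", 60);
 */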
1.3 pk 1559:
1560: /*
1561: * Release all complete pages that have not been used recently.
1562: */
1.66 thorpej 1563: int
1.59 thorpej 1564: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1565: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1566: #else
1567: pool_reclaim(struct pool *pp)
1568: #endif
1.3 pk 1569: {
1570: struct pool_item_header *ph, *phnext;
1.61 chs 1571: struct pool_pagelist pq;
1.102 chs 1572: struct timeval curtime, diff;
1.3 pk 1573:
1.68 thorpej 1574: if (pp->pr_drain_hook != NULL) {
1575: /*
1576: * The drain hook must be called with the pool unlocked.
1577: */
1578: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1579: }
1580:
1.128.2.2 ad 1581: if (mutex_tryenter(&pp->pr_lock) == 0)
1.66 thorpej 1582: return (0);
1.25 thorpej 1583: pr_enter(pp, file, line);
1.68 thorpej 1584:
1.88 chs 1585: LIST_INIT(&pq);
1.3 pk 1586:
1.43 thorpej 1587: /*
1588: * Reclaim items from the pool's caches.
1589: */
1.128.2.7! ad 1590: if (pp->pr_cache != NULL)
! 1591: pool_cache_invalidate(pp->pr_cache);
1.43 thorpej 1592:
1.118 kardel 1593: getmicrotime(&curtime);
1.21 thorpej 1594:
1.88 chs 1595: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1596: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1597:
1598: /* Check our minimum page claim */
1599: if (pp->pr_npages <= pp->pr_minpages)
1600: break;
1601:
1.88 chs 1602: KASSERT(ph->ph_nmissing == 0);
1603: timersub(&curtime, &ph->ph_time, &diff);
1.117 yamt 1604: if (diff.tv_sec < pool_inactive_time
1605: && !pa_starved_p(pp->pr_alloc))
1.88 chs 1606: continue;
1.21 thorpej 1607:
1.88 chs 1608: /*
1609: * If freeing this page would put us below
1610: * the low water mark, stop now.
1611: */
1612: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1613: pp->pr_minitems)
1614: break;
1.21 thorpej 1615:
1.88 chs 1616: pr_rmpage(pp, ph, &pq);
1.3 pk 1617: }
1618:
1.25 thorpej 1619: pr_leave(pp);
1.128.2.2 ad 1620: mutex_exit(&pp->pr_lock);
1.128.2.7! ad 1621: if (LIST_EMPTY(&pq))
1.102 chs 1622: return 0;
1.66 thorpej 1623:
1.101 thorpej 1624: pr_pagelist_free(pp, &pq);
1.128.2.7! ad 1625:
1.66 thorpej 1626: return (1);
1.3 pk 1627: }
1628:
1629: /*
1630: * Drain pools, one at a time.
1.21 thorpej 1631: *
1632: * Note, we must never be called from an interrupt context.
1.3 pk 1633: */
1634: void
1.124 yamt 1635: pool_drain(void *arg)
1.3 pk 1636: {
1637: struct pool *pp;
1638:
1.61 chs 1639: pp = NULL;
1.128.2.7! ad 1640:
! 1641: /* Find next pool to drain, and add a reference. */
1.128.2.2 ad 1642: mutex_enter(&pool_head_lock);
1.61 chs 1643: if (drainpp == NULL) {
1.102 chs 1644: drainpp = LIST_FIRST(&pool_head);
1.61 chs 1645: }
1.128.2.7! ad 1646: if (drainpp != NULL) {
1.61 chs 1647: pp = drainpp;
1.102 chs 1648: drainpp = LIST_NEXT(pp, pr_poollist);
1.61 chs 1649: }
1.128.2.7! ad 1650: if (pp != NULL)
! 1651: pp->pr_refcnt++;
1.128.2.2 ad 1652: mutex_exit(&pool_head_lock);
1.128.2.7! ad 1653:
! 1654: /* If we have a candidate, drain it and unlock. */
! 1655: if (pp != NULL) {
1.115 christos 1656: pool_reclaim(pp);
1.128.2.7! ad 1657: mutex_enter(&pool_head_lock);
! 1658: pp->pr_refcnt--;
! 1659: cv_broadcast(&pool_busy);
! 1660: mutex_exit(&pool_head_lock);
! 1661: }
1.3 pk 1662: }
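/*
 * A minimal sketch of the intended caller: a memory-starved context
 * such as the pagedaemon can cycle through the pools, one call per
 * pass (the argument is unused; the predicate below is hypothetical):
 *
 *	while (memory_is_short())
 *		pool_drain(NULL);
 */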
1663:
1664: /*
1665: * Diagnostic helpers.
1666: */
1667: void
1.42 thorpej 1668: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1669: {
1670:
1.25 thorpej 1671: pool_print1(pp, modif, printf);
1.21 thorpej 1672: }
1673:
1.25 thorpej 1674: void
1.108 yamt 1675: pool_printall(const char *modif, void (*pr)(const char *, ...))
1676: {
1677: struct pool *pp;
1678:
1679: LIST_FOREACH(pp, &pool_head, pr_poollist) {
1680: pool_printit(pp, modif, pr);
1681: }
1682: }
1683:
1684: void
1.42 thorpej 1685: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1686: {
1687:
1688: if (pp == NULL) {
1689: (*pr)("Must specify a pool to print.\n");
1690: return;
1691: }
1692:
1693: pool_print1(pp, modif, pr);
1694: }
1695:
1.21 thorpej 1696: static void
1.124 yamt 1697: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1698: void (*pr)(const char *, ...))
1.88 chs 1699: {
1700: struct pool_item_header *ph;
1701: #ifdef DIAGNOSTIC
1702: struct pool_item *pi;
1703: #endif
1704:
1705: LIST_FOREACH(ph, pl, ph_pagelist) {
1706: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1707: ph->ph_page, ph->ph_nmissing,
1708: (u_long)ph->ph_time.tv_sec,
1709: (u_long)ph->ph_time.tv_usec);
1710: #ifdef DIAGNOSTIC
1.97 yamt 1711: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1712: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1713: if (pi->pi_magic != PI_MAGIC) {
1714: (*pr)("\t\t\titem %p, magic 0x%x\n",
1715: pi, pi->pi_magic);
1716: }
1.88 chs 1717: }
1718: }
1719: #endif
1720: }
1721: }
1722:
1723: static void
1.42 thorpej 1724: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1725: {
1.25 thorpej 1726: struct pool_item_header *ph;
1.128.2.7! ad 1727: pool_cache_t pc;
! 1728: pcg_t *pcg;
! 1729: pool_cache_cpu_t *cc;
! 1730: uint64_t cpuhit, cpumiss;
1.44 thorpej 1731: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1732: char c;
1733:
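	/* Modifiers: 'l' = print log, 'p' = page lists, 'c' = cache groups. */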
1734: while ((c = *modif++) != '\0') {
1735: if (c == 'l')
1736: print_log = 1;
1737: if (c == 'p')
1738: print_pagelist = 1;
1.44 thorpej 1739: if (c == 'c')
1740: print_cache = 1;
1.25 thorpej 1741: }
1742:
1.128.2.7! ad 1743: if ((pc = pp->pr_cache) != NULL) {
! 1744: (*pr)("POOL CACHE");
! 1745: } else {
! 1746: (*pr)("POOL");
! 1747: }
! 1748:
! 1749: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1750: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1751: pp->pr_roflags);
1.66 thorpej 1752: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1753: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1754: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1755: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1756: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1757:
1.128.2.7! ad 1758: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1759: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1760: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1761: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1762:
1763: if (print_pagelist == 0)
1764: goto skip_pagelist;
1765:
1.88 chs 1766: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1767: (*pr)("\n\tempty page list:\n");
1.97 yamt 1768: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1769: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1770: (*pr)("\n\tfull page list:\n");
1.97 yamt 1771: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1772: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1773: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1774: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1775:
1.25 thorpej 1776: if (pp->pr_curpage == NULL)
1777: (*pr)("\tno current page\n");
1778: else
1779: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1780:
1781: skip_pagelist:
1782: if (print_log == 0)
1783: goto skip_log;
1784:
1785: (*pr)("\n");
1786: if ((pp->pr_roflags & PR_LOGGING) == 0)
1787: (*pr)("\tno log\n");
1.122 christos 1788: else {
1.25 thorpej 1789: pr_printlog(pp, NULL, pr);
1.122 christos 1790: }
1.3 pk 1791:
1.25 thorpej 1792: skip_log:
1.44 thorpej 1793:
1.102 chs 1794: #define PR_GROUPLIST(pcg) \
1795: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1796: for (i = 0; i < PCG_NOBJECTS; i++) { \
1797: if (pcg->pcg_objects[i].pcgo_pa != \
1798: POOL_PADDR_INVALID) { \
1799: (*pr)("\t\t\t%p, 0x%llx\n", \
1800: pcg->pcg_objects[i].pcgo_va, \
1801: (unsigned long long) \
1802: pcg->pcg_objects[i].pcgo_pa); \
1803: } else { \
1804: (*pr)("\t\t\t%p\n", \
1805: pcg->pcg_objects[i].pcgo_va); \
1806: } \
1807: }
1808:
1.128.2.7! ad 1809: if (pc != NULL) {
! 1810: cpuhit = 0;
! 1811: cpumiss = 0;
! 1812: for (i = 0; i < MAXCPUS; i++) {
! 1813: if ((cc = pc->pc_cpus[i]) == NULL)
! 1814: continue;
! 1815: cpuhit += cc->cc_hits;
! 1816: cpumiss += cc->cc_misses;
! 1817: }
! 1818: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
! 1819: (*pr)("\tcache layer hits %llu misses %llu\n",
! 1820: pc->pc_hits, pc->pc_misses);
! 1821: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
! 1822: pc->pc_hits + pc->pc_misses - pc->pc_contended,
! 1823: pc->pc_contended);
! 1824: (*pr)("\tcache layer empty groups %u full groups %u\n",
! 1825: pc->pc_nempty, pc->pc_nfull);
! 1826: if (print_cache) {
! 1827: (*pr)("\tfull cache groups:\n");
! 1828: for (pcg = pc->pc_fullgroups; pcg != NULL;
! 1829: pcg = pcg->pcg_next) {
! 1830: PR_GROUPLIST(pcg);
! 1831: }
! 1832: (*pr)("\tempty cache groups:\n");
! 1833: for (pcg = pc->pc_emptygroups; pcg != NULL;
! 1834: pcg = pcg->pcg_next) {
! 1835: PR_GROUPLIST(pcg);
! 1836: }
1.103 chs 1837: }
1.44 thorpej 1838: }
1.102 chs 1839: #undef PR_GROUPLIST
1.44 thorpej 1840:
1.88 chs 1841: pr_enter_check(pp, pr);
1842: }
1843:
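/*
 * pool_chk_page:
 *
 *	Verify the integrity of a single page: check that the page
 *	header is consistent with the page address, then walk the free
 *	list looking for modified magic numbers and for items that do
 *	not belong to this page.
 */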
1844: static int
1845: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1846: {
1847: struct pool_item *pi;
1.128 christos 1848: void *page;
1.88 chs 1849: int n;
1850:
1.121 yamt 1851: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1852: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1853: if (page != ph->ph_page &&
1854: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1855: if (label != NULL)
1856: printf("%s: ", label);
1857: printf("pool(%p:%s): page inconsistency: page %p;"
1858: " at page head addr %p (p %p)\n", pp,
1859: pp->pr_wchan, ph->ph_page,
1860: ph, page);
1861: return 1;
1862: }
1.88 chs 1863: }
1.3 pk 1864:
1.97 yamt 1865: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1866: return 0;
1867:
1.102 chs 1868: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1869: pi != NULL;
1.102 chs 1870: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1871:
1872: #ifdef DIAGNOSTIC
1873: if (pi->pi_magic != PI_MAGIC) {
1874: if (label != NULL)
1875: printf("%s: ", label);
1876: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1877: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1878: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1879: n, pi);
1.88 chs 1880: panic("pool");
1881: }
1882: #endif
1.121 yamt 1883: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1884: continue;
1885: }
1.128 christos 1886: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1887: if (page == ph->ph_page)
1888: continue;
1889:
1890: if (label != NULL)
1891: printf("%s: ", label);
1892: printf("pool(%p:%s): page inconsistency: page %p;"
1893: " item ordinal %d; addr %p (p %p)\n", pp,
1894: pp->pr_wchan, ph->ph_page,
1895: n, pi, page);
1896: return 1;
1897: }
1898: return 0;
1.3 pk 1899: }
1900:
1.88 chs 1901:
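/*
 * pool_chk:
 *
 *	Verify the integrity of all pages in a pool.  Returns non-zero
 *	at the first inconsistency found.
 */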
1.3 pk 1902: int
1.42 thorpej 1903: pool_chk(struct pool *pp, const char *label)
1.3 pk 1904: {
1905: struct pool_item_header *ph;
1906: int r = 0;
1907:
1.128.2.2 ad 1908: mutex_enter(&pp->pr_lock);
1.88 chs 1909: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1910: r = pool_chk_page(pp, label, ph);
1911: if (r) {
1912: goto out;
1913: }
1914: }
1915: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1916: r = pool_chk_page(pp, label, ph);
1917: if (r) {
1.3 pk 1918: goto out;
1919: }
1.88 chs 1920: }
1921: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1922: r = pool_chk_page(pp, label, ph);
1923: if (r) {
1.3 pk 1924: goto out;
1925: }
1926: }
1.88 chs 1927:
1.3 pk 1928: out:
1.128.2.2 ad 1929: mutex_exit(&pp->pr_lock);
1.3 pk 1930: return (r);
1.43 thorpej 1931: }
1932:
1933: /*
1934: * pool_cache_init:
1935: *
1936: * Initialize a pool cache.
1.128.2.7! ad 1937: */
! 1938: pool_cache_t
! 1939: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
! 1940: const char *wchan, struct pool_allocator *palloc, int ipl,
! 1941: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
! 1942: {
! 1943: pool_cache_t pc;
! 1944:
! 1945: pc = pool_get(&cache_pool, PR_WAITOK);
! 1946: if (pc == NULL)
! 1947: return NULL;
! 1948:
! 1949: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
! 1950: palloc, ipl, ctor, dtor, arg);
! 1951:
! 1952: return pc;
! 1953: }
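/*
 * A minimal usage sketch (illustrative only; "struct foo", foo_ctor,
 * foo_dtor and foo_cache are hypothetical, and pool_cache_get()/
 * pool_cache_put() are assumed to be the plain wrappers around the
 * _paddr variants below):
 *
 *	static pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *	...
 *	fp = pool_cache_get(foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(foo_cache, fp);
 */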
! 1954:
! 1955: /*
! 1956: * pool_cache_bootstrap:
1.43 thorpej 1957: *
1.128.2.7! ad 1958: * Kernel-private version of pool_cache_init(). The caller
! 1959: * provides initial storage.
1.43 thorpej 1960: */
1961: void
1.128.2.7! ad 1962: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
! 1963: u_int align_offset, u_int flags, const char *wchan,
! 1964: struct pool_allocator *palloc, int ipl,
! 1965: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 1966: void *arg)
1967: {
1.128.2.7! ad 1968: CPU_INFO_ITERATOR cii;
! 1969: struct cpu_info *ci;
! 1970: struct pool *pp;
1.43 thorpej 1971:
1.128.2.7! ad 1972: pp = &pc->pc_pool;
! 1973: if (palloc == NULL && ipl == IPL_NONE)
! 1974: palloc = &pool_allocator_nointr;
! 1975: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.43 thorpej 1976:
1.128.2.7! ad 1977: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, pp->pr_ipl);
1.43 thorpej 1978:
1.128.2.7! ad 1979: pc->pc_emptygroups = NULL;
! 1980: pc->pc_fullgroups = NULL;
1.43 thorpej 1981: pc->pc_ctor = ctor;
1982: pc->pc_dtor = dtor;
1983: pc->pc_arg = arg;
1.128.2.7! ad 1984: pc->pc_hits = 0;
1.48 thorpej 1985: pc->pc_misses = 0;
1.128.2.7! ad 1986: pc->pc_nempty = 0;
! 1987: pc->pc_nfull = 0;
! 1988: pc->pc_contended = 0;
! 1989: pc->pc_refcnt = 0;
! 1990:
! 1991: /* Allocate per-CPU caches. */
! 1992: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
! 1993: pc->pc_ncpu = 0;
! 1994: for (CPU_INFO_FOREACH(cii, ci)) {
! 1995: pool_cache_cpu_init1(ci, pc);
! 1996: }
! 1997:
1.128.2.2 ad 1998: if (__predict_true(!cold)) {
1999: mutex_enter(&pp->pr_lock);
1.128.2.7! ad 2000: pp->pr_cache = pc;
1.128.2.2 ad 2001: mutex_exit(&pp->pr_lock);
1.128.2.7! ad 2002: mutex_enter(&pool_head_lock);
! 2003: LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
! 2004: mutex_exit(&pool_head_lock);
! 2005: } else {
! 2006: pp->pr_cache = pc;
! 2007: LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
! 2008: }
1.43 thorpej 2009: }
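/*
 * A sketch of the bootstrap variant, for use before pool_get() is
 * available (all names hypothetical):
 *
 *	static struct pool_cache foo_cache_store;
 *
 *	pool_cache_bootstrap(&foo_cache_store, sizeof(struct foo),
 *	    0, 0, 0, "foopl", NULL, IPL_VM, NULL, NULL, NULL);
 */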
2010:
2011: /*
2012: * pool_cache_destroy:
2013: *
2014: * Destroy a pool cache.
2015: */
2016: void
1.128.2.7! ad 2017: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 2018: {
1.128.2.7! ad 2019: struct pool *pp = &pc->pc_pool;
! 2020: pool_cache_cpu_t *cc;
! 2021: pcg_t *pcg;
! 2022: int i;
! 2023:
! 2024: /* Remove it from the global list. */
! 2025: mutex_enter(&pool_head_lock);
! 2026: while (pc->pc_refcnt != 0)
! 2027: cv_wait(&pool_busy, &pool_head_lock);
! 2028: LIST_REMOVE(pc, pc_cachelist);
! 2029: mutex_exit(&pool_head_lock);
1.43 thorpej 2030:
2031: /* First, invalidate the entire cache. */
2032: pool_cache_invalidate(pc);
2033:
1.128.2.7! ad 2034: /* Disassociate it from the pool. */
1.128.2.2 ad 2035: mutex_enter(&pp->pr_lock);
1.128.2.7! ad 2036: pp->pr_cache = NULL;
1.128.2.2 ad 2037: mutex_exit(&pp->pr_lock);
2038:
1.128.2.7! ad 2039: /* Destroy per-CPU data */
! 2040: for (i = 0; i < MAXCPUS; i++) {
! 2041: if ((cc = pc->pc_cpus[i]) == NULL)
! 2042: continue;
! 2043: if ((pcg = cc->cc_current) != NULL) {
! 2044: pcg->pcg_next = NULL;
! 2045: pool_cache_invalidate_groups(pc, pcg);
! 2046: }
! 2047: if ((pcg = cc->cc_previous) != NULL) {
! 2048: pcg->pcg_next = NULL;
! 2049: pool_cache_invalidate_groups(pc, pcg);
! 2050: }
! 2051: if (cc != &pc->pc_cpu0)
! 2052: pool_put(&cache_cpu_pool, cc);
! 2053: }
! 2054:
! 2055: /* Finally, destroy it. */
1.128.2.2 ad 2056: mutex_destroy(&pc->pc_lock);
1.128.2.7! ad 2057: pool_destroy(pp);
! 2058: pool_put(&cache_pool, pc);
! 2059: }
! 2060:
! 2061: /*
! 2062: * pool_cache_cpu_init1:
! 2063: *
! 2064: * Called for each pool_cache whenever a new CPU is attached.
! 2065: */
! 2066: static void
! 2067: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
! 2068: {
! 2069: pool_cache_cpu_t *cc;
! 2070:
! 2071: KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
! 2072:
! 2073: if ((cc = pc->pc_cpus[ci->ci_index]) != NULL) {
! 2074: 	KASSERT(cc->cc_cpu == ci);
! 2075: return;
! 2076: }
! 2077:
! 2078: /*
! 2079: 	 * The first CPU is 'free': this needs to be the case for
! 2080: 	 * bootstrap, since we may not be able to allocate yet.
! 2081: */
! 2082: if (pc->pc_ncpu == 0) {
! 2083: cc = &pc->pc_cpu0;
! 2084: pc->pc_ncpu = 1;
! 2085: } else {
! 2086: mutex_enter(&pc->pc_lock);
! 2087: pc->pc_ncpu++;
! 2088: mutex_exit(&pc->pc_lock);
! 2089: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
! 2090: }
! 2091:
! 2092: cc->cc_ipl = pc->pc_pool.pr_ipl;
! 2093: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
! 2094: cc->cc_cache = pc;
! 2095: cc->cc_cpu = ci;
! 2096: cc->cc_hits = 0;
! 2097: cc->cc_misses = 0;
! 2098: cc->cc_current = NULL;
! 2099: cc->cc_previous = NULL;
! 2100: cc->cc_busy = NULL;
! 2101:
! 2102: pc->pc_cpus[ci->ci_index] = cc;
! 2103: }
! 2104:
! 2105: /*
! 2106: * pool_cache_cpu_init:
! 2107: *
! 2108: * Called whenever a new CPU is attached.
! 2109: */
! 2110: void
! 2111: pool_cache_cpu_init(struct cpu_info *ci)
! 2112: {
! 2113: pool_cache_t pc;
! 2114:
! 2115: mutex_enter(&pool_head_lock);
! 2116: LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) {
! 2117: pc->pc_refcnt++;
! 2118: mutex_exit(&pool_head_lock);
! 2119:
! 2120: pool_cache_cpu_init1(ci, pc);
! 2121:
! 2122: mutex_enter(&pool_head_lock);
! 2123: pc->pc_refcnt--;
! 2124: cv_broadcast(&pool_busy);
! 2125: }
! 2126: mutex_exit(&pool_head_lock);
! 2127: }
! 2128:
! 2129: /*
! 2130: * pool_cache_reclaim:
! 2131: *
! 2132: * Reclaim memory from a pool cache.
! 2133: */
! 2134: bool
! 2135: pool_cache_reclaim(pool_cache_t pc)
! 2136: {
! 2137:
! 2138: return pool_reclaim(&pc->pc_pool);
1.43 thorpej 2139: }
2140:
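/*
 * pcg_get:
 *
 *	Remove the most recently added object from a group, optionally
 *	returning its physical address.  The group must not be empty.
 */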
1.110 perry 2141: static inline void *
1.128.2.7! ad 2142: pcg_get(pcg_t *pcg, paddr_t *pap)
1.43 thorpej 2143: {
2144: void *object;
2145: u_int idx;
2146:
2147: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 2148: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 2149:
1.128.2.7! ad 2150: idx = --pcg->pcg_avail;
1.87 thorpej 2151: object = pcg->pcg_objects[idx].pcgo_va;
2152: if (pap != NULL)
2153: *pap = pcg->pcg_objects[idx].pcgo_pa;
1.128.2.7! ad 2154:
! 2155: #ifdef DIAGNOSTIC
1.87 thorpej 2156: pcg->pcg_objects[idx].pcgo_va = NULL;
1.128.2.7! ad 2157: KASSERT(object != NULL);
! 2158: #endif
1.43 thorpej 2159:
2160: return (object);
2161: }
2162:
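/*
 * pcg_put:
 *
 *	Add an object and its physical address to a group.  The group
 *	must not be full.
 */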
1.110 perry 2163: static inline void
1.128.2.7! ad 2164: pcg_put(pcg_t *pcg, void *object, paddr_t pa)
1.43 thorpej 2165: {
2166: u_int idx;
2167:
2168: idx = pcg->pcg_avail++;
2169:
1.128.2.7! ad 2170: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.87 thorpej 2171: KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1.128.2.7! ad 2172:
1.87 thorpej 2173: pcg->pcg_objects[idx].pcgo_va = object;
2174: pcg->pcg_objects[idx].pcgo_pa = pa;
1.43 thorpej 2175: }
2176:
1.128.2.7! ad 2177: /*
! 2178: * pool_cache_destruct_object:
! 2179: *
! 2180: * Force destruction of an object and its release back into
! 2181: * the pool.
! 2182: */
! 2183: void
! 2184: pool_cache_destruct_object(pool_cache_t pc, void *object)
! 2185: {
! 2186:
! 2187: if (pc->pc_dtor != NULL)
! 2188: (*pc->pc_dtor)(pc->pc_arg, object);
! 2189: pool_put(&pc->pc_pool, object);
! 2190: }
! 2191:
! 2192: /*
! 2193: * pool_cache_invalidate_groups:
! 2194: *
! 2195: * Invalidate a chain of groups and destruct all objects.
! 2196: */
1.102 chs 2197: static void
1.128.2.7! ad 2198: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 2199: {
1.128.2.7! ad 2200: void *object;
! 2201: pcg_t *next;
! 2202: int i;
! 2203:
! 2204: for (; pcg != NULL; pcg = next) {
! 2205: next = pcg->pcg_next;
! 2206:
! 2207: for (i = 0; i < pcg->pcg_avail; i++) {
! 2208: object = pcg->pcg_objects[i].pcgo_va;
! 2209: if (pc->pc_dtor != NULL)
! 2210: (*pc->pc_dtor)(pc->pc_arg, object);
! 2211: pool_put(&pc->pc_pool, object);
! 2212: }
1.102 chs 2213:
2214: pool_put(&pcgpool, pcg);
2215: }
2216: }
2217:
1.43 thorpej 2218: /*
1.128.2.7! ad 2219: * pool_cache_invalidate:
1.43 thorpej 2220: *
1.128.2.7! ad 2221: * Invalidate a pool cache (destruct and release all of the
! 2222: * cached objects). Does not reclaim objects from the pool.
1.43 thorpej 2223: */
1.128.2.7! ad 2224: void
! 2225: pool_cache_invalidate(pool_cache_t pc)
1.43 thorpej 2226: {
1.128.2.7! ad 2227: pcg_t *full, *empty;
1.43 thorpej 2228:
1.128.2.2 ad 2229: mutex_enter(&pc->pc_lock);
1.128.2.7! ad 2230: full = pc->pc_fullgroups;
! 2231: empty = pc->pc_emptygroups;
! 2232: pc->pc_fullgroups = NULL;
! 2233: pc->pc_emptygroups = NULL;
! 2234: pc->pc_nfull = 0;
! 2235: pc->pc_nempty = 0;
! 2236: mutex_exit(&pc->pc_lock);
1.43 thorpej 2237:
1.128.2.7! ad 2238: pool_cache_invalidate_groups(pc, full);
! 2239: pool_cache_invalidate_groups(pc, empty);
! 2240: }
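/*
 * For example (an illustrative scenario; foo_cache is hypothetical),
 * a subsystem whose constructed state has gone stale could flush all
 * cached objects with:
 *
 *	pool_cache_invalidate(foo_cache);
 */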
1.43 thorpej 2241:
1.128.2.7! ad 2242: void
! 2243: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
! 2244: {
1.125 ad 2245:
1.128.2.7! ad 2246: pool_set_drain_hook(&pc->pc_pool, fn, arg);
! 2247: }
1.43 thorpej 2248:
1.128.2.7! ad 2249: void
! 2250: pool_cache_setlowat(pool_cache_t pc, int n)
! 2251: {
1.43 thorpej 2252:
1.128.2.7! ad 2253: pool_setlowat(&pc->pc_pool, n);
! 2254: }
1.43 thorpej 2255:
1.128.2.7! ad 2256: void
! 2257: pool_cache_sethiwat(pool_cache_t pc, int n)
! 2258: {
! 2259:
! 2260: pool_sethiwat(&pc->pc_pool, n);
1.43 thorpej 2261: }
2262:
2263: void
1.128.2.7! ad 2264: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
1.43 thorpej 2265: {
2266:
1.128.2.7! ad 2267: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
! 2268: }
! 2269:
! 2270: static inline pool_cache_cpu_t *
! 2271: pool_cache_cpu_enter(pool_cache_t pc, int *s)
! 2272: {
! 2273: pool_cache_cpu_t *cc;
! 2274: struct cpu_info *ci;
1.125 ad 2275:
1.128.2.7! ad 2276: /*
! 2277: * Prevent other users of the cache from accessing our
! 2278: * CPU-local data. To avoid touching shared state, we
! 2279: 	 * pull the necessary information from CPU-local data.
! 2280: */
! 2281: ci = curcpu();
! 2282: cc = pc->pc_cpus[ci->ci_data.cpu_index];
! 2283: if (cc->cc_ipl == IPL_NONE) {
! 2284: crit_enter();
! 2285: } else {
! 2286: *s = splraiseipl(cc->cc_iplcookie);
1.109 christos 2287: }
2288:
1.128.2.7! ad 2289: /* Moved to another CPU before disabling preemption? */
! 2290: if (__predict_false(ci != curcpu())) {
! 2291: ci = curcpu();
! 2292: cc = pc->pc_cpus[ci->ci_data.cpu_index];
! 2293: }
1.43 thorpej 2294:
1.128.2.7! ad 2295: #ifdef DIAGNOSTIC
! 2296: KASSERT(cc->cc_busy == NULL);
! 2297: KASSERT(cc->cc_cpu == ci);
! 2298: KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
! 2299: cc->cc_busy = curlwp;
! 2300: #endif
! 2301:
! 2302: return cc;
! 2303: }
! 2304:
! 2305: static inline void
! 2306: pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
! 2307: {
! 2308:
! 2309: #ifdef DIAGNOSTIC
! 2310: KASSERT(cc->cc_busy == curlwp);
! 2311: cc->cc_busy = NULL;
! 2312: #endif
! 2313:
! 2314: /* No longer need exclusive access to the per-CPU data. */
! 2315: if (cc->cc_ipl == IPL_NONE) {
! 2316: crit_exit();
! 2317: } else {
! 2318: splx(*s);
! 2319: }
! 2320: }
! 2321:
! 2322: #if __GNUC_PREREQ__(3, 0)
! 2323: __attribute ((noinline))
! 2324: #endif
! 2325: pool_cache_cpu_t *
! 2326: pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
! 2327: paddr_t *pap, int flags)
! 2328: {
! 2329: pcg_t *pcg, *cur;
! 2330: uint64_t ncsw;
! 2331: pool_cache_t pc;
! 2332: void *object;
! 2333:
! 2334: pc = cc->cc_cache;
! 2335: cc->cc_misses++;
! 2336:
! 2337: /*
! 2338: * Nothing was available locally. Try and grab a group
! 2339: * from the cache.
! 2340: */
! 2341: if (!mutex_tryenter(&pc->pc_lock)) {
! 2342: ncsw = curlwp->l_ncsw;
! 2343: mutex_enter(&pc->pc_lock);
! 2344: pc->pc_contended++;
! 2345:
! 2346: /*
! 2347: * If we context switched while locking, then
! 2348: * our view of the per-CPU data is invalid:
! 2349: * retry.
! 2350: */
! 2351: if (curlwp->l_ncsw != ncsw) {
! 2352: mutex_exit(&pc->pc_lock);
! 2353: pool_cache_cpu_exit(cc, s);
! 2354: return pool_cache_cpu_enter(pc, s);
1.43 thorpej 2355: }
1.102 chs 2356: }
1.43 thorpej 2357:
1.128.2.7! ad 2358: if ((pcg = pc->pc_fullgroups) != NULL) {
1.43 thorpej 2359: /*
1.128.2.7! ad 2360: * If there's a full group, release our empty
! 2361: * group back to the cache. Install the full
! 2362: * group as cc_current and return.
1.43 thorpej 2363: */
1.128.2.7! ad 2364: if ((cur = cc->cc_current) != NULL) {
! 2365: KASSERT(cur->pcg_avail == 0);
! 2366: cur->pcg_next = pc->pc_emptygroups;
! 2367: pc->pc_emptygroups = cur;
! 2368: pc->pc_nempty++;
! 2369: }
! 2370: KASSERT(pcg->pcg_avail == PCG_NOBJECTS);
! 2371: cc->cc_current = pcg;
! 2372: pc->pc_fullgroups = pcg->pcg_next;
! 2373: pc->pc_hits++;
! 2374: pc->pc_nfull--;
1.128.2.2 ad 2375: mutex_exit(&pc->pc_lock);
1.128.2.7! ad 2376: return cc;
! 2377: }
1.102 chs 2378:
1.128.2.7! ad 2379: /*
! 2380: * Nothing available locally or in cache. Take the slow
! 2381: * path: fetch a new object from the pool and construct
! 2382: * it.
! 2383: */
! 2384: pc->pc_misses++;
! 2385: mutex_exit(&pc->pc_lock);
! 2386: pool_cache_cpu_exit(cc, s);
! 2387:
! 2388: object = pool_get(&pc->pc_pool, flags);
! 2389: *objectp = object;
! 2390: if (object == NULL)
! 2391: return NULL;
! 2392:
! 2393: if (pc->pc_ctor != NULL) {
! 2394: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
! 2395: pool_put(&pc->pc_pool, object);
! 2396: *objectp = NULL;
! 2397: return NULL;
1.43 thorpej 2398: }
2399: }
2400:
1.128.2.7! ad 2401: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
! 2402: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2403:
1.128.2.7! ad 2404: if (pap != NULL) {
! 2405: #ifdef POOL_VTOPHYS
! 2406: *pap = POOL_VTOPHYS(object);
! 2407: #else
! 2408: *pap = POOL_PADDR_INVALID;
! 2409: #endif
1.102 chs 2410: }
1.51 thorpej 2411:
1.128.2.7! ad 2412: FREECHECK_OUT(&pc->pc_freecheck, object);
! 2413: return NULL;
1.43 thorpej 2414: }
2415:
1.128.2.6 ad 2416: /*
1.128.2.7! ad 2417: * pool_cache_get{,_paddr}:
1.128.2.6 ad 2418: *
1.128.2.7! ad 2419: * Get an object from a pool cache (optionally returning
! 2420: * the physical address of the object).
1.128.2.6 ad 2421: */
1.128.2.7! ad 2422: void *
! 2423: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.102 chs 2424: {
1.128.2.7! ad 2425: pool_cache_cpu_t *cc;
! 2426: pcg_t *pcg;
1.102 chs 2427: void *object;
1.128.2.7! ad 2428: int s;
1.102 chs 2429:
1.128.2.7! ad 2430: #ifdef LOCKDEBUG
! 2431: if (flags & PR_WAITOK)
! 2432: ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
! 2433: #endif
1.128.2.6 ad 2434:
1.128.2.7! ad 2435: cc = pool_cache_cpu_enter(pc, &s);
! 2436: do {
! 2437: 		/* Try to allocate an object from the current group. */
! 2438: pcg = cc->cc_current;
! 2439: if (pcg != NULL && pcg->pcg_avail > 0) {
! 2440: object = pcg_get(pcg, pap);
! 2441: cc->cc_hits++;
! 2442: pool_cache_cpu_exit(cc, &s);
! 2443: FREECHECK_OUT(&pc->pc_freecheck, object);
! 2444: return object;
! 2445: }
1.128.2.6 ad 2446:
1.128.2.7! ad 2447: /*
! 2448: * That failed. If the previous group isn't empty, swap
! 2449: * it with the current group and allocate from there.
! 2450: */
! 2451: pcg = cc->cc_previous;
! 2452: if (pcg != NULL && pcg->pcg_avail > 0) {
! 2453: cc->cc_previous = cc->cc_current;
! 2454: cc->cc_current = pcg;
! 2455: continue;
1.102 chs 2456: }
1.128.2.6 ad 2457:
1.128.2.7! ad 2458: /*
! 2459: * Can't allocate from either group: try the slow path.
! 2460: * If get_slow() allocated an object for us, or if
! 2461: * no more objects are available, it will return NULL.
! 2462: * Otherwise, we need to retry.
! 2463: */
! 2464: cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
! 2465: } while (cc != NULL);
! 2466:
! 2467: return object;
1.105 christos 2468: }
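/*
 * A sketch of the _paddr variant, for callers that also need the
 * physical address, e.g. to program DMA (foo_cache is hypothetical):
 *
 *	paddr_t pa;
 *	void *obj;
 *
 *	obj = pool_cache_get_paddr(foo_cache, PR_NOWAIT, &pa);
 */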
2469:
1.128.2.7! ad 2470: #if __GNUC_PREREQ__(3, 0)
! 2471: __attribute ((noinline))
! 2472: #endif
! 2473: pool_cache_cpu_t *
! 2474: pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
1.105 christos 2475: {
1.128.2.7! ad 2476: pcg_t *pcg, *cur;
! 2477: uint64_t ncsw;
! 2478: pool_cache_t pc;
1.105 christos 2479:
1.128.2.7! ad 2480: pc = cc->cc_cache;
! 2481: cc->cc_misses++;
1.105 christos 2482:
1.128.2.7! ad 2483: /*
! 2484: * No free slots locally. Try to grab an empty, unused
! 2485: * group from the cache.
! 2486: */
! 2487: if (!mutex_tryenter(&pc->pc_lock)) {
! 2488: ncsw = curlwp->l_ncsw;
! 2489: mutex_enter(&pc->pc_lock);
! 2490: pc->pc_contended++;
1.102 chs 2491:
1.128.2.7! ad 2492: /*
! 2493: * If we context switched while locking, then
! 2494: * our view of the per-CPU data is invalid:
! 2495: * retry.
! 2496: */
! 2497: if (curlwp->l_ncsw != ncsw) {
! 2498: mutex_exit(&pc->pc_lock);
! 2499: pool_cache_cpu_exit(cc, s);
! 2500: return pool_cache_cpu_enter(pc, s);
! 2501: }
! 2502: }
1.101 thorpej 2503:
1.128.2.7! ad 2504: if ((pcg = pc->pc_emptygroups) != NULL) {
! 2505: /*
! 2506: 		 * If there's an empty group, release our full
! 2507: * group back to the cache. Install the empty
! 2508: * group as cc_current and return.
! 2509: */
! 2510: if ((cur = cc->cc_current) != NULL) {
! 2511: KASSERT(cur->pcg_avail == PCG_NOBJECTS);
! 2512: cur->pcg_next = pc->pc_fullgroups;
! 2513: pc->pc_fullgroups = cur;
! 2514: pc->pc_nfull++;
! 2515: }
! 2516: KASSERT(pcg->pcg_avail == 0);
! 2517: cc->cc_current = pcg;
! 2518: pc->pc_emptygroups = pcg->pcg_next;
! 2519: pc->pc_hits++;
! 2520: pc->pc_nempty--;
! 2521: mutex_exit(&pc->pc_lock);
! 2522: return cc;
! 2523: }
1.101 thorpej 2524:
1.128.2.7! ad 2525: /*
! 2526: * Nothing available locally or in cache. Take the
! 2527: * slow path and try to allocate a new group that we
! 2528: * can release to.
! 2529: */
! 2530: pc->pc_misses++;
! 2531: mutex_exit(&pc->pc_lock);
! 2532: pool_cache_cpu_exit(cc, s);
1.43 thorpej 2533:
1.128.2.7! ad 2534: /*
! 2535: * If we can't allocate a new group, just throw the
! 2536: * object away.
! 2537: */
! 2538: #ifdef XXXAD /* Disable the cache layer for now. */
! 2539: pcg = pool_get(&pcgpool, PR_NOWAIT);
! 2540: #else
! 2541: pcg = NULL;
! 2542: #endif
! 2543: if (pcg == NULL) {
! 2544: pool_cache_destruct_object(pc, object);
! 2545: return NULL;
! 2546: }
! 2547: #ifdef DIAGNOSTIC
! 2548: memset(pcg, 0, sizeof(*pcg));
! 2549: #else
! 2550: pcg->pcg_avail = 0;
! 2551: #endif
1.43 thorpej 2552:
1.128.2.7! ad 2553: /*
! 2554: * Add the empty group to the cache and try again.
! 2555: */
! 2556: mutex_enter(&pc->pc_lock);
! 2557: pcg->pcg_next = pc->pc_emptygroups;
! 2558: pc->pc_emptygroups = pcg;
! 2559: pc->pc_nempty++;
1.128.2.2 ad 2560: mutex_exit(&pc->pc_lock);
1.43 thorpej 2561:
1.128.2.7! ad 2562: return pool_cache_cpu_enter(pc, s);
! 2563: }
1.43 thorpej 2564:
2565: /*
1.128.2.7! ad 2566: * pool_cache_put{,_paddr}:
1.43 thorpej 2567: *
1.128.2.7! ad 2568: * Put an object back to the pool cache (optionally caching the
! 2569: * physical address of the object).
1.43 thorpej 2570: */
1.128.2.7! ad 2571: void
! 2572: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2573: {
1.128.2.7! ad 2574: pool_cache_cpu_t *cc;
! 2575: pcg_t *pcg;
! 2576: int s;
1.101 thorpej 2577:
1.128.2.7! ad 2578: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2579:
1.128.2.7! ad 2580: cc = pool_cache_cpu_enter(pc, &s);
! 2581: do {
! 2582: /* If the current group isn't full, release it there. */
! 2583: pcg = cc->cc_current;
! 2584: if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) {
! 2585: pcg_put(pcg, object, pa);
! 2586: cc->cc_hits++;
! 2587: pool_cache_cpu_exit(cc, &s);
! 2588: return;
! 2589: }
1.43 thorpej 2590:
1.128.2.7! ad 2591: /*
! 2592: * That failed. If the previous group is empty, swap
! 2593: * it with the current group and try again.
! 2594: */
! 2595: pcg = cc->cc_previous;
! 2596: if (pcg != NULL && pcg->pcg_avail == 0) {
! 2597: cc->cc_previous = cc->cc_current;
! 2598: cc->cc_current = pcg;
! 2599: continue;
! 2600: }
! 2601:
! 2602: /*
! 2603: * Can't free to either group: try the slow path.
! 2604: * If put_slow() releases the object for us, it
! 2605: * will return NULL. Otherwise we need to retry.
! 2606: */
! 2607: cc = pool_cache_put_slow(cc, &s, object, pa);
! 2608: } while (cc != NULL);
1.3 pk 2609: }
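/*
 * Callers with no physical address to cache are expected to go
 * through the plain pool_cache_put() wrapper, which in this scheme
 * simply passes POOL_PADDR_INVALID:
 *
 *	pool_cache_put(foo_cache, obj);		(foo_cache hypothetical)
 */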
1.66 thorpej 2610:
2611: /*
2612: * Pool backend allocators.
2613: *
2614: * Each pool has a backend allocator that handles allocation, deallocation,
2615: * and any additional draining that might be needed.
2616: *
2617: * We provide two standard allocators:
2618: *
2619: * pool_allocator_kmem - the default when no allocator is specified
2620: *
2621: * pool_allocator_nointr - used for pools that will not be accessed
2622: * in interrupt context.
2623: */
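/*
 * A pool may also supply its own allocator.  A minimal sketch, with
 * hypothetical names (mysub_alloc, mysub_free, mysub_map, mysub_pool):
 *
 *	struct pool_allocator mysub_allocator = {
 *		mysub_alloc, mysub_free, 0,
 *		.pa_backingmapptr = &mysub_map,
 *	};
 *
 *	pool_init(&mysub_pool, size, 0, 0, 0, "mysubpl",
 *	    &mysub_allocator, IPL_VM);
 */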
2624: void *pool_page_alloc(struct pool *, int);
2625: void pool_page_free(struct pool *, void *);
2626:
1.112 bjh21 2627: #ifdef POOL_SUBPAGE
2628: struct pool_allocator pool_allocator_kmem_fullpage = {
2629: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2630: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2631: };
2632: #else
1.66 thorpej 2633: struct pool_allocator pool_allocator_kmem = {
2634: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2635: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2636: };
1.112 bjh21 2637: #endif
1.66 thorpej 2638:
2639: void *pool_page_alloc_nointr(struct pool *, int);
2640: void pool_page_free_nointr(struct pool *, void *);
2641:
1.112 bjh21 2642: #ifdef POOL_SUBPAGE
2643: struct pool_allocator pool_allocator_nointr_fullpage = {
2644: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2645: .pa_backingmapptr = &kernel_map,
1.112 bjh21 2646: };
2647: #else
1.66 thorpej 2648: struct pool_allocator pool_allocator_nointr = {
2649: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2650: .pa_backingmapptr = &kernel_map,
1.66 thorpej 2651: };
1.112 bjh21 2652: #endif
1.66 thorpej 2653:
2654: #ifdef POOL_SUBPAGE
2655: void *pool_subpage_alloc(struct pool *, int);
2656: void pool_subpage_free(struct pool *, void *);
2657:
1.112 bjh21 2658: struct pool_allocator pool_allocator_kmem = {
2659: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2660: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2661: };
2662:
2663: void *pool_subpage_alloc_nointr(struct pool *, int);
2664: void pool_subpage_free_nointr(struct pool *, void *);
2665:
2666: struct pool_allocator pool_allocator_nointr = {
2667: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2668: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2669: };
2670: #endif /* POOL_SUBPAGE */
2671:
1.117 yamt 2672: static void *
2673: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2674: {
1.117 yamt 2675: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2676: void *res;
2677:
1.117 yamt 2678: res = (*pa->pa_alloc)(pp, flags);
2679: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2680: /*
1.117 yamt 2681: * We only run the drain hook here if PR_NOWAIT.
2682: * In other cases, the hook will be run in
2683: * pool_reclaim().
1.66 thorpej 2684: */
1.117 yamt 2685: if (pp->pr_drain_hook != NULL) {
2686: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2687: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2688: }
1.117 yamt 2689: }
2690: return res;
1.66 thorpej 2691: }
2692:
1.117 yamt 2693: static void
1.66 thorpej 2694: pool_allocator_free(struct pool *pp, void *v)
2695: {
2696: struct pool_allocator *pa = pp->pr_alloc;
2697:
2698: (*pa->pa_free)(pp, v);
2699: }
2700:
2701: void *
1.124 yamt 2702: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2703: {
1.127 thorpej 2704: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2705:
1.100 yamt 2706: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2707: }
2708:
2709: void
1.124 yamt 2710: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2711: {
2712:
1.98 yamt 2713: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2714: }
2715:
2716: static void *
1.124 yamt 2717: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2718: {
1.127 thorpej 2719: bool waitok = (flags & PR_WAITOK) ? true : false;
1.98 yamt 2720:
1.100 yamt 2721: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2722: }
2723:
2724: static void
1.124 yamt 2725: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2726: {
2727:
1.100 yamt 2728: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2729: }
2730:
2731: #ifdef POOL_SUBPAGE
2732: /* Sub-page allocator, for machines with large hardware pages. */
2733: void *
2734: pool_subpage_alloc(struct pool *pp, int flags)
2735: {
1.128.2.2 ad 2736: return pool_get(&psppool, flags);
1.66 thorpej 2737: }
2738:
2739: void
2740: pool_subpage_free(struct pool *pp, void *v)
2741: {
2742: pool_put(&psppool, v);
2743: }
2744:
2745: /* We don't provide a real nointr allocator. Maybe later. */
2746: void *
1.112 bjh21 2747: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2748: {
2749:
2750: return (pool_subpage_alloc(pp, flags));
2751: }
2752:
2753: void
1.112 bjh21 2754: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2755: {
2756:
2757: pool_subpage_free(pp, v);
2758: }
1.112 bjh21 2759: #endif /* POOL_SUBPAGE */
1.66 thorpej 2760: void *
1.124 yamt 2761: pool_page_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2762: {
1.127 thorpej 2763: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2764:
1.100 yamt 2765: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2766: }
2767:
2768: void
1.124 yamt 2769: pool_page_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2770: {
2771:
1.98 yamt 2772: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2773: }