Annotation of src/sys/kern/subr_pool.c, Revision 1.138.4.2
1.138.4.1 bouyer 1: /* $NetBSD$ */
1.1 pk 2:
3: /*-
1.134 ad 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.134 ad 9: * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.138.4.1 bouyer 41: __KERNEL_RCSID(0, "$NetBSD$");
1.24 scottr 42:
1.138.4.1 bouyer 43: #include "opt_ddb.h"
1.25 thorpej 44: #include "opt_pool.h"
1.24 scottr 45: #include "opt_poollog.h"
1.28 thorpej 46: #include "opt_lockdebug.h"
1.1 pk 47:
48: #include <sys/param.h>
49: #include <sys/systm.h>
1.135 yamt 50: #include <sys/bitops.h>
1.1 pk 51: #include <sys/proc.h>
52: #include <sys/errno.h>
53: #include <sys/kernel.h>
54: #include <sys/malloc.h>
55: #include <sys/lock.h>
56: #include <sys/pool.h>
1.20 thorpej 57: #include <sys/syslog.h>
1.125 ad 58: #include <sys/debug.h>
1.134 ad 59: #include <sys/lockdebug.h>
60: #include <sys/xcall.h>
61: #include <sys/cpu.h>
1.138.4.2! bouyer 62: #include <sys/atomic.h>
1.3 pk 63:
64: #include <uvm/uvm.h>
65:
1.1 pk 66: /*
67: * Pool resource management utility.
1.3 pk 68: *
1.88 chs 69: * Memory is allocated in pages which are split into pieces according to
70: * the pool item size. Each page is kept on one of three lists in the
71: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
72: * for empty, full and partially-full pages respectively. The individual
73: * pool items are on a linked list headed by `ph_itemlist' in each page
74: * header. The memory for building the page list is either taken from
75: * the allocated pages themselves (for small pool items) or taken from
76: * an internal pool of page headers (`phpool').
1.1 pk 77: */
78:
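/*
 * Illustrative sketch (not part of this file): a typical consumer
 * creates a pool of fixed-size objects and allocates from it.  The
 * `struct foo' and `foo_pool' names are hypothetical.
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", &pool_allocator_nointr, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */
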
1.3 pk 79: /* List of all pools */
1.138.4.2! bouyer 80: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.134 ad 81:
1.3 pk 82: /* Private pool for page header structures */
1.97 yamt 83: #define PHPOOL_MAX 8
84: static struct pool phpool[PHPOOL_MAX];
1.135 yamt 85: #define PHPOOL_FREELIST_NELEM(idx) \
86: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
1.3 pk 87:
1.62 bjh21 88: #ifdef POOL_SUBPAGE
89: /* Pool of subpages for use by normal pools. */
90: static struct pool psppool;
91: #endif
92:
1.117 yamt 93: static SLIST_HEAD(, pool_allocator) pa_deferinitq =
94: SLIST_HEAD_INITIALIZER(pa_deferinitq);
95:
1.98 yamt 96: static void *pool_page_alloc_meta(struct pool *, int);
97: static void pool_page_free_meta(struct pool *, void *);
98:
99: /* allocator for pool metadata */
1.134 ad 100: struct pool_allocator pool_allocator_meta = {
1.117 yamt 101: pool_page_alloc_meta, pool_page_free_meta,
102: .pa_backingmapptr = &kmem_map,
1.98 yamt 103: };
104:
1.3 pk 105: /* # of seconds to retain page after last use */
106: int pool_inactive_time = 10;
107:
108: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 109: static struct pool *drainpp;
110:
1.134 ad 111: /* This lock protects both pool_head and drainpp. */
112: static kmutex_t pool_head_lock;
113: static kcondvar_t pool_busy;
1.3 pk 114:
1.135 yamt 115: typedef uint32_t pool_item_bitmap_t;
116: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
117: #define BITMAP_MASK (BITMAP_SIZE - 1)
1.99 yamt 118:
1.3 pk 119: struct pool_item_header {
120: /* Page headers */
1.88 chs 121: LIST_ENTRY(pool_item_header)
1.3 pk 122: ph_pagelist; /* pool page list */
1.88 chs 123: SPLAY_ENTRY(pool_item_header)
124: ph_node; /* Off-page page headers */
1.128 christos 125: void * ph_page; /* this page's address */
1.3 pk 126: struct timeval ph_time; /* last referenced */
1.135 yamt 127: uint16_t ph_nmissing; /* # of chunks in use */
1.138.4.1 bouyer 128: uint16_t ph_off; /* start offset in page */
1.97 yamt 129: union {
130: /* !PR_NOTOUCH */
131: struct {
1.102 chs 132: LIST_HEAD(, pool_item)
1.97 yamt 133: phu_itemlist; /* chunk list for this page */
134: } phu_normal;
135: /* PR_NOTOUCH */
136: struct {
1.138.4.1 bouyer 137: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 138: } phu_notouch;
139: } ph_u;
1.3 pk 140: };
1.97 yamt 141: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.135 yamt 142: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 143:
1.1 pk 144: struct pool_item {
1.3 pk 145: #ifdef DIAGNOSTIC
1.82 thorpej 146: u_int pi_magic;
1.33 chs 147: #endif
1.134 ad 148: #define PI_MAGIC 0xdeaddeadU
1.3 pk 149: /* Other entries use only this list entry */
1.102 chs 150: LIST_ENTRY(pool_item) pi_list;
1.3 pk 151: };
152:
1.53 thorpej 153: #define POOL_NEEDS_CATCHUP(pp) \
154: ((pp)->pr_nitems < (pp)->pr_minitems)
155:
1.43 thorpej 156: /*
157: * Pool cache management.
158: *
159: * Pool caches provide a way for constructed objects to be cached by the
160: * pool subsystem. This can lead to performance improvements by avoiding
 161: * needless object construction/destruction; destruction is deferred
 162: * until absolutely necessary.
163: *
1.134 ad 164: * Caches are grouped into cache groups. Each cache group references up
165: * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
166: * object from the pool, it calls the object's constructor and places it
167: * into a cache group. When a cache group frees an object back to the
168: * pool, it first calls the object's destructor. This allows the object
169: * to persist in constructed form while freed to the cache.
170: *
171: * The pool references each cache, so that when a pool is drained by the
172: * pagedaemon, it can drain each individual cache as well. Each time a
173: * cache is drained, the most idle cache group is freed to the pool in
174: * its entirety.
1.43 thorpej 175: *
 176: * Pool caches are laid on top of pools. By layering them, we can avoid
177: * the complexity of cache management for pools which would not benefit
178: * from it.
179: */
180:
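/*
 * Illustrative sketch (not part of this file): the consumer-side
 * cache API, assuming the pool_cache_init() signature of this
 * vintage; the foo_* names are hypothetical.
 *
 *	pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
 *	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(foo_cache, f);
 */
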
1.138.4.2! bouyer 181: static struct pool pcg_normal_pool;
! 182: static struct pool pcg_large_pool;
1.134 ad 183: static struct pool cache_pool;
184: static struct pool cache_cpu_pool;
1.3 pk 185:
1.138.4.2! bouyer 186: /* List of all caches. */
! 187: TAILQ_HEAD(,pool_cache) pool_cache_head =
! 188: TAILQ_HEAD_INITIALIZER(pool_cache_head);
! 189:
! 190: int pool_cache_disable;
! 191:
! 192:
1.134 ad 193: static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
194: void *, paddr_t);
195: static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
196: void **, paddr_t *, int);
197: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
198: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
199: static void pool_cache_xcall(pool_cache_t);
1.3 pk 200:
1.42 thorpej 201: static int pool_catchup(struct pool *);
1.128 christos 202: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 203: struct pool_item_header *);
1.88 chs 204: static void pool_update_curpage(struct pool *);
1.66 thorpej 205:
1.113 yamt 206: static int pool_grow(struct pool *, int);
1.117 yamt 207: static void *pool_allocator_alloc(struct pool *, int);
208: static void pool_allocator_free(struct pool *, void *);
1.3 pk 209:
1.97 yamt 210: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 211: void (*)(const char *, ...));
1.42 thorpej 212: static void pool_print1(struct pool *, const char *,
213: void (*)(const char *, ...));
1.3 pk 214:
1.88 chs 215: static int pool_chk_page(struct pool *, const char *,
216: struct pool_item_header *);
217:
1.3 pk 218: /*
1.52 thorpej 219: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 220: */
221: struct pool_log {
222: const char *pl_file;
223: long pl_line;
224: int pl_action;
1.25 thorpej 225: #define PRLOG_GET 1
226: #define PRLOG_PUT 2
1.3 pk 227: void *pl_addr;
1.1 pk 228: };
229:
1.86 matt 230: #ifdef POOL_DIAGNOSTIC
1.3 pk 231: /* Number of entries in pool log buffers */
1.17 thorpej 232: #ifndef POOL_LOGSIZE
233: #define POOL_LOGSIZE 10
234: #endif
235:
236: int pool_logsize = POOL_LOGSIZE;
1.1 pk 237:
1.110 perry 238: static inline void
1.42 thorpej 239: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 240: {
241: int n = pp->pr_curlogentry;
242: struct pool_log *pl;
243:
1.20 thorpej 244: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 245: return;
246:
247: /*
248: * Fill in the current entry. Wrap around and overwrite
249: * the oldest entry if necessary.
250: */
251: pl = &pp->pr_log[n];
252: pl->pl_file = file;
253: pl->pl_line = line;
254: pl->pl_action = action;
255: pl->pl_addr = v;
256: if (++n >= pp->pr_logsize)
257: n = 0;
258: pp->pr_curlogentry = n;
259: }
260:
261: static void
1.42 thorpej 262: pr_printlog(struct pool *pp, struct pool_item *pi,
263: void (*pr)(const char *, ...))
1.3 pk 264: {
265: int i = pp->pr_logsize;
266: int n = pp->pr_curlogentry;
267:
1.20 thorpej 268: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 269: return;
270:
271: /*
272: * Print all entries in this pool's log.
273: */
274: while (i-- > 0) {
275: struct pool_log *pl = &pp->pr_log[n];
276: if (pl->pl_action != 0) {
1.25 thorpej 277: if (pi == NULL || pi == pl->pl_addr) {
278: (*pr)("\tlog entry %d:\n", i);
279: (*pr)("\t\taction = %s, addr = %p\n",
280: pl->pl_action == PRLOG_GET ? "get" : "put",
281: pl->pl_addr);
282: (*pr)("\t\tfile: %s at line %lu\n",
283: pl->pl_file, pl->pl_line);
284: }
1.3 pk 285: }
286: if (++n >= pp->pr_logsize)
287: n = 0;
288: }
289: }
1.25 thorpej 290:
1.110 perry 291: static inline void
1.42 thorpej 292: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 293: {
294:
1.34 thorpej 295: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 296: printf("pool %s: reentrancy at file %s line %ld\n",
297: pp->pr_wchan, file, line);
298: printf(" previous entry at file %s line %ld\n",
299: pp->pr_entered_file, pp->pr_entered_line);
300: panic("pr_enter");
301: }
302:
303: pp->pr_entered_file = file;
304: pp->pr_entered_line = line;
305: }
306:
1.110 perry 307: static inline void
1.42 thorpej 308: pr_leave(struct pool *pp)
1.25 thorpej 309: {
310:
1.34 thorpej 311: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 312: printf("pool %s not entered?\n", pp->pr_wchan);
313: panic("pr_leave");
314: }
315:
316: pp->pr_entered_file = NULL;
317: pp->pr_entered_line = 0;
318: }
319:
1.110 perry 320: static inline void
1.42 thorpej 321: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 322: {
323:
324: if (pp->pr_entered_file != NULL)
325: (*pr)("\n\tcurrently entered from file %s line %ld\n",
326: pp->pr_entered_file, pp->pr_entered_line);
327: }
1.3 pk 328: #else
1.25 thorpej 329: #define pr_log(pp, v, action, file, line)
330: #define pr_printlog(pp, pi, pr)
331: #define pr_enter(pp, file, line)
332: #define pr_leave(pp)
333: #define pr_enter_check(pp, pr)
1.59 thorpej 334: #endif /* POOL_DIAGNOSTIC */
1.3 pk 335:
1.135 yamt 336: static inline unsigned int
1.97 yamt 337: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
338: const void *v)
339: {
340: const char *cp = v;
1.135 yamt 341: unsigned int idx;
1.97 yamt 342:
343: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 344: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 345: KASSERT(idx < pp->pr_itemsperpage);
346: return idx;
347: }
348:
1.110 perry 349: static inline void
1.97 yamt 350: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
351: void *obj)
352: {
1.135 yamt 353: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
354: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
355: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 356:
1.135 yamt 357: KASSERT((*bitmap & mask) == 0);
358: *bitmap |= mask;
1.97 yamt 359: }
360:
1.110 perry 361: static inline void *
1.97 yamt 362: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
363: {
1.135 yamt 364: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
365: unsigned int idx;
366: int i;
1.97 yamt 367:
1.135 yamt 368: for (i = 0; ; i++) {
369: int bit;
1.97 yamt 370:
1.135 yamt 371: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
372: bit = ffs32(bitmap[i]);
373: if (bit) {
374: pool_item_bitmap_t mask;
375:
376: bit--;
377: idx = (i * BITMAP_SIZE) + bit;
378: mask = 1 << bit;
379: KASSERT((bitmap[i] & mask) != 0);
380: bitmap[i] &= ~mask;
381: break;
382: }
383: }
384: KASSERT(idx < pp->pr_itemsperpage);
1.128 christos 385: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 386: }
387:
1.135 yamt 388: static inline void
389: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
390: {
391: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
392: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
393: int i;
394:
395: for (i = 0; i < n; i++) {
396: bitmap[i] = (pool_item_bitmap_t)-1;
397: }
398: }
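
/*
 * Worked example (illustrative): with BITMAP_SIZE == 32 and
 * pr_itemsperpage == 40, ph_bitmap spans howmany(40, 32) == 2 words.
 * Item index 35 lives in word 35 / 32 == 1 at bit 35 & 31 == 3.
 * A set bit means "free": pr_item_notouch_init() sets every bit,
 * pr_item_notouch_get() clears the first set bit found by ffs32(),
 * and pr_item_notouch_put() sets it again.
 */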
399:
1.110 perry 400: static inline int
1.88 chs 401: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
402: {
1.121 yamt 403:
404: /*
 405: * we consider a pool_item_header with a smaller ph_page to be bigger.
406: * (this unnatural ordering is for the benefit of pr_find_pagehead.)
407: */
408:
1.88 chs 409: if (a->ph_page < b->ph_page)
1.121 yamt 410: return (1);
411: else if (a->ph_page > b->ph_page)
1.88 chs 412: return (-1);
413: else
414: return (0);
415: }
416:
417: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
418: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
419:
1.138.4.1 bouyer 420: static inline struct pool_item_header *
421: pr_find_pagehead_noalign(struct pool *pp, void *v)
422: {
423: struct pool_item_header *ph, tmp;
424:
425: tmp.ph_page = (void *)(uintptr_t)v;
426: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
427: if (ph == NULL) {
428: ph = SPLAY_ROOT(&pp->pr_phtree);
429: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
430: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
431: }
432: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
433: }
434:
435: return ph;
436: }
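
/*
 * Worked example (illustrative): given headers with ph_page ==
 * 0x1000 and 0x2000, a lookup of v == 0x1800 finds no exact match;
 * since larger addresses sort earlier here, SPLAY_NEXT() steps to
 * the header with the next smaller ph_page (0x1000), which is the
 * page that would contain v.
 */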
437:
1.3 pk 438: /*
1.121 yamt 439: * Return the pool page header based on item address.
1.3 pk 440: */
1.110 perry 441: static inline struct pool_item_header *
1.121 yamt 442: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 443: {
1.88 chs 444: struct pool_item_header *ph, tmp;
1.3 pk 445:
1.121 yamt 446: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.138.4.1 bouyer 447: ph = pr_find_pagehead_noalign(pp, v);
1.121 yamt 448: } else {
1.128 christos 449: void *page =
450: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 451:
452: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 453: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 454: } else {
455: tmp.ph_page = page;
456: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
457: }
458: }
1.3 pk 459:
1.121 yamt 460: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 461: ((char *)ph->ph_page <= (char *)v &&
462: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 463: return ph;
1.3 pk 464: }
465:
1.101 thorpej 466: static void
467: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
468: {
469: struct pool_item_header *ph;
470:
471: while ((ph = LIST_FIRST(pq)) != NULL) {
472: LIST_REMOVE(ph, ph_pagelist);
473: pool_allocator_free(pp, ph->ph_page);
1.134 ad 474: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 475: pool_put(pp->pr_phpool, ph);
476: }
477: }
478:
1.3 pk 479: /*
480: * Remove a page from the pool.
481: */
1.110 perry 482: static inline void
1.61 chs 483: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
484: struct pool_pagelist *pq)
1.3 pk 485: {
486:
1.134 ad 487: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 488:
1.3 pk 489: /*
1.7 thorpej 490: * If the page was idle, decrement the idle page count.
1.3 pk 491: */
1.6 thorpej 492: if (ph->ph_nmissing == 0) {
493: #ifdef DIAGNOSTIC
494: if (pp->pr_nidle == 0)
495: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 496: if (pp->pr_nitems < pp->pr_itemsperpage)
497: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 498: #endif
499: pp->pr_nidle--;
500: }
1.7 thorpej 501:
1.20 thorpej 502: pp->pr_nitems -= pp->pr_itemsperpage;
503:
1.7 thorpej 504: /*
1.101 thorpej 505: * Unlink the page from the pool and queue it for release.
1.7 thorpej 506: */
1.88 chs 507: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 508: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
509: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 510: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
511:
1.7 thorpej 512: pp->pr_npages--;
513: pp->pr_npagefree++;
1.6 thorpej 514:
1.88 chs 515: pool_update_curpage(pp);
1.3 pk 516: }
517:
1.126 thorpej 518: static bool
1.117 yamt 519: pa_starved_p(struct pool_allocator *pa)
520: {
521:
522: if (pa->pa_backingmap != NULL) {
523: return vm_map_starved_p(pa->pa_backingmap);
524: }
1.127 thorpej 525: return false;
1.117 yamt 526: }
527:
528: static int
1.124 yamt 529: pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
1.117 yamt 530: {
531: struct pool *pp = obj;
532: struct pool_allocator *pa = pp->pr_alloc;
533:
534: KASSERT(&pp->pr_reclaimerentry == ce);
535: pool_reclaim(pp);
536: if (!pa_starved_p(pa)) {
537: return CALLBACK_CHAIN_ABORT;
538: }
539: return CALLBACK_CHAIN_CONTINUE;
540: }
541:
542: static void
543: pool_reclaim_register(struct pool *pp)
544: {
545: struct vm_map *map = pp->pr_alloc->pa_backingmap;
546: int s;
547:
548: if (map == NULL) {
549: return;
550: }
551:
552: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
553: callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
554: &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
555: splx(s);
556: }
557:
558: static void
559: pool_reclaim_unregister(struct pool *pp)
560: {
561: struct vm_map *map = pp->pr_alloc->pa_backingmap;
562: int s;
563:
564: if (map == NULL) {
565: return;
566: }
567:
568: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
569: callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
570: &pp->pr_reclaimerentry);
571: splx(s);
572: }
573:
574: static void
575: pa_reclaim_register(struct pool_allocator *pa)
576: {
577: struct vm_map *map = *pa->pa_backingmapptr;
578: struct pool *pp;
579:
580: KASSERT(pa->pa_backingmap == NULL);
581: if (map == NULL) {
582: SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
583: return;
584: }
585: pa->pa_backingmap = map;
586: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
587: pool_reclaim_register(pp);
588: }
589: }
590:
1.3 pk 591: /*
1.94 simonb 592: * Initialize all the pools listed in the "pools" link set.
593: */
594: void
1.117 yamt 595: pool_subsystem_init(void)
1.94 simonb 596: {
1.117 yamt 597: struct pool_allocator *pa;
1.94 simonb 598: __link_set_decl(pools, struct link_pool_init);
599: struct link_pool_init * const *pi;
600:
1.134 ad 601: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
602: cv_init(&pool_busy, "poolbusy");
603:
1.94 simonb 604: __link_set_foreach(pi, pools)
605: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
606: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
1.129 ad 607: (*pi)->palloc, (*pi)->ipl);
1.117 yamt 608:
609: while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
610: KASSERT(pa->pa_backingmapptr != NULL);
611: KASSERT(*pa->pa_backingmapptr != NULL);
612: SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
613: pa_reclaim_register(pa);
614: }
1.134 ad 615:
616: pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
617: 0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
618:
619: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
620: 0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
1.94 simonb 621: }
622:
623: /*
1.3 pk 624: * Initialize the given pool resource structure.
625: *
626: * We export this routine to allow other kernel parts to declare
627: * static pools that must be initialized before malloc() is available.
628: */
629: void
1.42 thorpej 630: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 631: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 632: {
1.116 simonb 633: struct pool *pp1;
1.92 enami 634: size_t trysize, phsize;
1.134 ad 635: int off, slack;
1.3 pk 636:
1.116 simonb 637: #ifdef DEBUG
638: /*
639: * Check that the pool hasn't already been initialised and
640: * added to the list of all pools.
641: */
1.138.4.2! bouyer 642: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
1.116 simonb 643: if (pp == pp1)
644: panic("pool_init: pool %s already initialised",
645: wchan);
646: }
647: #endif
648:
1.25 thorpej 649: #ifdef POOL_DIAGNOSTIC
650: /*
651: * Always log if POOL_DIAGNOSTIC is defined.
652: */
653: if (pool_logsize != 0)
654: flags |= PR_LOGGING;
655: #endif
656:
1.66 thorpej 657: if (palloc == NULL)
658: palloc = &pool_allocator_kmem;
1.112 bjh21 659: #ifdef POOL_SUBPAGE
660: if (size > palloc->pa_pagesz) {
661: if (palloc == &pool_allocator_kmem)
662: palloc = &pool_allocator_kmem_fullpage;
663: else if (palloc == &pool_allocator_nointr)
664: palloc = &pool_allocator_nointr_fullpage;
665: }
1.66 thorpej 666: #endif /* POOL_SUBPAGE */
667: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.112 bjh21 668: if (palloc->pa_pagesz == 0)
1.66 thorpej 669: palloc->pa_pagesz = PAGE_SIZE;
670:
671: TAILQ_INIT(&palloc->pa_list);
672:
1.134 ad 673: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 674: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
675: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.117 yamt 676:
677: if (palloc->pa_backingmapptr != NULL) {
678: pa_reclaim_register(palloc);
679: }
1.66 thorpej 680: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 681: }
1.3 pk 682:
683: if (align == 0)
684: align = ALIGN(1);
1.14 thorpej 685:
1.120 yamt 686: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 687: size = sizeof(struct pool_item);
1.3 pk 688:
1.78 thorpej 689: size = roundup(size, align);
1.66 thorpej 690: #ifdef DIAGNOSTIC
691: if (size > palloc->pa_pagesz)
1.121 yamt 692: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 693: #endif
1.35 pk 694:
1.3 pk 695: /*
696: * Initialize the pool structure.
697: */
1.88 chs 698: LIST_INIT(&pp->pr_emptypages);
699: LIST_INIT(&pp->pr_fullpages);
700: LIST_INIT(&pp->pr_partpages);
1.134 ad 701: pp->pr_cache = NULL;
1.3 pk 702: pp->pr_curpage = NULL;
703: pp->pr_npages = 0;
704: pp->pr_minitems = 0;
705: pp->pr_minpages = 0;
706: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 707: pp->pr_roflags = flags;
708: pp->pr_flags = 0;
1.35 pk 709: pp->pr_size = size;
1.3 pk 710: pp->pr_align = align;
711: pp->pr_wchan = wchan;
1.66 thorpej 712: pp->pr_alloc = palloc;
1.20 thorpej 713: pp->pr_nitems = 0;
714: pp->pr_nout = 0;
715: pp->pr_hardlimit = UINT_MAX;
716: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 717: pp->pr_hardlimit_ratecap.tv_sec = 0;
718: pp->pr_hardlimit_ratecap.tv_usec = 0;
719: pp->pr_hardlimit_warning_last.tv_sec = 0;
720: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 721: pp->pr_drain_hook = NULL;
722: pp->pr_drain_hook_arg = NULL;
1.125 ad 723: pp->pr_freecheck = NULL;
1.3 pk 724:
725: /*
 726: * Decide whether to put the page header off-page, to avoid
 1.92 enami 727: * wasting too large a part of the page on too big an item.
 728: * Off-page page headers go on a splay tree, so we can match
 729: * a returned item with its header based on the page address.
 730: * We use 1/16 of the page size and about 8 times the header
 731: * size as the thresholds (XXX: tune)
732: *
733: * However, we'll put the header into the page if we can put
734: * it without wasting any items.
735: *
736: * Silently enforce `0 <= ioff < align'.
1.3 pk 737: */
1.92 enami 738: pp->pr_itemoffset = ioff %= align;
739: /* See the comment below about reserved bytes. */
740: trysize = palloc->pa_pagesz - ((align - ioff) % align);
741: phsize = ALIGN(sizeof(struct pool_item_header));
1.121 yamt 742: if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 743: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
744: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 745: /* Use the end of the page for the page header */
1.20 thorpej 746: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 747: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 748: } else {
1.3 pk 749: /* The page header will be taken from our page header pool */
750: pp->pr_phoffset = 0;
1.66 thorpej 751: off = palloc->pa_pagesz;
1.88 chs 752: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 753: }
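
	/*
	 * Worked example (illustrative, assuming 4 KiB pages and a
	 * page header of roughly 48 bytes): a 64-byte item satisfies
	 * 64 < MIN(4096 / 16, 48 << 3) == 256, so the header lives
	 * in the page and off == 4096 - phsize.  A 1 KiB item fails
	 * both tests above, so its header is taken from phpool.
	 */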
1.1 pk 754:
1.3 pk 755: /*
756: * Alignment is to take place at `ioff' within the item. This means
757: * we must reserve up to `align - 1' bytes on the page to allow
758: * appropriate positioning of each item.
759: */
760: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 761: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 762: if ((pp->pr_roflags & PR_NOTOUCH)) {
763: int idx;
764:
765: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
766: idx++) {
767: /* nothing */
768: }
769: if (idx >= PHPOOL_MAX) {
770: /*
 771: * if you see this panic, consider tweaking
772: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
773: */
774: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
775: pp->pr_wchan, pp->pr_itemsperpage);
776: }
777: pp->pr_phpool = &phpool[idx];
778: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
779: pp->pr_phpool = &phpool[0];
780: }
781: #if defined(DIAGNOSTIC)
782: else {
783: pp->pr_phpool = NULL;
784: }
785: #endif
1.3 pk 786:
787: /*
788: * Use the slack between the chunks and the page header
789: * for "cache coloring".
790: */
791: slack = off - pp->pr_itemsperpage * pp->pr_size;
792: pp->pr_maxcolor = (slack / align) * align;
793: pp->pr_curcolor = 0;
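
	/*
	 * Worked example (illustrative): an off-page pool with
	 * 4096-byte pages, 96-byte items and align == 8 fits 42
	 * items (4032 bytes), so slack == 64 and pr_maxcolor == 64;
	 * successive pages then start their items at offsets
	 * 0, 8, 16, ... 64 before the color wraps to 0.
	 */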
794:
795: pp->pr_nget = 0;
796: pp->pr_nfail = 0;
797: pp->pr_nput = 0;
798: pp->pr_npagealloc = 0;
799: pp->pr_npagefree = 0;
1.1 pk 800: pp->pr_hiwat = 0;
1.8 thorpej 801: pp->pr_nidle = 0;
1.134 ad 802: pp->pr_refcnt = 0;
1.3 pk 803:
1.59 thorpej 804: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 805: if (flags & PR_LOGGING) {
806: if (kmem_map == NULL ||
807: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
808: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 809: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 810: pp->pr_curlogentry = 0;
811: pp->pr_logsize = pool_logsize;
812: }
1.59 thorpej 813: #endif
1.25 thorpej 814:
815: pp->pr_entered_file = NULL;
816: pp->pr_entered_line = 0;
1.3 pk 817:
1.138 ad 818: /*
819: * XXXAD hack to prevent IP input processing from blocking.
820: */
821: if (ipl == IPL_SOFTNET) {
822: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
823: } else {
824: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
825: }
1.134 ad 826: cv_init(&pp->pr_cv, wchan);
827: pp->pr_ipl = ipl;
1.1 pk 828:
1.3 pk 829: /*
1.43 thorpej 830: * Initialize private page header pool and cache magazine pool if we
831: * haven't done so yet.
1.23 thorpej 832: * XXX LOCKING.
1.3 pk 833: */
1.97 yamt 834: if (phpool[0].pr_size == 0) {
835: int idx;
836: for (idx = 0; idx < PHPOOL_MAX; idx++) {
837: static char phpool_names[PHPOOL_MAX][6+1+6+1];
838: int nelem;
839: size_t sz;
840:
841: nelem = PHPOOL_FREELIST_NELEM(idx);
842: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
843: "phpool-%d", nelem);
844: sz = sizeof(struct pool_item_header);
845: if (nelem) {
1.135 yamt 846: sz = offsetof(struct pool_item_header,
847: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
1.97 yamt 848: }
849: pool_init(&phpool[idx], sz, 0, 0, 0,
1.129 ad 850: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.97 yamt 851: }
1.62 bjh21 852: #ifdef POOL_SUBPAGE
853: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.129 ad 854: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
1.62 bjh21 855: #endif
1.138.4.2! bouyer 856:
! 857: size = sizeof(pcg_t) +
! 858: (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
! 859: pool_init(&pcg_normal_pool, size, CACHE_LINE_SIZE, 0, 0,
! 860: "pcgnormal", &pool_allocator_meta, IPL_VM);
! 861:
! 862: size = sizeof(pcg_t) +
! 863: (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
! 864: pool_init(&pcg_large_pool, size, CACHE_LINE_SIZE, 0, 0,
! 865: "pcglarge", &pool_allocator_meta, IPL_VM);
1.1 pk 866: }
867:
1.138.4.2! bouyer 868: /* Insert into the list of all pools. */
! 869: if (__predict_true(!cold))
1.134 ad 870: mutex_enter(&pool_head_lock);
1.138.4.2! bouyer 871: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
! 872: if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
! 873: break;
! 874: }
! 875: if (pp1 == NULL)
! 876: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
! 877: else
! 878: TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
! 879: if (__predict_true(!cold))
1.134 ad 880: mutex_exit(&pool_head_lock);
881:
882: /* Insert this into the list of pools using this allocator. */
1.138.4.2! bouyer 883: if (__predict_true(!cold))
1.134 ad 884: mutex_enter(&palloc->pa_lock);
1.138.4.2! bouyer 885: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
! 886: if (__predict_true(!cold))
1.134 ad 887: mutex_exit(&palloc->pa_lock);
1.66 thorpej 888:
1.117 yamt 889: pool_reclaim_register(pp);
1.1 pk 890: }
891:
892: /*
 893: * De-commission a pool resource.
894: */
895: void
1.42 thorpej 896: pool_destroy(struct pool *pp)
1.1 pk 897: {
1.101 thorpej 898: struct pool_pagelist pq;
1.3 pk 899: struct pool_item_header *ph;
1.43 thorpej 900:
1.101 thorpej 901: /* Remove from global pool list */
1.134 ad 902: mutex_enter(&pool_head_lock);
903: while (pp->pr_refcnt != 0)
904: cv_wait(&pool_busy, &pool_head_lock);
1.138.4.2! bouyer 905: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.101 thorpej 906: if (drainpp == pp)
907: drainpp = NULL;
1.134 ad 908: mutex_exit(&pool_head_lock);
1.101 thorpej 909:
910: /* Remove this pool from its allocator's list of pools. */
1.117 yamt 911: pool_reclaim_unregister(pp);
1.134 ad 912: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 913: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.134 ad 914: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 915:
1.134 ad 916: mutex_enter(&pp->pr_lock);
1.101 thorpej 917:
1.134 ad 918: KASSERT(pp->pr_cache == NULL);
1.3 pk 919:
920: #ifdef DIAGNOSTIC
1.20 thorpej 921: if (pp->pr_nout != 0) {
1.25 thorpej 922: pr_printlog(pp, NULL, printf);
1.80 provos 923: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 924: pp->pr_nout);
1.3 pk 925: }
926: #endif
1.1 pk 927:
1.101 thorpej 928: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
929: KASSERT(LIST_EMPTY(&pp->pr_partpages));
930:
1.3 pk 931: /* Remove all pages */
1.101 thorpej 932: LIST_INIT(&pq);
1.88 chs 933: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 934: pr_rmpage(pp, ph, &pq);
935:
1.134 ad 936: mutex_exit(&pp->pr_lock);
1.3 pk 937:
1.101 thorpej 938: pr_pagelist_free(pp, &pq);
1.3 pk 939:
1.59 thorpej 940: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 941: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 942: free(pp->pr_log, M_TEMP);
1.59 thorpej 943: #endif
1.134 ad 944:
945: cv_destroy(&pp->pr_cv);
946: mutex_destroy(&pp->pr_lock);
1.1 pk 947: }
948:
1.68 thorpej 949: void
950: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
951: {
952:
953: /* XXX no locking -- must be used just after pool_init() */
954: #ifdef DIAGNOSTIC
955: if (pp->pr_drain_hook != NULL)
956: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
957: #endif
958: pp->pr_drain_hook = fn;
959: pp->pr_drain_hook_arg = arg;
960: }
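
/*
 * Illustrative sketch (hypothetical foo_* names): a subsystem that
 * hoards idle objects can let the pool call back for them under
 * memory pressure:
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		... release idle foo structures back to their pool ...
 *	}
 *
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */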
961:
1.88 chs 962: static struct pool_item_header *
1.128 christos 963: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 964: {
965: struct pool_item_header *ph;
966:
967: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 968: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.134 ad 969: else
1.97 yamt 970: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 971:
972: return (ph);
973: }
1.1 pk 974:
975: /*
1.134 ad 976: * Grab an item from the pool.
1.1 pk 977: */
1.3 pk 978: void *
1.59 thorpej 979: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 980: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 981: #else
982: pool_get(struct pool *pp, int flags)
983: #endif
1.1 pk 984: {
985: struct pool_item *pi;
1.3 pk 986: struct pool_item_header *ph;
1.55 thorpej 987: void *v;
1.1 pk 988:
1.2 pk 989: #ifdef DIAGNOSTIC
1.95 atatat 990: if (__predict_false(pp->pr_itemsperpage == 0))
991: panic("pool_get: pool %p: pr_itemsperpage is zero, "
992: "pool not initialized?", pp);
1.84 thorpej 993: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 994: (flags & PR_WAITOK) != 0))
1.77 matt 995: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 996:
1.102 chs 997: #endif /* DIAGNOSTIC */
1.58 thorpej 998: #ifdef LOCKDEBUG
999: if (flags & PR_WAITOK)
1.119 yamt 1000: ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 1001: #endif
1.1 pk 1002:
1.134 ad 1003: mutex_enter(&pp->pr_lock);
1.25 thorpej 1004: pr_enter(pp, file, line);
1.20 thorpej 1005:
1006: startover:
1007: /*
1008: * Check to see if we've reached the hard limit. If we have,
1009: * and we can wait, then wait until an item has been returned to
1010: * the pool.
1011: */
1012: #ifdef DIAGNOSTIC
1.34 thorpej 1013: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 1014: pr_leave(pp);
1.134 ad 1015: mutex_exit(&pp->pr_lock);
1.20 thorpej 1016: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
1017: }
1018: #endif
1.34 thorpej 1019: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 1020: if (pp->pr_drain_hook != NULL) {
1021: /*
1022: * Since the drain hook is going to free things
1023: * back to the pool, unlock, call the hook, re-lock,
1024: * and check the hardlimit condition again.
1025: */
1026: pr_leave(pp);
1.134 ad 1027: mutex_exit(&pp->pr_lock);
1.68 thorpej 1028: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.134 ad 1029: mutex_enter(&pp->pr_lock);
1.68 thorpej 1030: pr_enter(pp, file, line);
1031: if (pp->pr_nout < pp->pr_hardlimit)
1032: goto startover;
1033: }
1034:
1.29 sommerfe 1035: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 1036: /*
1037: * XXX: A warning isn't logged in this case. Should
1038: * it be?
1039: */
1040: pp->pr_flags |= PR_WANTED;
1.25 thorpej 1041: pr_leave(pp);
1.134 ad 1042: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.25 thorpej 1043: pr_enter(pp, file, line);
1.20 thorpej 1044: goto startover;
1045: }
1.31 thorpej 1046:
1047: /*
1048: * Log a message that the hard limit has been hit.
1049: */
1050: if (pp->pr_hardlimit_warning != NULL &&
1051: ratecheck(&pp->pr_hardlimit_warning_last,
1052: &pp->pr_hardlimit_ratecap))
1053: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 1054:
1055: pp->pr_nfail++;
1056:
1.25 thorpej 1057: pr_leave(pp);
1.134 ad 1058: mutex_exit(&pp->pr_lock);
1.20 thorpej 1059: return (NULL);
1060: }
1061:
1.3 pk 1062: /*
1063: * The convention we use is that if `curpage' is not NULL, then
1064: * it points at a non-empty bucket. In particular, `curpage'
1065: * never points at a page header which has PR_PHINPAGE set and
1066: * has no items in its bucket.
1067: */
1.20 thorpej 1068: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 1069: int error;
1070:
1.20 thorpej 1071: #ifdef DIAGNOSTIC
1072: if (pp->pr_nitems != 0) {
1.134 ad 1073: mutex_exit(&pp->pr_lock);
1.20 thorpej 1074: printf("pool_get: %s: curpage NULL, nitems %u\n",
1075: pp->pr_wchan, pp->pr_nitems);
1.80 provos 1076: panic("pool_get: nitems inconsistent");
1.20 thorpej 1077: }
1078: #endif
1079:
1.21 thorpej 1080: /*
1081: * Call the back-end page allocator for more memory.
1082: * Release the pool lock, as the back-end page allocator
1083: * may block.
1084: */
1.25 thorpej 1085: pr_leave(pp);
1.113 yamt 1086: error = pool_grow(pp, flags);
1087: pr_enter(pp, file, line);
1088: if (error != 0) {
1.21 thorpej 1089: /*
1.55 thorpej 1090: * We were unable to allocate a page or item
1091: * header, but we released the lock during
1092: * allocation, so perhaps items were freed
1093: * back to the pool. Check for this case.
1.21 thorpej 1094: */
1095: if (pp->pr_curpage != NULL)
1096: goto startover;
1.15 pk 1097:
1.117 yamt 1098: pp->pr_nfail++;
1.25 thorpej 1099: pr_leave(pp);
1.134 ad 1100: mutex_exit(&pp->pr_lock);
1.117 yamt 1101: return (NULL);
1.1 pk 1102: }
1.3 pk 1103:
1.20 thorpej 1104: /* Start the allocation process over. */
1105: goto startover;
1.3 pk 1106: }
1.97 yamt 1107: if (pp->pr_roflags & PR_NOTOUCH) {
1108: #ifdef DIAGNOSTIC
1109: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1110: pr_leave(pp);
1.134 ad 1111: mutex_exit(&pp->pr_lock);
1.97 yamt 1112: panic("pool_get: %s: page empty", pp->pr_wchan);
1113: }
1114: #endif
1115: v = pr_item_notouch_get(pp, ph);
1116: #ifdef POOL_DIAGNOSTIC
1117: pr_log(pp, v, PRLOG_GET, file, line);
1118: #endif
1119: } else {
1.102 chs 1120: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 1121: if (__predict_false(v == NULL)) {
1122: pr_leave(pp);
1.134 ad 1123: mutex_exit(&pp->pr_lock);
1.97 yamt 1124: panic("pool_get: %s: page empty", pp->pr_wchan);
1125: }
1.20 thorpej 1126: #ifdef DIAGNOSTIC
1.97 yamt 1127: if (__predict_false(pp->pr_nitems == 0)) {
1128: pr_leave(pp);
1.134 ad 1129: mutex_exit(&pp->pr_lock);
1.97 yamt 1130: printf("pool_get: %s: items on itemlist, nitems %u\n",
1131: pp->pr_wchan, pp->pr_nitems);
1132: panic("pool_get: nitems inconsistent");
1133: }
1.65 enami 1134: #endif
1.56 sommerfe 1135:
1.65 enami 1136: #ifdef POOL_DIAGNOSTIC
1.97 yamt 1137: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 1138: #endif
1.3 pk 1139:
1.65 enami 1140: #ifdef DIAGNOSTIC
1.97 yamt 1141: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1142: pr_printlog(pp, pi, printf);
1143: panic("pool_get(%s): free list modified: "
1144: "magic=%x; page %p; item addr %p\n",
1145: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1146: }
1.3 pk 1147: #endif
1148:
1.97 yamt 1149: /*
1150: * Remove from item list.
1151: */
1.102 chs 1152: LIST_REMOVE(pi, pi_list);
1.97 yamt 1153: }
1.20 thorpej 1154: pp->pr_nitems--;
1155: pp->pr_nout++;
1.6 thorpej 1156: if (ph->ph_nmissing == 0) {
1157: #ifdef DIAGNOSTIC
1.34 thorpej 1158: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 1159: panic("pool_get: nidle inconsistent");
1160: #endif
1161: pp->pr_nidle--;
1.88 chs 1162:
1163: /*
1164: * This page was previously empty. Move it to the list of
1165: * partially-full pages. This page is already curpage.
1166: */
1167: LIST_REMOVE(ph, ph_pagelist);
1168: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 1169: }
1.3 pk 1170: ph->ph_nmissing++;
1.97 yamt 1171: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1172: #ifdef DIAGNOSTIC
1.97 yamt 1173: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 1174: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1175: pr_leave(pp);
1.134 ad 1176: mutex_exit(&pp->pr_lock);
1.21 thorpej 1177: panic("pool_get: %s: nmissing inconsistent",
1178: pp->pr_wchan);
1179: }
1180: #endif
1.3 pk 1181: /*
1.88 chs 1182: * This page is now full. Move it to the full list
1183: * and select a new current page.
1.3 pk 1184: */
1.88 chs 1185: LIST_REMOVE(ph, ph_pagelist);
1186: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1187: pool_update_curpage(pp);
1.1 pk 1188: }
1.3 pk 1189:
1190: pp->pr_nget++;
1.111 christos 1191: pr_leave(pp);
1.20 thorpej 1192:
1193: /*
1194: * If we have a low water mark and we are now below that low
1195: * water mark, add more items to the pool.
1196: */
1.53 thorpej 1197: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1198: /*
1199: * XXX: Should we log a warning? Should we set up a timeout
1200: * to try again in a second or so? The latter could break
1201: * a caller's assumptions about interrupt protection, etc.
1202: */
1203: }
1204:
1.134 ad 1205: mutex_exit(&pp->pr_lock);
1.125 ad 1206: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
1207: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 1208: return (v);
1209: }
1210:
1211: /*
1.43 thorpej 1212: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1213: */
1.43 thorpej 1214: static void
1.101 thorpej 1215: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1216: {
1217: struct pool_item *pi = v;
1.3 pk 1218: struct pool_item_header *ph;
1219:
1.134 ad 1220: KASSERT(mutex_owned(&pp->pr_lock));
1.125 ad 1221: FREECHECK_IN(&pp->pr_freecheck, v);
1.134 ad 1222: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 1223:
1.30 thorpej 1224: #ifdef DIAGNOSTIC
1.34 thorpej 1225: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1226: printf("pool %s: putting with none out\n",
1227: pp->pr_wchan);
1228: panic("pool_put");
1229: }
1230: #endif
1.3 pk 1231:
1.121 yamt 1232: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.25 thorpej 1233: pr_printlog(pp, NULL, printf);
1.3 pk 1234: panic("pool_put: %s: page header missing", pp->pr_wchan);
1235: }
1.28 thorpej 1236:
1.3 pk 1237: /*
1238: * Return to item list.
1239: */
1.97 yamt 1240: if (pp->pr_roflags & PR_NOTOUCH) {
1241: pr_item_notouch_put(pp, ph, v);
1242: } else {
1.2 pk 1243: #ifdef DIAGNOSTIC
1.97 yamt 1244: pi->pi_magic = PI_MAGIC;
1.3 pk 1245: #endif
1.32 chs 1246: #ifdef DEBUG
1.97 yamt 1247: {
1248: int i, *ip = v;
1.32 chs 1249:
1.97 yamt 1250: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1251: *ip++ = PI_MAGIC;
1252: }
1.32 chs 1253: }
1254: #endif
1255:
1.102 chs 1256: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1257: }
1.79 thorpej 1258: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1259: ph->ph_nmissing--;
1260: pp->pr_nput++;
1.20 thorpej 1261: pp->pr_nitems++;
1262: pp->pr_nout--;
1.3 pk 1263:
1264: /* Cancel "pool empty" condition if it exists */
1265: if (pp->pr_curpage == NULL)
1266: pp->pr_curpage = ph;
1267:
1268: if (pp->pr_flags & PR_WANTED) {
1269: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1270: if (ph->ph_nmissing == 0)
1271: pp->pr_nidle++;
1.134 ad 1272: cv_broadcast(&pp->pr_cv);
1.3 pk 1273: return;
1274: }
1275:
1276: /*
1.88 chs 1277: * If this page is now empty, do one of two things:
1.21 thorpej 1278: *
1.88 chs 1279: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1280: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1281: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1282: * CLAIM.
1.21 thorpej 1283: *
1.88 chs 1284: * (2) Otherwise, move the page to the empty page list.
1285: *
1286: * Either way, select a new current page (so we use a partially-full
1287: * page if one is available).
1.3 pk 1288: */
1289: if (ph->ph_nmissing == 0) {
1.6 thorpej 1290: pp->pr_nidle++;
1.90 thorpej 1291: if (pp->pr_npages > pp->pr_minpages &&
1292: (pp->pr_npages > pp->pr_maxpages ||
1.117 yamt 1293: pa_starved_p(pp->pr_alloc))) {
1.101 thorpej 1294: pr_rmpage(pp, ph, pq);
1.3 pk 1295: } else {
1.88 chs 1296: LIST_REMOVE(ph, ph_pagelist);
1297: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1298:
1.21 thorpej 1299: /*
1300: * Update the timestamp on the page. A page must
1301: * be idle for some period of time before it can
1302: * be reclaimed by the pagedaemon. This minimizes
1303: * ping-pong'ing for memory.
1304: */
1.118 kardel 1305: getmicrotime(&ph->ph_time);
1.1 pk 1306: }
1.88 chs 1307: pool_update_curpage(pp);
1.1 pk 1308: }
1.88 chs 1309:
1.21 thorpej 1310: /*
1.88 chs 1311: * If the page was previously completely full, move it to the
1312: * partially-full list and make it the current page. The next
1313: * allocation will get the item from this page, instead of
1314: * further fragmenting the pool.
1.21 thorpej 1315: */
1316: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1317: LIST_REMOVE(ph, ph_pagelist);
1318: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1319: pp->pr_curpage = ph;
1320: }
1.43 thorpej 1321: }
1322:
1323: /*
1.134 ad 1324: * Return resource to the pool.
1.43 thorpej 1325: */
1.59 thorpej 1326: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1327: void
1328: _pool_put(struct pool *pp, void *v, const char *file, long line)
1329: {
1.101 thorpej 1330: struct pool_pagelist pq;
1331:
1332: LIST_INIT(&pq);
1.43 thorpej 1333:
1.134 ad 1334: mutex_enter(&pp->pr_lock);
1.43 thorpej 1335: pr_enter(pp, file, line);
1336:
1.56 sommerfe 1337: pr_log(pp, v, PRLOG_PUT, file, line);
1338:
1.101 thorpej 1339: pool_do_put(pp, v, &pq);
1.21 thorpej 1340:
1.25 thorpej 1341: pr_leave(pp);
1.134 ad 1342: mutex_exit(&pp->pr_lock);
1.101 thorpej 1343:
1.102 chs 1344: pr_pagelist_free(pp, &pq);
1.1 pk 1345: }
1.57 sommerfe 1346: #undef pool_put
1.59 thorpej 1347: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1348:
1.56 sommerfe 1349: void
1350: pool_put(struct pool *pp, void *v)
1351: {
1.101 thorpej 1352: struct pool_pagelist pq;
1353:
1354: LIST_INIT(&pq);
1.56 sommerfe 1355:
1.134 ad 1356: mutex_enter(&pp->pr_lock);
1.101 thorpej 1357: pool_do_put(pp, v, &pq);
1.134 ad 1358: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1359:
1.102 chs 1360: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1361: }
1.57 sommerfe 1362:
1.59 thorpej 1363: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1364: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1365: #endif
1.74 thorpej 1366:
1367: /*
1.113 yamt 1368: * pool_grow: grow a pool by a page.
1369: *
1370: * => called with pool locked.
1371: * => unlock and relock the pool.
1372: * => return with pool locked.
1373: */
1374:
1375: static int
1376: pool_grow(struct pool *pp, int flags)
1377: {
1378: struct pool_item_header *ph = NULL;
1379: char *cp;
1380:
1.134 ad 1381: mutex_exit(&pp->pr_lock);
1.113 yamt 1382: cp = pool_allocator_alloc(pp, flags);
1383: if (__predict_true(cp != NULL)) {
1384: ph = pool_alloc_item_header(pp, cp, flags);
1385: }
1386: if (__predict_false(cp == NULL || ph == NULL)) {
1387: if (cp != NULL) {
1388: pool_allocator_free(pp, cp);
1389: }
1.134 ad 1390: mutex_enter(&pp->pr_lock);
1.113 yamt 1391: return ENOMEM;
1392: }
1393:
1.134 ad 1394: mutex_enter(&pp->pr_lock);
1.113 yamt 1395: pool_prime_page(pp, cp, ph);
1396: pp->pr_npagealloc++;
1397: return 0;
1398: }
1399:
1400: /*
1.74 thorpej 1401: * Add N items to the pool.
1402: */
1403: int
1404: pool_prime(struct pool *pp, int n)
1405: {
1.75 simonb 1406: int newpages;
1.113 yamt 1407: int error = 0;
1.74 thorpej 1408:
1.134 ad 1409: mutex_enter(&pp->pr_lock);
1.74 thorpej 1410:
1411: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1412:
1413: while (newpages-- > 0) {
1.113 yamt 1414: error = pool_grow(pp, PR_NOWAIT);
1415: if (error) {
1.74 thorpej 1416: break;
1417: }
1418: pp->pr_minpages++;
1419: }
1420:
1421: if (pp->pr_minpages >= pp->pr_maxpages)
1422: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1423:
1.134 ad 1424: mutex_exit(&pp->pr_lock);
1.113 yamt 1425: return error;
1.74 thorpej 1426: }
1.55 thorpej 1427:
1428: /*
1.3 pk 1429: * Add a page worth of items to the pool.
1.21 thorpej 1430: *
1431: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1432: */
1.55 thorpej 1433: static void
1.128 christos 1434: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1435: {
1436: struct pool_item *pi;
1.128 christos 1437: void *cp = storage;
1.125 ad 1438: const unsigned int align = pp->pr_align;
1439: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1440: int n;
1.36 pk 1441:
1.134 ad 1442: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1443:
1.66 thorpej 1444: #ifdef DIAGNOSTIC
1.121 yamt 1445: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1446: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1447: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1448: #endif
1.3 pk 1449:
1450: /*
1451: * Insert page header.
1452: */
1.88 chs 1453: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1454: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1455: ph->ph_page = storage;
1456: ph->ph_nmissing = 0;
1.118 kardel 1457: getmicrotime(&ph->ph_time);
1.88 chs 1458: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1459: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1460:
1.6 thorpej 1461: pp->pr_nidle++;
1462:
1.3 pk 1463: /*
1464: * Color this page.
1465: */
1.138.4.1 bouyer 1466: ph->ph_off = pp->pr_curcolor;
1467: cp = (char *)cp + ph->ph_off;
1.3 pk 1468: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1469: pp->pr_curcolor = 0;
1470:
1471: /*
 1472: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1473: */
1474: if (ioff != 0)
1.128 christos 1475: cp = (char *)cp + align - ioff;
1.3 pk 1476:
1.125 ad 1477: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1478:
1.3 pk 1479: /*
1480: * Insert remaining chunks on the bucket list.
1481: */
1482: n = pp->pr_itemsperpage;
1.20 thorpej 1483: pp->pr_nitems += n;
1.3 pk 1484:
1.97 yamt 1485: if (pp->pr_roflags & PR_NOTOUCH) {
1.135 yamt 1486: pr_item_notouch_init(pp, ph);
1.97 yamt 1487: } else {
1488: while (n--) {
1489: pi = (struct pool_item *)cp;
1.78 thorpej 1490:
1.97 yamt 1491: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1492:
1.97 yamt 1493: /* Insert on page list */
1.102 chs 1494: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1495: #ifdef DIAGNOSTIC
1.97 yamt 1496: pi->pi_magic = PI_MAGIC;
1.3 pk 1497: #endif
1.128 christos 1498: cp = (char *)cp + pp->pr_size;
1.125 ad 1499:
1500: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1501: }
1.3 pk 1502: }
1503:
1504: /*
1505: * If the pool was depleted, point at the new page.
1506: */
1507: if (pp->pr_curpage == NULL)
1508: pp->pr_curpage = ph;
1509:
1510: if (++pp->pr_npages > pp->pr_hiwat)
1511: pp->pr_hiwat = pp->pr_npages;
1512: }
1513:
1.20 thorpej 1514: /*
1.52 thorpej 1515: * Used by pool_get() when nitems drops below the low water mark. This
1.88 chs 1516: * brings pr_nitems back up to the low water mark.
1.20 thorpej 1517: *
1.21 thorpej 1518: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1519: *
1.73 thorpej 1520: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1521: * with it locked.
1522: */
1523: static int
1.42 thorpej 1524: pool_catchup(struct pool *pp)
1.20 thorpej 1525: {
1526: int error = 0;
1527:
1.54 thorpej 1528: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1529: error = pool_grow(pp, PR_NOWAIT);
1530: if (error) {
1.20 thorpej 1531: break;
1532: }
1533: }
1.113 yamt 1534: return error;
1.20 thorpej 1535: }
1536:
1.88 chs 1537: static void
1538: pool_update_curpage(struct pool *pp)
1539: {
1540:
1541: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1542: if (pp->pr_curpage == NULL) {
1543: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1544: }
1545: }
1546:
1.3 pk 1547: void
1.42 thorpej 1548: pool_setlowat(struct pool *pp, int n)
1.3 pk 1549: {
1.15 pk 1550:
1.134 ad 1551: mutex_enter(&pp->pr_lock);
1.21 thorpej 1552:
1.3 pk 1553: pp->pr_minitems = n;
1.15 pk 1554: pp->pr_minpages = (n == 0)
1555: ? 0
1.18 thorpej 1556: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1557:
1558: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1559: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1560: /*
1561: * XXX: Should we log a warning? Should we set up a timeout
1562: * to try again in a second or so? The latter could break
1563: * a caller's assumptions about interrupt protection, etc.
1564: */
1565: }
1.21 thorpej 1566:
1.134 ad 1567: mutex_exit(&pp->pr_lock);
1.3 pk 1568: }
1569:
1570: void
1.42 thorpej 1571: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1572: {
1.15 pk 1573:
1.134 ad 1574: mutex_enter(&pp->pr_lock);
1.21 thorpej 1575:
1.15 pk 1576: pp->pr_maxpages = (n == 0)
1577: ? 0
1.18 thorpej 1578: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1579:
1.134 ad 1580: mutex_exit(&pp->pr_lock);
1.3 pk 1581: }
1582:
1.20 thorpej 1583: void
1.42 thorpej 1584: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1585: {
1586:
1.134 ad 1587: mutex_enter(&pp->pr_lock);
1.20 thorpej 1588:
1589: pp->pr_hardlimit = n;
1590: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1591: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1592: pp->pr_hardlimit_warning_last.tv_sec = 0;
1593: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1594:
1595: /*
1.21 thorpej 1596: * In-line version of pool_sethiwat(), because we don't want to
1597: * release the lock.
1.20 thorpej 1598: */
1599: pp->pr_maxpages = (n == 0)
1600: ? 0
1601: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1602:
1.134 ad 1603: mutex_exit(&pp->pr_lock);
1.20 thorpej 1604: }
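
/*
 * Illustrative sketch (hypothetical foo_pool): typical tuning after
 * pool_init(), keeping a floor of items on hand and capping growth,
 * with hard-limit warnings rate-limited to one per 60 seconds:
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 256);
 *	pool_sethardlimit(&foo_pool, 1024,
 *	    "WARNING: foo_pool limit reached", 60);
 */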
1.3 pk 1605:
1606: /*
1607: * Release all complete pages that have not been used recently.
1608: */
1.66 thorpej 1609: int
1.59 thorpej 1610: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1611: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1612: #else
1613: pool_reclaim(struct pool *pp)
1614: #endif
1.3 pk 1615: {
1616: struct pool_item_header *ph, *phnext;
1.61 chs 1617: struct pool_pagelist pq;
1.102 chs 1618: struct timeval curtime, diff;
1.134 ad 1619: bool klock;
1620: int rv;
1.3 pk 1621:
1.68 thorpej 1622: if (pp->pr_drain_hook != NULL) {
1623: /*
1624: * The drain hook must be called with the pool unlocked.
1625: */
1626: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1627: }
1628:
1.134 ad 1629: /*
 1630: * XXXSMP Take the kernel_lock, because mutexes at IPL_SOFTXXX
 1631: * are still spinlocks and we are called from the pagedaemon
 1632: * without kernel_lock. Does not apply to IPL_SOFTBIO.
1633: */
1634: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1635: pp->pr_ipl == IPL_SOFTSERIAL) {
1636: KERNEL_LOCK(1, NULL);
1637: klock = true;
1638: } else
1639: klock = false;
1640:
1641: /* Reclaim items from the pool's cache (if any). */
1642: if (pp->pr_cache != NULL)
1643: pool_cache_invalidate(pp->pr_cache);
1644:
1645: if (mutex_tryenter(&pp->pr_lock) == 0) {
1646: if (klock) {
1647: KERNEL_UNLOCK_ONE(NULL);
1648: }
1.66 thorpej 1649: return (0);
1.134 ad 1650: }
1.25 thorpej 1651: pr_enter(pp, file, line);
1.68 thorpej 1652:
1.88 chs 1653: LIST_INIT(&pq);
1.43 thorpej 1654:
1.118 kardel 1655: getmicrotime(&curtime);
1.21 thorpej 1656:
1.88 chs 1657: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1658: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1659:
1660: /* Check our minimum page claim */
1661: if (pp->pr_npages <= pp->pr_minpages)
1662: break;
1663:
1.88 chs 1664: KASSERT(ph->ph_nmissing == 0);
1665: timersub(&curtime, &ph->ph_time, &diff);
1.117 yamt 1666: if (diff.tv_sec < pool_inactive_time
1667: && !pa_starved_p(pp->pr_alloc))
1.88 chs 1668: continue;
1.21 thorpej 1669:
1.88 chs 1670: /*
1671: * If freeing this page would put us below
1672: * the low water mark, stop now.
1673: */
1674: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1675: pp->pr_minitems)
1676: break;
1.21 thorpej 1677:
1.88 chs 1678: pr_rmpage(pp, ph, &pq);
1.3 pk 1679: }
1680:
1.25 thorpej 1681: pr_leave(pp);
1.134 ad 1682: mutex_exit(&pp->pr_lock);
1683:
1684: if (LIST_EMPTY(&pq))
1685: rv = 0;
1686: else {
1687: pr_pagelist_free(pp, &pq);
1688: rv = 1;
1689: }
1690:
1691: if (klock) {
1692: KERNEL_UNLOCK_ONE(NULL);
1693: }
1.66 thorpej 1694:
1.134 ad 1695: return (rv);
1.3 pk 1696: }
1697:
1698: /*
1.134 ad 1699:  * Drain pools, one at a time.  This is a two-stage process:
 1700:  * pool_drain_start() kicks off a cross-call to drain CPU-level
 1701:  * caches if the pool has an associated pool_cache; pool_drain_end()
 1702:  * waits for those cross-calls to finish, and then drains the
 1703:  * cache (if any) and the pool itself.
1.131 ad 1704:  *
1.134 ad 1705:  * Note: these functions must never be called from interrupt context.
1.3 pk 1706: */
1707: void
1.134 ad 1708: pool_drain_start(struct pool **ppp, uint64_t *wp)
1.3 pk 1709: {
1710: struct pool *pp;
1.134 ad 1711:
1.138.4.2! bouyer 1712: KASSERT(!TAILQ_EMPTY(&pool_head));
1.3 pk 1713:
1.61 chs 1714: pp = NULL;
1.134 ad 1715:
1716: /* Find next pool to drain, and add a reference. */
1717: mutex_enter(&pool_head_lock);
1718: do {
1719: if (drainpp == NULL) {
1.138.4.2! bouyer 1720: drainpp = TAILQ_FIRST(&pool_head);
1.134 ad 1721: }
1722: if (drainpp != NULL) {
1723: pp = drainpp;
1.138.4.2! bouyer 1724: drainpp = TAILQ_NEXT(pp, pr_poollist);
1.134 ad 1725: }
1726: /*
1727: * Skip completely idle pools. We depend on at least
1728: * one pool in the system being active.
1729: */
1730: } while (pp == NULL || pp->pr_npages == 0);
1731: pp->pr_refcnt++;
1732: mutex_exit(&pool_head_lock);
1733:
1734: /* If there is a pool_cache, drain CPU level caches. */
1735: *ppp = pp;
1736: if (pp->pr_cache != NULL) {
1737: *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
1738: pp->pr_cache, NULL);
1739: }
1740: }
1741:
1742: void
1743: pool_drain_end(struct pool *pp, uint64_t where)
1744: {
1745:
1746: if (pp == NULL)
1747: return;
1748:
1749: KASSERT(pp->pr_refcnt > 0);
1750:
1751: /* Wait for remote draining to complete. */
1752: if (pp->pr_cache != NULL)
1753: xc_wait(where);
1754:
 1755: 	/* Drain the cache (if any) and the pool. */
1756: pool_reclaim(pp);
1757:
1758: /* Finally, unlock the pool. */
1759: mutex_enter(&pool_head_lock);
1760: pp->pr_refcnt--;
1761: cv_broadcast(&pool_busy);
1762: mutex_exit(&pool_head_lock);
1.3 pk 1763: }
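
/*
 * Example (a sketch only, not part of this file): how a caller such
 * as the pagedaemon might pair the two stages.  The function name
 * below is hypothetical.
 */
#if 0
static void
example_drain_one_pool(void)
{
	struct pool *pp;
	uint64_t where;

	/* Stage 1: pick a pool and start draining its CPU-level caches. */
	pool_drain_start(&pp, &where);

	/* Stage 2: wait for the cross-calls, then reclaim idle pages. */
	pool_drain_end(pp, where);
}
#endif /* example */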
1764:
1765: /*
1766: * Diagnostic helpers.
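 *
 * The `modif' string may contain: 'l' to print the pool's operation
 * log (if PR_LOGGING is set), 'p' to print the page lists, and 'c'
 * to print the contents of any cache groups.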
1767: */
1768: void
1.42 thorpej 1769: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1770: {
1771:
1.25 thorpej 1772: pool_print1(pp, modif, printf);
1.21 thorpej 1773: }
1774:
1.25 thorpej 1775: void
1.108 yamt 1776: pool_printall(const char *modif, void (*pr)(const char *, ...))
1777: {
1778: struct pool *pp;
1779:
1.138.4.2! bouyer 1780: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.108 yamt 1781: pool_printit(pp, modif, pr);
1782: }
1783: }
1784:
1785: void
1.42 thorpej 1786: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1787: {
1788:
1789: if (pp == NULL) {
1790: (*pr)("Must specify a pool to print.\n");
1791: return;
1792: }
1793:
1794: pool_print1(pp, modif, pr);
1795: }
1796:
1.21 thorpej 1797: static void
1.124 yamt 1798: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1799: void (*pr)(const char *, ...))
1.88 chs 1800: {
1801: struct pool_item_header *ph;
1802: #ifdef DIAGNOSTIC
1803: struct pool_item *pi;
1804: #endif
1805:
1806: LIST_FOREACH(ph, pl, ph_pagelist) {
1807: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1808: ph->ph_page, ph->ph_nmissing,
1809: (u_long)ph->ph_time.tv_sec,
1810: (u_long)ph->ph_time.tv_usec);
1811: #ifdef DIAGNOSTIC
1.97 yamt 1812: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1813: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1814: if (pi->pi_magic != PI_MAGIC) {
1815: (*pr)("\t\t\titem %p, magic 0x%x\n",
1816: pi, pi->pi_magic);
1817: }
1.88 chs 1818: }
1819: }
1820: #endif
1821: }
1822: }
1823:
1824: static void
1.42 thorpej 1825: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1826: {
1.25 thorpej 1827: struct pool_item_header *ph;
1.134 ad 1828: pool_cache_t pc;
1829: pcg_t *pcg;
1830: pool_cache_cpu_t *cc;
1831: uint64_t cpuhit, cpumiss;
1.44 thorpej 1832: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1833: char c;
1834:
1835: while ((c = *modif++) != '\0') {
1836: if (c == 'l')
1837: print_log = 1;
1838: if (c == 'p')
1839: print_pagelist = 1;
1.44 thorpej 1840: if (c == 'c')
1841: print_cache = 1;
1.25 thorpej 1842: }
1843:
1.134 ad 1844: if ((pc = pp->pr_cache) != NULL) {
1845: (*pr)("POOL CACHE");
1846: } else {
1847: (*pr)("POOL");
1848: }
1849:
1850: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1851: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1852: pp->pr_roflags);
1.66 thorpej 1853: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1854: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1855: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1856: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1857: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1858:
1.134 ad 1859: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1860: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1861: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1862: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1863:
1864: if (print_pagelist == 0)
1865: goto skip_pagelist;
1866:
1.88 chs 1867: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1868: (*pr)("\n\tempty page list:\n");
1.97 yamt 1869: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1870: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1871: (*pr)("\n\tfull page list:\n");
1.97 yamt 1872: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1873: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1874: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1875: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1876:
1.25 thorpej 1877: if (pp->pr_curpage == NULL)
1878: (*pr)("\tno current page\n");
1879: else
1880: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1881:
1882: skip_pagelist:
1883: if (print_log == 0)
1884: goto skip_log;
1885:
1886: (*pr)("\n");
1887: if ((pp->pr_roflags & PR_LOGGING) == 0)
1888: (*pr)("\tno log\n");
1.122 christos 1889: else {
1.25 thorpej 1890: pr_printlog(pp, NULL, pr);
1.122 christos 1891: }
1.3 pk 1892:
1.25 thorpej 1893: skip_log:
1.44 thorpej 1894:
1.102 chs 1895: #define PR_GROUPLIST(pcg) \
1896: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1.138.4.2! bouyer 1897: for (i = 0; i < pcg->pcg_size; i++) { \
1.102 chs 1898: if (pcg->pcg_objects[i].pcgo_pa != \
1899: POOL_PADDR_INVALID) { \
1900: (*pr)("\t\t\t%p, 0x%llx\n", \
1901: pcg->pcg_objects[i].pcgo_va, \
1902: (unsigned long long) \
1903: pcg->pcg_objects[i].pcgo_pa); \
1904: } else { \
1905: (*pr)("\t\t\t%p\n", \
1906: pcg->pcg_objects[i].pcgo_va); \
1907: } \
1908: }
1909:
1.134 ad 1910: if (pc != NULL) {
1911: cpuhit = 0;
1912: cpumiss = 0;
1913: for (i = 0; i < MAXCPUS; i++) {
1914: if ((cc = pc->pc_cpus[i]) == NULL)
1915: continue;
1916: cpuhit += cc->cc_hits;
1917: cpumiss += cc->cc_misses;
1918: }
1919: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1920: (*pr)("\tcache layer hits %llu misses %llu\n",
1921: pc->pc_hits, pc->pc_misses);
1922: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1923: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1924: pc->pc_contended);
1925: (*pr)("\tcache layer empty groups %u full groups %u\n",
1926: pc->pc_nempty, pc->pc_nfull);
1927: if (print_cache) {
1928: (*pr)("\tfull cache groups:\n");
1929: for (pcg = pc->pc_fullgroups; pcg != NULL;
1930: pcg = pcg->pcg_next) {
1931: PR_GROUPLIST(pcg);
1932: }
1933: (*pr)("\tempty cache groups:\n");
1934: for (pcg = pc->pc_emptygroups; pcg != NULL;
1935: pcg = pcg->pcg_next) {
1936: PR_GROUPLIST(pcg);
1937: }
1.103 chs 1938: }
1.44 thorpej 1939: }
1.102 chs 1940: #undef PR_GROUPLIST
1.44 thorpej 1941:
1.88 chs 1942: pr_enter_check(pp, pr);
1943: }
1944:
1945: static int
1946: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1947: {
1948: struct pool_item *pi;
1.128 christos 1949: void *page;
1.88 chs 1950: int n;
1951:
1.121 yamt 1952: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1953: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1954: if (page != ph->ph_page &&
1955: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1956: if (label != NULL)
1957: printf("%s: ", label);
1958: printf("pool(%p:%s): page inconsistency: page %p;"
1959: " at page head addr %p (p %p)\n", pp,
1960: pp->pr_wchan, ph->ph_page,
1961: ph, page);
1962: return 1;
1963: }
1.88 chs 1964: }
1.3 pk 1965:
1.97 yamt 1966: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1967: return 0;
1968:
1.102 chs 1969: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1970: pi != NULL;
1.102 chs 1971: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1972:
1973: #ifdef DIAGNOSTIC
1974: if (pi->pi_magic != PI_MAGIC) {
1975: if (label != NULL)
1976: printf("%s: ", label);
1977: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1978: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1979: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1980: n, pi);
1.88 chs 1981: panic("pool");
1982: }
1983: #endif
1.121 yamt 1984: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1985: continue;
1986: }
1.128 christos 1987: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1988: if (page == ph->ph_page)
1989: continue;
1990:
1991: if (label != NULL)
1992: printf("%s: ", label);
1993: printf("pool(%p:%s): page inconsistency: page %p;"
1994: " item ordinal %d; addr %p (p %p)\n", pp,
1995: pp->pr_wchan, ph->ph_page,
1996: n, pi, page);
1997: return 1;
1998: }
1999: return 0;
1.3 pk 2000: }
2001:
1.88 chs 2002:
1.3 pk 2003: int
1.42 thorpej 2004: pool_chk(struct pool *pp, const char *label)
1.3 pk 2005: {
2006: struct pool_item_header *ph;
2007: int r = 0;
2008:
1.134 ad 2009: mutex_enter(&pp->pr_lock);
1.88 chs 2010: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2011: r = pool_chk_page(pp, label, ph);
2012: if (r) {
2013: goto out;
2014: }
2015: }
2016: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2017: r = pool_chk_page(pp, label, ph);
2018: if (r) {
1.3 pk 2019: goto out;
2020: }
1.88 chs 2021: }
2022: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2023: r = pool_chk_page(pp, label, ph);
2024: if (r) {
1.3 pk 2025: goto out;
2026: }
2027: }
1.88 chs 2028:
1.3 pk 2029: out:
1.134 ad 2030: mutex_exit(&pp->pr_lock);
1.3 pk 2031: return (r);
1.43 thorpej 2032: }
2033:
2034: /*
2035: * pool_cache_init:
2036: *
2037: * Initialize a pool cache.
1.134 ad 2038: */
2039: pool_cache_t
2040: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
2041: const char *wchan, struct pool_allocator *palloc, int ipl,
2042: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
2043: {
2044: pool_cache_t pc;
2045:
2046: pc = pool_get(&cache_pool, PR_WAITOK);
2047: if (pc == NULL)
2048: return NULL;
2049:
2050: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
2051: palloc, ipl, ctor, dtor, arg);
2052:
2053: return pc;
2054: }
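
/*
 * Example (a sketch only, not part of this file): creating, using
 * and destroying a cache for a hypothetical "struct foo"; all of
 * the "foo" names are illustrative.
 */
#if 0
static pool_cache_t foo_cache;

foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
    "foopl", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);

struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
/* ... use f ... */
pool_cache_put(foo_cache, f);

pool_cache_destroy(foo_cache);
#endif /* example */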
2055:
2056: /*
2057: * pool_cache_bootstrap:
1.43 thorpej 2058: *
1.134 ad 2059: * Kernel-private version of pool_cache_init(). The caller
2060: * provides initial storage.
1.43 thorpej 2061: */
2062: void
1.134 ad 2063: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
2064: u_int align_offset, u_int flags, const char *wchan,
2065: struct pool_allocator *palloc, int ipl,
2066: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 2067: void *arg)
2068: {
1.134 ad 2069: CPU_INFO_ITERATOR cii;
1.138.4.2! bouyer 2070: pool_cache_t pc1;
1.134 ad 2071: struct cpu_info *ci;
2072: struct pool *pp;
2073:
2074: pp = &pc->pc_pool;
2075: if (palloc == NULL && ipl == IPL_NONE)
2076: palloc = &pool_allocator_nointr;
2077: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.43 thorpej 2078:
1.138 ad 2079: /*
2080: * XXXAD hack to prevent IP input processing from blocking.
2081: */
2082: if (ipl == IPL_SOFTNET) {
2083: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
2084: } else {
2085: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
2086: }
1.43 thorpej 2087:
1.134 ad 2088: if (ctor == NULL) {
2089: ctor = (int (*)(void *, void *, int))nullop;
2090: }
2091: if (dtor == NULL) {
2092: dtor = (void (*)(void *, void *))nullop;
2093: }
1.43 thorpej 2094:
1.134 ad 2095: pc->pc_emptygroups = NULL;
2096: pc->pc_fullgroups = NULL;
2097: pc->pc_partgroups = NULL;
1.43 thorpej 2098: pc->pc_ctor = ctor;
2099: pc->pc_dtor = dtor;
2100: pc->pc_arg = arg;
1.134 ad 2101: pc->pc_hits = 0;
1.48 thorpej 2102: pc->pc_misses = 0;
1.134 ad 2103: pc->pc_nempty = 0;
2104: pc->pc_npart = 0;
2105: pc->pc_nfull = 0;
2106: pc->pc_contended = 0;
2107: pc->pc_refcnt = 0;
1.136 yamt 2108: pc->pc_freecheck = NULL;
1.134 ad 2109:
1.138.4.2! bouyer 2110: if ((flags & PR_LARGECACHE) != 0) {
! 2111: pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
! 2112: } else {
! 2113: pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
! 2114: }
! 2115:
1.134 ad 2116: /* Allocate per-CPU caches. */
2117: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
2118: pc->pc_ncpu = 0;
1.138.4.1 bouyer 2119: if (ncpu < 2) {
1.137 ad 2120: /* XXX For sparc: boot CPU is not attached yet. */
2121: pool_cache_cpu_init1(curcpu(), pc);
2122: } else {
2123: for (CPU_INFO_FOREACH(cii, ci)) {
2124: pool_cache_cpu_init1(ci, pc);
2125: }
1.134 ad 2126: }
1.138.4.2! bouyer 2127:
! 2128: /* Add to list of all pools. */
! 2129: if (__predict_true(!cold))
1.134 ad 2130: mutex_enter(&pool_head_lock);
1.138.4.2! bouyer 2131: TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
! 2132: if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
! 2133: break;
1.134 ad 2134: }
1.138.4.2! bouyer 2135: if (pc1 == NULL)
! 2136: TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
! 2137: else
! 2138: TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
! 2139: if (__predict_true(!cold))
! 2140: mutex_exit(&pool_head_lock);
! 2141:
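	/*
	 * Make sure the cache is fully constructed before publishing
	 * the pointer, since pr_cache may be examined without the
	 * pool lock held.
	 */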
! 2142: membar_sync();
! 2143: pp->pr_cache = pc;
1.43 thorpej 2144: }
2145:
2146: /*
2147: * pool_cache_destroy:
2148: *
2149: * Destroy a pool cache.
2150: */
2151: void
1.134 ad 2152: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 2153: {
1.134 ad 2154: struct pool *pp = &pc->pc_pool;
2155: pool_cache_cpu_t *cc;
2156: pcg_t *pcg;
2157: int i;
2158:
2159: /* Remove it from the global list. */
2160: mutex_enter(&pool_head_lock);
2161: while (pc->pc_refcnt != 0)
2162: cv_wait(&pool_busy, &pool_head_lock);
1.138.4.2! bouyer 2163: TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1.134 ad 2164: mutex_exit(&pool_head_lock);
1.43 thorpej 2165:
2166: /* First, invalidate the entire cache. */
2167: pool_cache_invalidate(pc);
2168:
1.134 ad 2169: /* Disassociate it from the pool. */
2170: mutex_enter(&pp->pr_lock);
2171: pp->pr_cache = NULL;
2172: mutex_exit(&pp->pr_lock);
2173:
2174: /* Destroy per-CPU data */
2175: for (i = 0; i < MAXCPUS; i++) {
2176: if ((cc = pc->pc_cpus[i]) == NULL)
2177: continue;
2178: if ((pcg = cc->cc_current) != NULL) {
2179: pcg->pcg_next = NULL;
2180: pool_cache_invalidate_groups(pc, pcg);
2181: }
2182: if ((pcg = cc->cc_previous) != NULL) {
2183: pcg->pcg_next = NULL;
2184: pool_cache_invalidate_groups(pc, pcg);
2185: }
2186: if (cc != &pc->pc_cpu0)
2187: pool_put(&cache_cpu_pool, cc);
2188: }
2189:
2190: /* Finally, destroy it. */
2191: mutex_destroy(&pc->pc_lock);
2192: pool_destroy(pp);
2193: pool_put(&cache_pool, pc);
2194: }
2195:
2196: /*
2197: * pool_cache_cpu_init1:
2198: *
2199: * Called for each pool_cache whenever a new CPU is attached.
2200: */
2201: static void
2202: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
2203: {
2204: pool_cache_cpu_t *cc;
1.137 ad 2205: int index;
1.134 ad 2206:
1.137 ad 2207: index = ci->ci_index;
2208:
2209: KASSERT(index < MAXCPUS);
1.134 ad 2210: KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
2211:
1.137 ad 2212: if ((cc = pc->pc_cpus[index]) != NULL) {
2213: KASSERT(cc->cc_cpuindex == index);
1.134 ad 2214: return;
2215: }
2216:
2217: /*
2218: * The first CPU is 'free'. This needs to be the case for
2219: * bootstrap - we may not be able to allocate yet.
2220: */
2221: if (pc->pc_ncpu == 0) {
2222: cc = &pc->pc_cpu0;
2223: pc->pc_ncpu = 1;
2224: } else {
2225: mutex_enter(&pc->pc_lock);
2226: pc->pc_ncpu++;
2227: mutex_exit(&pc->pc_lock);
2228: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
2229: }
2230:
2231: cc->cc_ipl = pc->pc_pool.pr_ipl;
2232: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
2233: cc->cc_cache = pc;
1.137 ad 2234: cc->cc_cpuindex = index;
1.134 ad 2235: cc->cc_hits = 0;
2236: cc->cc_misses = 0;
2237: cc->cc_current = NULL;
2238: cc->cc_previous = NULL;
2239:
1.137 ad 2240: pc->pc_cpus[index] = cc;
1.43 thorpej 2241: }
2242:
1.134 ad 2243: /*
2244: * pool_cache_cpu_init:
2245: *
2246: * Called whenever a new CPU is attached.
2247: */
2248: void
2249: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 2250: {
1.134 ad 2251: pool_cache_t pc;
2252:
2253: mutex_enter(&pool_head_lock);
1.138.4.2! bouyer 2254: TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1.134 ad 2255: pc->pc_refcnt++;
2256: mutex_exit(&pool_head_lock);
1.43 thorpej 2257:
1.134 ad 2258: pool_cache_cpu_init1(ci, pc);
1.43 thorpej 2259:
1.134 ad 2260: mutex_enter(&pool_head_lock);
2261: pc->pc_refcnt--;
2262: cv_broadcast(&pool_busy);
2263: }
2264: mutex_exit(&pool_head_lock);
1.43 thorpej 2265: }
2266:
1.134 ad 2267: /*
2268: * pool_cache_reclaim:
2269: *
2270: * Reclaim memory from a pool cache.
2271: */
2272: bool
2273: pool_cache_reclaim(pool_cache_t pc)
1.43 thorpej 2274: {
2275:
1.134 ad 2276: return pool_reclaim(&pc->pc_pool);
2277: }
1.43 thorpej 2278:
1.136 yamt 2279: static void
2280: pool_cache_destruct_object1(pool_cache_t pc, void *object)
2281: {
2282:
2283: (*pc->pc_dtor)(pc->pc_arg, object);
2284: pool_put(&pc->pc_pool, object);
2285: }
2286:
1.134 ad 2287: /*
2288: * pool_cache_destruct_object:
2289: *
2290: * Force destruction of an object and its release back into
2291: * the pool.
2292: */
2293: void
2294: pool_cache_destruct_object(pool_cache_t pc, void *object)
2295: {
2296:
1.136 yamt 2297: FREECHECK_IN(&pc->pc_freecheck, object);
2298:
2299: pool_cache_destruct_object1(pc, object);
1.43 thorpej 2300: }
2301:
1.134 ad 2302: /*
2303: * pool_cache_invalidate_groups:
2304: *
2305: * Invalidate a chain of groups and destruct all objects.
2306: */
1.102 chs 2307: static void
1.134 ad 2308: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1.102 chs 2309: {
1.134 ad 2310: void *object;
2311: pcg_t *next;
2312: int i;
2313:
2314: for (; pcg != NULL; pcg = next) {
2315: next = pcg->pcg_next;
2316:
2317: for (i = 0; i < pcg->pcg_avail; i++) {
2318: object = pcg->pcg_objects[i].pcgo_va;
1.136 yamt 2319: pool_cache_destruct_object1(pc, object);
1.134 ad 2320: }
1.102 chs 2321:
1.138.4.2! bouyer 2322: if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
! 2323: pool_put(&pcg_large_pool, pcg);
! 2324: } else {
! 2325: KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
! 2326: pool_put(&pcg_normal_pool, pcg);
! 2327: }
1.102 chs 2328: }
2329: }
2330:
1.43 thorpej 2331: /*
1.134 ad 2332: * pool_cache_invalidate:
1.43 thorpej 2333: *
1.134 ad 2334: * Invalidate a pool cache (destruct and release all of the
2335: * cached objects). Does not reclaim objects from the pool.
1.43 thorpej 2336: */
1.134 ad 2337: void
2338: pool_cache_invalidate(pool_cache_t pc)
2339: {
2340: pcg_t *full, *empty, *part;
2341:
2342: mutex_enter(&pc->pc_lock);
2343: full = pc->pc_fullgroups;
2344: empty = pc->pc_emptygroups;
2345: part = pc->pc_partgroups;
2346: pc->pc_fullgroups = NULL;
2347: pc->pc_emptygroups = NULL;
2348: pc->pc_partgroups = NULL;
2349: pc->pc_nfull = 0;
2350: pc->pc_nempty = 0;
2351: pc->pc_npart = 0;
2352: mutex_exit(&pc->pc_lock);
2353:
2354: pool_cache_invalidate_groups(pc, full);
2355: pool_cache_invalidate_groups(pc, empty);
2356: pool_cache_invalidate_groups(pc, part);
2357: }
2358:
2359: void
2360: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2361: {
2362:
2363: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2364: }
2365:
2366: void
2367: pool_cache_setlowat(pool_cache_t pc, int n)
2368: {
2369:
2370: pool_setlowat(&pc->pc_pool, n);
2371: }
2372:
2373: void
2374: pool_cache_sethiwat(pool_cache_t pc, int n)
2375: {
2376:
2377: pool_sethiwat(&pc->pc_pool, n);
2378: }
2379:
2380: void
2381: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2382: {
2383:
2384: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2385: }
2386:
2387: static inline pool_cache_cpu_t *
2388: pool_cache_cpu_enter(pool_cache_t pc, int *s)
2389: {
2390: pool_cache_cpu_t *cc;
2391:
2392: /*
2393: * Prevent other users of the cache from accessing our
2394: * CPU-local data. To avoid touching shared state, we
 2395: 	 * pull the necessary information from CPU-local data.
2396: */
1.137 ad 2397: crit_enter();
2398: cc = pc->pc_cpus[curcpu()->ci_index];
1.134 ad 2399: KASSERT(cc->cc_cache == pc);
1.137 ad 2400: if (cc->cc_ipl != IPL_NONE) {
1.134 ad 2401: *s = splraiseipl(cc->cc_iplcookie);
2402: }
2403: KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
2404:
2405: return cc;
2406: }
2407:
2408: static inline void
2409: pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
2410: {
2411:
2412: /* No longer need exclusive access to the per-CPU data. */
1.137 ad 2413: if (cc->cc_ipl != IPL_NONE) {
1.134 ad 2414: splx(*s);
2415: }
1.137 ad 2416: crit_exit();
1.134 ad 2417: }
2418:
2419: #if __GNUC_PREREQ__(3, 0)
2420: __attribute ((noinline))
2421: #endif
2422: pool_cache_cpu_t *
2423: pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
2424: paddr_t *pap, int flags)
1.43 thorpej 2425: {
1.134 ad 2426: pcg_t *pcg, *cur;
2427: uint64_t ncsw;
2428: pool_cache_t pc;
1.43 thorpej 2429: void *object;
1.58 thorpej 2430:
1.134 ad 2431: pc = cc->cc_cache;
2432: cc->cc_misses++;
1.43 thorpej 2433:
1.134 ad 2434: /*
 2435: 	 * Nothing was available locally.  Try to grab a group
2436: * from the cache.
2437: */
2438: if (!mutex_tryenter(&pc->pc_lock)) {
2439: ncsw = curlwp->l_ncsw;
2440: mutex_enter(&pc->pc_lock);
2441: pc->pc_contended++;
1.43 thorpej 2442:
1.134 ad 2443: /*
2444: * If we context switched while locking, then
2445: * our view of the per-CPU data is invalid:
2446: * retry.
2447: */
2448: if (curlwp->l_ncsw != ncsw) {
2449: mutex_exit(&pc->pc_lock);
2450: pool_cache_cpu_exit(cc, s);
2451: return pool_cache_cpu_enter(pc, s);
1.43 thorpej 2452: }
1.102 chs 2453: }
1.43 thorpej 2454:
1.134 ad 2455: if ((pcg = pc->pc_fullgroups) != NULL) {
1.43 thorpej 2456: /*
1.134 ad 2457: * If there's a full group, release our empty
2458: * group back to the cache. Install the full
2459: * group as cc_current and return.
1.43 thorpej 2460: */
1.134 ad 2461: if ((cur = cc->cc_current) != NULL) {
2462: KASSERT(cur->pcg_avail == 0);
2463: cur->pcg_next = pc->pc_emptygroups;
2464: pc->pc_emptygroups = cur;
2465: pc->pc_nempty++;
1.87 thorpej 2466: }
1.138.4.2! bouyer 2467: KASSERT(pcg->pcg_avail == pcg->pcg_size);
1.134 ad 2468: cc->cc_current = pcg;
2469: pc->pc_fullgroups = pcg->pcg_next;
2470: pc->pc_hits++;
2471: pc->pc_nfull--;
2472: mutex_exit(&pc->pc_lock);
2473: return cc;
2474: }
2475:
2476: /*
2477: * Nothing available locally or in cache. Take the slow
2478: * path: fetch a new object from the pool and construct
2479: * it.
2480: */
2481: pc->pc_misses++;
2482: mutex_exit(&pc->pc_lock);
2483: pool_cache_cpu_exit(cc, s);
2484:
2485: object = pool_get(&pc->pc_pool, flags);
2486: *objectp = object;
2487: if (object == NULL)
2488: return NULL;
1.125 ad 2489:
1.134 ad 2490: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
2491: pool_put(&pc->pc_pool, object);
2492: *objectp = NULL;
2493: return NULL;
1.43 thorpej 2494: }
2495:
1.134 ad 2496: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2497: (pc->pc_pool.pr_align - 1)) == 0);
1.43 thorpej 2498:
1.134 ad 2499: if (pap != NULL) {
2500: #ifdef POOL_VTOPHYS
2501: *pap = POOL_VTOPHYS(object);
2502: #else
2503: *pap = POOL_PADDR_INVALID;
2504: #endif
1.102 chs 2505: }
1.43 thorpej 2506:
1.125 ad 2507: FREECHECK_OUT(&pc->pc_freecheck, object);
1.134 ad 2508: return NULL;
1.43 thorpej 2509: }
2510:
2511: /*
1.134 ad 2512: * pool_cache_get{,_paddr}:
1.43 thorpej 2513: *
1.134 ad 2514: * Get an object from a pool cache (optionally returning
2515: * the physical address of the object).
1.43 thorpej 2516: */
1.134 ad 2517: void *
2518: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.43 thorpej 2519: {
1.134 ad 2520: pool_cache_cpu_t *cc;
2521: pcg_t *pcg;
2522: void *object;
1.60 thorpej 2523: int s;
1.43 thorpej 2524:
1.134 ad 2525: #ifdef LOCKDEBUG
2526: if (flags & PR_WAITOK)
2527: ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
2528: #endif
1.125 ad 2529:
1.134 ad 2530: cc = pool_cache_cpu_enter(pc, &s);
2531: do {
 2532: 		/* Try to allocate an object from the current group. */
2533: pcg = cc->cc_current;
2534: if (pcg != NULL && pcg->pcg_avail > 0) {
2535: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2536: if (pap != NULL)
2537: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2538: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
1.138.4.2! bouyer 2539: KASSERT(pcg->pcg_avail <= pcg->pcg_size);
1.134 ad 2540: KASSERT(object != NULL);
2541: cc->cc_hits++;
2542: pool_cache_cpu_exit(cc, &s);
2543: FREECHECK_OUT(&pc->pc_freecheck, object);
2544: return object;
1.43 thorpej 2545: }
2546:
2547: /*
1.134 ad 2548: * That failed. If the previous group isn't empty, swap
2549: * it with the current group and allocate from there.
1.43 thorpej 2550: */
1.134 ad 2551: pcg = cc->cc_previous;
2552: if (pcg != NULL && pcg->pcg_avail > 0) {
2553: cc->cc_previous = cc->cc_current;
2554: cc->cc_current = pcg;
2555: continue;
1.43 thorpej 2556: }
2557:
1.134 ad 2558: /*
2559: * Can't allocate from either group: try the slow path.
2560: * If get_slow() allocated an object for us, or if
2561: * no more objects are available, it will return NULL.
2562: * Otherwise, we need to retry.
2563: */
2564: cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
2565: } while (cc != NULL);
1.43 thorpej 2566:
1.134 ad 2567: return object;
1.51 thorpej 2568: }
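
/*
 * Example (a sketch only, not part of this file): getting an object
 * together with its physical address, e.g. for DMA, and returning it
 * later.  "foo_cache" is the hypothetical cache from the example
 * above.
 */
#if 0
paddr_t pa;
void *va;

va = pool_cache_get_paddr(foo_cache, PR_NOWAIT, &pa);
if (va != NULL) {
	/* ... hand (va, pa) to the hardware ... */
	pool_cache_put_paddr(foo_cache, va, pa);
}
#endif /* example */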
2569:
1.134 ad 2570: #if __GNUC_PREREQ__(3, 0)
2571: __attribute ((noinline))
2572: #endif
2573: pool_cache_cpu_t *
2574: pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
1.51 thorpej 2575: {
1.134 ad 2576: pcg_t *pcg, *cur;
2577: uint64_t ncsw;
2578: pool_cache_t pc;
1.138.4.2! bouyer 2579: u_int nobj;
1.51 thorpej 2580:
1.134 ad 2581: pc = cc->cc_cache;
2582: cc->cc_misses++;
1.43 thorpej 2583:
1.134 ad 2584: /*
2585: * No free slots locally. Try to grab an empty, unused
2586: * group from the cache.
2587: */
2588: if (!mutex_tryenter(&pc->pc_lock)) {
2589: ncsw = curlwp->l_ncsw;
2590: mutex_enter(&pc->pc_lock);
2591: pc->pc_contended++;
1.102 chs 2592:
1.134 ad 2593: /*
2594: * If we context switched while locking, then
2595: * our view of the per-CPU data is invalid:
2596: * retry.
2597: */
2598: if (curlwp->l_ncsw != ncsw) {
2599: mutex_exit(&pc->pc_lock);
2600: pool_cache_cpu_exit(cc, s);
2601: return pool_cache_cpu_enter(pc, s);
2602: }
2603: }
1.130 ad 2604:
1.134 ad 2605: if ((pcg = pc->pc_emptygroups) != NULL) {
 2606: 		 * If there's an empty group, release our full
2607: * If there's a empty group, release our full
2608: * group back to the cache. Install the empty
1.138.4.2! bouyer 2609: * group and return.
1.134 ad 2610: */
2611: KASSERT(pcg->pcg_avail == 0);
2612: pc->pc_emptygroups = pcg->pcg_next;
1.138.4.2! bouyer 2613: if (cc->cc_previous == NULL) {
! 2614: cc->cc_previous = pcg;
! 2615: } else {
! 2616: if ((cur = cc->cc_current) != NULL) {
! 2617: KASSERT(cur->pcg_avail == pcg->pcg_size);
! 2618: cur->pcg_next = pc->pc_fullgroups;
! 2619: pc->pc_fullgroups = cur;
! 2620: pc->pc_nfull++;
! 2621: }
! 2622: cc->cc_current = pcg;
! 2623: }
1.134 ad 2624: pc->pc_hits++;
2625: pc->pc_nempty--;
2626: mutex_exit(&pc->pc_lock);
2627: return cc;
1.102 chs 2628: }
1.105 christos 2629:
1.134 ad 2630: /*
2631: * Nothing available locally or in cache. Take the
2632: * slow path and try to allocate a new group that we
2633: * can release to.
2634: */
2635: pc->pc_misses++;
2636: mutex_exit(&pc->pc_lock);
2637: pool_cache_cpu_exit(cc, s);
1.105 christos 2638:
1.134 ad 2639: /*
2640: * If we can't allocate a new group, just throw the
2641: * object away.
2642: */
1.138.4.2! bouyer 2643: nobj = pc->pc_pcgsize;
! 2644: if (pool_cache_disable) {
! 2645: pcg = NULL;
! 2646: } else if (nobj == PCG_NOBJECTS_LARGE) {
! 2647: pcg = pool_get(&pcg_large_pool, PR_NOWAIT);
! 2648: } else {
! 2649: pcg = pool_get(&pcg_normal_pool, PR_NOWAIT);
! 2650: }
1.134 ad 2651: if (pcg == NULL) {
2652: pool_cache_destruct_object(pc, object);
2653: return NULL;
2654: }
2655: pcg->pcg_avail = 0;
1.138.4.2! bouyer 2656: pcg->pcg_size = nobj;
1.105 christos 2657:
1.134 ad 2658: /*
2659: * Add the empty group to the cache and try again.
2660: */
2661: mutex_enter(&pc->pc_lock);
2662: pcg->pcg_next = pc->pc_emptygroups;
2663: pc->pc_emptygroups = pcg;
2664: pc->pc_nempty++;
2665: mutex_exit(&pc->pc_lock);
1.103 chs 2666:
1.134 ad 2667: return pool_cache_cpu_enter(pc, s);
2668: }
1.102 chs 2669:
1.43 thorpej 2670: /*
1.134 ad 2671: * pool_cache_put{,_paddr}:
1.43 thorpej 2672: *
1.134 ad 2673: * Put an object back to the pool cache (optionally caching the
2674: * physical address of the object).
1.43 thorpej 2675: */
1.101 thorpej 2676: void
1.134 ad 2677: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2678: {
1.134 ad 2679: pool_cache_cpu_t *cc;
2680: pcg_t *pcg;
2681: int s;
1.101 thorpej 2682:
1.134 ad 2683: FREECHECK_IN(&pc->pc_freecheck, object);
1.101 thorpej 2684:
1.134 ad 2685: cc = pool_cache_cpu_enter(pc, &s);
2686: do {
2687: /* If the current group isn't full, release it there. */
2688: pcg = cc->cc_current;
1.138.4.2! bouyer 2689: if (pcg != NULL && pcg->pcg_avail < pcg->pcg_size) {
1.134 ad 2690: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2691: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2692: pcg->pcg_avail++;
2693: cc->cc_hits++;
2694: pool_cache_cpu_exit(cc, &s);
2695: return;
2696: }
1.43 thorpej 2697:
1.134 ad 2698: /*
2699: * That failed. If the previous group is empty, swap
2700: * it with the current group and try again.
2701: */
2702: pcg = cc->cc_previous;
2703: if (pcg != NULL && pcg->pcg_avail == 0) {
2704: cc->cc_previous = cc->cc_current;
2705: cc->cc_current = pcg;
2706: continue;
2707: }
1.43 thorpej 2708:
1.134 ad 2709: /*
2710: * Can't free to either group: try the slow path.
2711: * If put_slow() releases the object for us, it
2712: * will return NULL. Otherwise we need to retry.
2713: */
2714: cc = pool_cache_put_slow(cc, &s, object, pa);
2715: } while (cc != NULL);
1.43 thorpej 2716: }
2717:
2718: /*
1.134 ad 2719: * pool_cache_xcall:
1.43 thorpej 2720: *
1.134 ad 2721: * Transfer objects from the per-CPU cache to the global cache.
2722: * Run within a cross-call thread.
1.43 thorpej 2723: */
2724: static void
1.134 ad 2725: pool_cache_xcall(pool_cache_t pc)
1.43 thorpej 2726: {
1.134 ad 2727: pool_cache_cpu_t *cc;
2728: pcg_t *prev, *cur, **list;
2729: int s = 0; /* XXXgcc */
2730:
2731: cc = pool_cache_cpu_enter(pc, &s);
2732: cur = cc->cc_current;
2733: cc->cc_current = NULL;
2734: prev = cc->cc_previous;
2735: cc->cc_previous = NULL;
2736: pool_cache_cpu_exit(cc, &s);
2737:
2738: /*
2739: * XXXSMP Go to splvm to prevent kernel_lock from being taken,
2740: * because locks at IPL_SOFTXXX are still spinlocks. Does not
2741: * apply to IPL_SOFTBIO. Cross-call threads do not take the
2742: * kernel_lock.
1.101 thorpej 2743: */
1.134 ad 2744: s = splvm();
2745: mutex_enter(&pc->pc_lock);
2746: if (cur != NULL) {
1.138.4.2! bouyer 2747: if (cur->pcg_avail == cur->pcg_size) {
1.134 ad 2748: list = &pc->pc_fullgroups;
2749: pc->pc_nfull++;
2750: } else if (cur->pcg_avail == 0) {
2751: list = &pc->pc_emptygroups;
2752: pc->pc_nempty++;
2753: } else {
2754: list = &pc->pc_partgroups;
2755: pc->pc_npart++;
2756: }
2757: cur->pcg_next = *list;
2758: *list = cur;
2759: }
2760: if (prev != NULL) {
1.138.4.2! bouyer 2761: if (prev->pcg_avail == prev->pcg_size) {
1.134 ad 2762: list = &pc->pc_fullgroups;
2763: pc->pc_nfull++;
2764: } else if (prev->pcg_avail == 0) {
2765: list = &pc->pc_emptygroups;
2766: pc->pc_nempty++;
2767: } else {
2768: list = &pc->pc_partgroups;
2769: pc->pc_npart++;
2770: }
2771: prev->pcg_next = *list;
2772: *list = prev;
2773: }
2774: mutex_exit(&pc->pc_lock);
2775: splx(s);
1.3 pk 2776: }
1.66 thorpej 2777:
2778: /*
2779: * Pool backend allocators.
2780: *
2781: * Each pool has a backend allocator that handles allocation, deallocation,
2782: * and any additional draining that might be needed.
2783: *
2784: * We provide two standard allocators:
2785: *
2786: * pool_allocator_kmem - the default when no allocator is specified
2787: *
2788: * pool_allocator_nointr - used for pools that will not be accessed
2789: * in interrupt context.
2790: */
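
/*
 * Example (a sketch only, not part of this file): a pool may also
 * supply its own back-end allocator.  All of the "my_" names are
 * hypothetical.
 */
#if 0
static void *my_page_alloc(struct pool *, int);
static void my_page_free(struct pool *, void *);

struct pool_allocator my_allocator = {
	my_page_alloc, my_page_free, 0,	/* pa_pagesz 0 selects the default */
};

pool_init(&my_pool, sizeof(struct foo), 0, 0, 0, "mypl",
    &my_allocator, IPL_NONE);
#endif /* example */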
2791: void *pool_page_alloc(struct pool *, int);
2792: void pool_page_free(struct pool *, void *);
2793:
1.112 bjh21 2794: #ifdef POOL_SUBPAGE
2795: struct pool_allocator pool_allocator_kmem_fullpage = {
2796: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2797: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2798: };
2799: #else
1.66 thorpej 2800: struct pool_allocator pool_allocator_kmem = {
2801: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2802: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2803: };
1.112 bjh21 2804: #endif
1.66 thorpej 2805:
2806: void *pool_page_alloc_nointr(struct pool *, int);
2807: void pool_page_free_nointr(struct pool *, void *);
2808:
1.112 bjh21 2809: #ifdef POOL_SUBPAGE
2810: struct pool_allocator pool_allocator_nointr_fullpage = {
2811: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2812: .pa_backingmapptr = &kernel_map,
1.112 bjh21 2813: };
2814: #else
1.66 thorpej 2815: struct pool_allocator pool_allocator_nointr = {
2816: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2817: .pa_backingmapptr = &kernel_map,
1.66 thorpej 2818: };
1.112 bjh21 2819: #endif
1.66 thorpej 2820:
2821: #ifdef POOL_SUBPAGE
2822: void *pool_subpage_alloc(struct pool *, int);
2823: void pool_subpage_free(struct pool *, void *);
2824:
1.112 bjh21 2825: struct pool_allocator pool_allocator_kmem = {
2826: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2827: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2828: };
2829:
2830: void *pool_subpage_alloc_nointr(struct pool *, int);
2831: void pool_subpage_free_nointr(struct pool *, void *);
2832:
2833: struct pool_allocator pool_allocator_nointr = {
2834: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2835: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2836: };
2837: #endif /* POOL_SUBPAGE */
2838:
1.117 yamt 2839: static void *
2840: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2841: {
1.117 yamt 2842: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2843: void *res;
2844:
1.117 yamt 2845: res = (*pa->pa_alloc)(pp, flags);
2846: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2847: /*
1.117 yamt 2848: * We only run the drain hook here if PR_NOWAIT.
2849: * In other cases, the hook will be run in
2850: * pool_reclaim().
1.66 thorpej 2851: */
1.117 yamt 2852: if (pp->pr_drain_hook != NULL) {
2853: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2854: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2855: }
1.117 yamt 2856: }
2857: return res;
1.66 thorpej 2858: }
2859:
1.117 yamt 2860: static void
1.66 thorpej 2861: pool_allocator_free(struct pool *pp, void *v)
2862: {
2863: struct pool_allocator *pa = pp->pr_alloc;
2864:
2865: (*pa->pa_free)(pp, v);
2866: }
2867:
2868: void *
1.124 yamt 2869: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2870: {
1.127 thorpej 2871: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2872:
1.100 yamt 2873: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2874: }
2875:
2876: void
1.124 yamt 2877: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2878: {
2879:
1.98 yamt 2880: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2881: }
2882:
2883: static void *
1.124 yamt 2884: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2885: {
1.127 thorpej 2886: bool waitok = (flags & PR_WAITOK) ? true : false;
1.98 yamt 2887:
1.100 yamt 2888: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2889: }
2890:
2891: static void
1.124 yamt 2892: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2893: {
2894:
1.100 yamt 2895: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2896: }
2897:
2898: #ifdef POOL_SUBPAGE
2899: /* Sub-page allocator, for machines with large hardware pages. */
2900: void *
2901: pool_subpage_alloc(struct pool *pp, int flags)
2902: {
1.134 ad 2903: return pool_get(&psppool, flags);
1.66 thorpej 2904: }
2905:
2906: void
2907: pool_subpage_free(struct pool *pp, void *v)
2908: {
2909: pool_put(&psppool, v);
2910: }
2911:
2912: /* We don't provide a real nointr allocator. Maybe later. */
2913: void *
1.112 bjh21 2914: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2915: {
2916:
2917: return (pool_subpage_alloc(pp, flags));
2918: }
2919:
2920: void
1.112 bjh21 2921: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2922: {
2923:
2924: pool_subpage_free(pp, v);
2925: }
1.112 bjh21 2926: #endif /* POOL_SUBPAGE */
1.66 thorpej 2927: void *
1.124 yamt 2928: pool_page_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2929: {
1.127 thorpej 2930: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2931:
1.100 yamt 2932: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2933: }
2934:
2935: void
1.124 yamt 2936: pool_page_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2937: {
2938:
1.98 yamt 2939: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2940: }
1.138.4.1 bouyer 2941:
2942: #if defined(DDB)
2943: static bool
2944: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2945: {
2946:
2947: return (uintptr_t)ph->ph_page <= addr &&
2948: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2949: }
2950:
1.138.4.2! bouyer 2951: static bool
! 2952: pool_in_item(struct pool *pp, void *item, uintptr_t addr)
! 2953: {
! 2954:
! 2955: return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
! 2956: }
! 2957:
! 2958: static bool
! 2959: pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
! 2960: {
! 2961: int i;
! 2962:
! 2963: if (pcg == NULL) {
! 2964: return false;
! 2965: }
! 2966: for (i = 0; i < pcg->pcg_avail; i++) {
! 2967: if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
! 2968: return true;
! 2969: }
! 2970: }
! 2971: return false;
! 2972: }
! 2973:
! 2974: static bool
! 2975: pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
! 2976: {
! 2977:
! 2978: if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
! 2979: unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
! 2980: pool_item_bitmap_t *bitmap =
! 2981: ph->ph_bitmap + (idx / BITMAP_SIZE);
! 2982: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
! 2983:
! 2984: return (*bitmap & mask) == 0;
! 2985: } else {
! 2986: struct pool_item *pi;
! 2987:
! 2988: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
! 2989: if (pool_in_item(pp, pi, addr)) {
! 2990: return false;
! 2991: }
! 2992: }
! 2993: return true;
! 2994: }
! 2995: }
! 2996:
1.138.4.1 bouyer 2997: void
2998: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2999: {
3000: struct pool *pp;
3001:
1.138.4.2! bouyer 3002: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.138.4.1 bouyer 3003: struct pool_item_header *ph;
3004: uintptr_t item;
1.138.4.2! bouyer 3005: bool allocated = true;
! 3006: bool incache = false;
! 3007: bool incpucache = false;
! 3008: char cpucachestr[32];
1.138.4.1 bouyer 3009:
3010: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
3011: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
3012: if (pool_in_page(pp, ph, addr)) {
3013: goto found;
3014: }
3015: }
3016: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
3017: if (pool_in_page(pp, ph, addr)) {
1.138.4.2! bouyer 3018: allocated =
! 3019: pool_allocated(pp, ph, addr);
! 3020: goto found;
! 3021: }
! 3022: }
! 3023: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
! 3024: if (pool_in_page(pp, ph, addr)) {
! 3025: allocated = false;
1.138.4.1 bouyer 3026: goto found;
3027: }
3028: }
3029: continue;
3030: } else {
3031: ph = pr_find_pagehead_noalign(pp, (void *)addr);
3032: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
3033: continue;
3034: }
1.138.4.2! bouyer 3035: allocated = pool_allocated(pp, ph, addr);
1.138.4.1 bouyer 3036: }
3037: found:
1.138.4.2! bouyer 3038: if (allocated && pp->pr_cache) {
! 3039: pool_cache_t pc = pp->pr_cache;
! 3040: struct pool_cache_group *pcg;
! 3041: int i;
! 3042:
! 3043: for (pcg = pc->pc_fullgroups; pcg != NULL;
! 3044: pcg = pcg->pcg_next) {
! 3045: if (pool_in_cg(pp, pcg, addr)) {
! 3046: incache = true;
! 3047: goto print;
! 3048: }
! 3049: }
! 3050: for (i = 0; i < MAXCPUS; i++) {
! 3051: pool_cache_cpu_t *cc;
! 3052:
! 3053: if ((cc = pc->pc_cpus[i]) == NULL) {
! 3054: continue;
! 3055: }
! 3056: if (pool_in_cg(pp, cc->cc_current, addr) ||
! 3057: pool_in_cg(pp, cc->cc_previous, addr)) {
! 3058: struct cpu_info *ci =
! 3059: cpu_lookup_byindex(i);
! 3060:
! 3061: incpucache = true;
! 3062: snprintf(cpucachestr,
! 3063: sizeof(cpucachestr),
! 3064: "cached by CPU %u",
! 3065: (u_int)ci->ci_cpuid);
! 3066: goto print;
! 3067: }
! 3068: }
! 3069: }
! 3070: print:
1.138.4.1 bouyer 3071: item = (uintptr_t)ph->ph_page + ph->ph_off;
3072: item = item + rounddown(addr - item, pp->pr_size);
1.138.4.2! bouyer 3073: (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
1.138.4.1 bouyer 3074: (void *)addr, item, (size_t)(addr - item),
1.138.4.2! bouyer 3075: pp->pr_wchan,
! 3076: incpucache ? cpucachestr :
! 3077: incache ? "cached" : allocated ? "allocated" : "free");
1.138.4.1 bouyer 3078: }
3079: }
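
/*
 * Example (illustrative only): for an address inside a pool page,
 * the output takes the form
 *
 *	0xcc3fb00c is 0xcc3fb000+12 in POOL 'mbpl' (allocated)
 *
 * where the addresses and pool name here are hypothetical.
 */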
3080: #endif /* defined(DDB) */