Annotation of src/sys/kern/subr_pool.c, Revision 1.101.2.12
1.101.2.12! yamt 1: /* $NetBSD: subr_pool.c,v 1.101.2.11 2008/02/27 08:36:56 yamt Exp $ */
1.1 pk 2:
3: /*-
1.101.2.6 yamt 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
1.101.2.6 yamt 9: * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.101.2.12! yamt 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.101.2.11 2008/02/27 08:36:56 yamt Exp $");
1.24 scottr 42:
1.101.2.8 yamt 43: #include "opt_ddb.h"
1.25 thorpej 44: #include "opt_pool.h"
1.24 scottr 45: #include "opt_poollog.h"
1.28 thorpej 46: #include "opt_lockdebug.h"
1.1 pk 47:
48: #include <sys/param.h>
49: #include <sys/systm.h>
1.101.2.6 yamt 50: #include <sys/bitops.h>
1.1 pk 51: #include <sys/proc.h>
52: #include <sys/errno.h>
53: #include <sys/kernel.h>
54: #include <sys/malloc.h>
55: #include <sys/pool.h>
1.20 thorpej 56: #include <sys/syslog.h>
1.101.2.3 yamt 57: #include <sys/debug.h>
1.101.2.6 yamt 58: #include <sys/lockdebug.h>
59: #include <sys/xcall.h>
60: #include <sys/cpu.h>
1.101.2.8 yamt 61: #include <sys/atomic.h>
1.3 pk 62:
63: #include <uvm/uvm.h>
64:
1.1 pk 65: /*
66: * Pool resource management utility.
1.3 pk 67: *
1.88 chs 68: * Memory is allocated in pages which are split into pieces according to
69: * the pool item size. Each page is kept on one of three lists in the
70: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
71: * for empty, full and partially-full pages respectively. The individual
72: * pool items are on a linked list headed by `ph_itemlist' in each page
73: * header. The memory for building the page list is either taken from
74: * the allocated pages themselves (for small pool items) or taken from
75: * an internal pool of page headers (`phpool').
1.1 pk 76: */
77:
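/*
 * Illustrative sketch (not part of the original source): a minimal
 * consumer of the pool(9) interface implemented below.  "struct foo",
 * foo_pool, foo_startup(), foo_alloc() and foo_release() are all
 * hypothetical names.
 */
#if 0
struct foo {
	int	f_state;
};

static struct pool foo_pool;

static void
foo_startup(void)
{

	/*
	 * One item per "struct foo"; the nointr allocator suits pools
	 * used only from thread context (hence IPL_NONE).
	 */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
	    "foopl", &pool_allocator_nointr, IPL_NONE);
}

static struct foo *
foo_alloc(void)
{

	/* PR_WAITOK: sleep until an item becomes available. */
	return pool_get(&foo_pool, PR_WAITOK);
}

static void
foo_release(struct foo *f)
{

	pool_put(&foo_pool, f);
}
#endif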
1.3 pk 78: /* List of all pools */
1.101.2.8 yamt 79: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.101.2.6 yamt 80:
1.3 pk 81: /* Private pool for page header structures */
1.97 yamt 82: #define PHPOOL_MAX 8
83: static struct pool phpool[PHPOOL_MAX];
1.101.2.6 yamt 84: #define PHPOOL_FREELIST_NELEM(idx) \
85: (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
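/*
 * Illustrative note (not part of the original source): with
 * pool_item_bitmap_t being the 32-bit type defined below,
 * PHPOOL_FREELIST_NELEM(1) == 64, PHPOOL_FREELIST_NELEM(2) == 128, and
 * so on.  A PR_NOTOUCH pool with, say, 100 items per page is therefore
 * served by phpool[2], whose headers carry howmany(128, 32) == 4
 * bitmap words (see the sizing loop in pool_init()).
 */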
1.3 pk 86:
1.62 bjh21 87: #ifdef POOL_SUBPAGE
88: /* Pool of subpages for use by normal pools. */
89: static struct pool psppool;
90: #endif
91:
1.101.2.1 yamt 92: static SLIST_HEAD(, pool_allocator) pa_deferinitq =
93: SLIST_HEAD_INITIALIZER(pa_deferinitq);
94:
1.98 yamt 95: static void *pool_page_alloc_meta(struct pool *, int);
96: static void pool_page_free_meta(struct pool *, void *);
97:
98: /* allocator for pool metadata */
1.101.2.6 yamt 99: struct pool_allocator pool_allocator_meta = {
1.101.2.1 yamt 100: pool_page_alloc_meta, pool_page_free_meta,
101: .pa_backingmapptr = &kmem_map,
1.98 yamt 102: };
103:
1.3 pk 104: /* # of seconds to retain page after last use */
105: int pool_inactive_time = 10;
106:
107: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 108: static struct pool *drainpp;
109:
1.101.2.6 yamt 110: /* This lock protects both pool_head and drainpp. */
111: static kmutex_t pool_head_lock;
112: static kcondvar_t pool_busy;
113:
114: typedef uint32_t pool_item_bitmap_t;
115: #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
116: #define BITMAP_MASK (BITMAP_SIZE - 1)
1.99 yamt 117:
1.3 pk 118: struct pool_item_header {
119: /* Page headers */
1.88 chs 120: LIST_ENTRY(pool_item_header)
1.3 pk 121: ph_pagelist; /* pool page list */
1.88 chs 122: SPLAY_ENTRY(pool_item_header)
123: ph_node; /* Off-page page headers */
1.101.2.4 yamt 124: void * ph_page; /* this page's address */
1.101.2.11 yamt 125: uint32_t ph_time; /* last referenced */
1.101.2.6 yamt 126: uint16_t ph_nmissing; /* # of chunks in use */
1.101.2.8 yamt 127: uint16_t ph_off; /* start offset in page */
1.97 yamt 128: union {
129: /* !PR_NOTOUCH */
130: struct {
1.101.2.1 yamt 131: LIST_HEAD(, pool_item)
1.97 yamt 132: phu_itemlist; /* chunk list for this page */
133: } phu_normal;
134: /* PR_NOTOUCH */
135: struct {
1.101.2.8 yamt 136: pool_item_bitmap_t phu_bitmap[1];
1.97 yamt 137: } phu_notouch;
138: } ph_u;
1.3 pk 139: };
1.97 yamt 140: #define ph_itemlist ph_u.phu_normal.phu_itemlist
1.101.2.6 yamt 141: #define ph_bitmap ph_u.phu_notouch.phu_bitmap
1.3 pk 142:
1.1 pk 143: struct pool_item {
1.3 pk 144: #ifdef DIAGNOSTIC
1.82 thorpej 145: u_int pi_magic;
1.33 chs 146: #endif
1.101.2.6 yamt 147: #define PI_MAGIC 0xdeaddeadU
1.3 pk 148: /* Other entries use only this list entry */
1.101.2.1 yamt 149: LIST_ENTRY(pool_item) pi_list;
1.3 pk 150: };
151:
1.53 thorpej 152: #define POOL_NEEDS_CATCHUP(pp) \
153: ((pp)->pr_nitems < (pp)->pr_minitems)
154:
1.43 thorpej 155: /*
156: * Pool cache management.
157: *
158: * Pool caches provide a way for constructed objects to be cached by the
159: * pool subsystem. This can lead to performance improvements by avoiding
160: * needless object construction/destruction; it is deferred until absolutely
161: * necessary.
162: *
1.101.2.6 yamt 163: * Caches are grouped into cache groups. Each cache group references up
164: * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
165: * object from the pool, it calls the object's constructor and places it
166: * into a cache group. When a cache group frees an object back to the
167: * pool, it first calls the object's destructor. This allows the object
168: * to persist in constructed form while freed to the cache.
169: *
170: * The pool references each cache, so that when a pool is drained by the
171: * pagedaemon, it can drain each individual cache as well. Each time a
172: * cache is drained, the most idle cache group is freed to the pool in
173: * its entirety.
1.43 thorpej 174: *
175: * Pool caches are laid on top of pools. By layering them, we can avoid
176: * the complexity of cache management for pools which would not benefit
177: * from it.
178: */
179:
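/*
 * Illustrative sketch (not part of the original source): caching
 * constructed objects on top of a pool.  "struct bar", bar_cache,
 * bar_ctor(), bar_dtor() and bar_startup() are hypothetical, and the
 * pool_cache_init() prototype is assumed to be the one declared in
 * <sys/pool.h> at this revision.
 */
#if 0
struct bar {
	kmutex_t	b_lock;
	int		b_busy;
};

static pool_cache_t bar_cache;

static int
bar_ctor(void *arg, void *obj, int flags)
{
	struct bar *b = obj;

	/* Expensive one-time construction happens here. */
	mutex_init(&b->b_lock, MUTEX_DEFAULT, IPL_NONE);
	b->b_busy = 0;
	return 0;
}

static void
bar_dtor(void *arg, void *obj)
{
	struct bar *b = obj;

	mutex_destroy(&b->b_lock);
}

static void
bar_startup(void)
{

	bar_cache = pool_cache_init(sizeof(struct bar), 0, 0, 0,
	    "barcache", NULL, IPL_NONE, bar_ctor, bar_dtor, NULL);
}

/*
 * Objects then come and go in constructed form:
 *
 *	struct bar *b = pool_cache_get(bar_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(bar_cache, b);
 */
#endif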
1.101.2.8 yamt 180: static struct pool pcg_normal_pool;
181: static struct pool pcg_large_pool;
1.101.2.6 yamt 182: static struct pool cache_pool;
183: static struct pool cache_cpu_pool;
1.3 pk 184:
1.101.2.8 yamt 185: /* List of all caches. */
186: TAILQ_HEAD(,pool_cache) pool_cache_head =
187: TAILQ_HEAD_INITIALIZER(pool_cache_head);
188:
189: int pool_cache_disable;
190:
191:
1.101.2.6 yamt 192: static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
193: void *, paddr_t);
194: static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
195: void **, paddr_t *, int);
196: static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
197: static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
198: static void pool_cache_xcall(pool_cache_t);
1.3 pk 199:
1.42 thorpej 200: static int pool_catchup(struct pool *);
1.101.2.4 yamt 201: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 202: struct pool_item_header *);
1.88 chs 203: static void pool_update_curpage(struct pool *);
1.66 thorpej 204:
1.101.2.1 yamt 205: static int pool_grow(struct pool *, int);
206: static void *pool_allocator_alloc(struct pool *, int);
207: static void pool_allocator_free(struct pool *, void *);
1.3 pk 208:
1.97 yamt 209: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 210: void (*)(const char *, ...));
1.42 thorpej 211: static void pool_print1(struct pool *, const char *,
212: void (*)(const char *, ...));
1.3 pk 213:
1.88 chs 214: static int pool_chk_page(struct pool *, const char *,
215: struct pool_item_header *);
216:
1.3 pk 217: /*
1.52 thorpej 218: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 219: */
220: struct pool_log {
221: const char *pl_file;
222: long pl_line;
223: int pl_action;
1.25 thorpej 224: #define PRLOG_GET 1
225: #define PRLOG_PUT 2
1.3 pk 226: void *pl_addr;
1.1 pk 227: };
228:
1.86 matt 229: #ifdef POOL_DIAGNOSTIC
1.3 pk 230: /* Number of entries in pool log buffers */
1.17 thorpej 231: #ifndef POOL_LOGSIZE
232: #define POOL_LOGSIZE 10
233: #endif
234:
235: int pool_logsize = POOL_LOGSIZE;
1.1 pk 236:
1.101.2.1 yamt 237: static inline void
1.42 thorpej 238: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 239: {
240: int n = pp->pr_curlogentry;
241: struct pool_log *pl;
242:
1.20 thorpej 243: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 244: return;
245:
246: /*
247: * Fill in the current entry. Wrap around and overwrite
248: * the oldest entry if necessary.
249: */
250: pl = &pp->pr_log[n];
251: pl->pl_file = file;
252: pl->pl_line = line;
253: pl->pl_action = action;
254: pl->pl_addr = v;
255: if (++n >= pp->pr_logsize)
256: n = 0;
257: pp->pr_curlogentry = n;
258: }
259:
260: static void
1.42 thorpej 261: pr_printlog(struct pool *pp, struct pool_item *pi,
262: void (*pr)(const char *, ...))
1.3 pk 263: {
264: int i = pp->pr_logsize;
265: int n = pp->pr_curlogentry;
266:
1.20 thorpej 267: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 268: return;
269:
270: /*
271: * Print all entries in this pool's log.
272: */
273: while (i-- > 0) {
274: struct pool_log *pl = &pp->pr_log[n];
275: if (pl->pl_action != 0) {
1.25 thorpej 276: if (pi == NULL || pi == pl->pl_addr) {
277: (*pr)("\tlog entry %d:\n", i);
278: (*pr)("\t\taction = %s, addr = %p\n",
279: pl->pl_action == PRLOG_GET ? "get" : "put",
280: pl->pl_addr);
281: (*pr)("\t\tfile: %s at line %lu\n",
282: pl->pl_file, pl->pl_line);
283: }
1.3 pk 284: }
285: if (++n >= pp->pr_logsize)
286: n = 0;
287: }
288: }
1.25 thorpej 289:
1.101.2.1 yamt 290: static inline void
1.42 thorpej 291: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 292: {
293:
1.34 thorpej 294: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 295: printf("pool %s: reentrancy at file %s line %ld\n",
296: pp->pr_wchan, file, line);
297: printf(" previous entry at file %s line %ld\n",
298: pp->pr_entered_file, pp->pr_entered_line);
299: panic("pr_enter");
300: }
301:
302: pp->pr_entered_file = file;
303: pp->pr_entered_line = line;
304: }
305:
1.101.2.1 yamt 306: static inline void
1.42 thorpej 307: pr_leave(struct pool *pp)
1.25 thorpej 308: {
309:
1.34 thorpej 310: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 311: printf("pool %s not entered?\n", pp->pr_wchan);
312: panic("pr_leave");
313: }
314:
315: pp->pr_entered_file = NULL;
316: pp->pr_entered_line = 0;
317: }
318:
1.101.2.1 yamt 319: static inline void
1.42 thorpej 320: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 321: {
322:
323: if (pp->pr_entered_file != NULL)
324: (*pr)("\n\tcurrently entered from file %s line %ld\n",
325: pp->pr_entered_file, pp->pr_entered_line);
326: }
1.3 pk 327: #else
1.25 thorpej 328: #define pr_log(pp, v, action, file, line)
329: #define pr_printlog(pp, pi, pr)
330: #define pr_enter(pp, file, line)
331: #define pr_leave(pp)
332: #define pr_enter_check(pp, pr)
1.59 thorpej 333: #endif /* POOL_DIAGNOSTIC */
1.3 pk 334:
1.101.2.6 yamt 335: static inline unsigned int
1.97 yamt 336: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
337: const void *v)
338: {
339: const char *cp = v;
1.101.2.6 yamt 340: unsigned int idx;
1.97 yamt 341:
342: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.101.2.4 yamt 343: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 344: KASSERT(idx < pp->pr_itemsperpage);
345: return idx;
346: }
347:
1.101.2.1 yamt 348: static inline void
1.97 yamt 349: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
350: void *obj)
351: {
1.101.2.6 yamt 352: unsigned int idx = pr_item_notouch_index(pp, ph, obj);
353: pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
354: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
1.97 yamt 355:
1.101.2.6 yamt 356: KASSERT((*bitmap & mask) == 0);
357: *bitmap |= mask;
1.97 yamt 358: }
359:
1.101.2.1 yamt 360: static inline void *
1.97 yamt 361: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
362: {
1.101.2.6 yamt 363: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
364: unsigned int idx;
365: int i;
366:
367: for (i = 0; ; i++) {
368: int bit;
369:
370: KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
371: bit = ffs32(bitmap[i]);
372: if (bit) {
373: pool_item_bitmap_t mask;
374:
375: bit--;
376: idx = (i * BITMAP_SIZE) + bit;
377: mask = 1 << bit;
378: KASSERT((bitmap[i] & mask) != 0);
379: bitmap[i] &= ~mask;
380: break;
381: }
382: }
383: KASSERT(idx < pp->pr_itemsperpage);
384: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
385: }
1.97 yamt 386:
1.101.2.6 yamt 387: static inline void
388: pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
389: {
390: pool_item_bitmap_t *bitmap = ph->ph_bitmap;
391: const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
392: int i;
1.97 yamt 393:
1.101.2.6 yamt 394: for (i = 0; i < n; i++) {
395: bitmap[i] = (pool_item_bitmap_t)-1;
396: }
1.97 yamt 397: }
398:
1.101.2.1 yamt 399: static inline int
1.88 chs 400: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
401: {
1.101.2.2 yamt 402:
403: /*
404: * we consider a pool_item_header with a smaller ph_page to be bigger.
405: * (this unnatural ordering is for the benefit of pr_find_pagehead.)
406: */
407:
1.88 chs 408: if (a->ph_page < b->ph_page)
409: return (1);
1.101.2.2 yamt 410: else if (a->ph_page > b->ph_page)
411: return (-1);
1.88 chs 412: else
413: return (0);
414: }
415:
416: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
417: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
418:
1.101.2.8 yamt 419: static inline struct pool_item_header *
420: pr_find_pagehead_noalign(struct pool *pp, void *v)
421: {
422: struct pool_item_header *ph, tmp;
423:
424: tmp.ph_page = (void *)(uintptr_t)v;
425: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
426: if (ph == NULL) {
427: ph = SPLAY_ROOT(&pp->pr_phtree);
428: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
429: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
430: }
431: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
432: }
433:
434: return ph;
435: }
436:
1.3 pk 437: /*
1.101.2.2 yamt 438: * Return the pool page header based on item address.
1.3 pk 439: */
1.101.2.1 yamt 440: static inline struct pool_item_header *
1.101.2.2 yamt 441: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 442: {
1.88 chs 443: struct pool_item_header *ph, tmp;
1.3 pk 444:
1.101.2.2 yamt 445: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.101.2.8 yamt 446: ph = pr_find_pagehead_noalign(pp, v);
1.101.2.2 yamt 447: } else {
1.101.2.4 yamt 448: void *page =
449: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.101.2.2 yamt 450:
451: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.101.2.4 yamt 452: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.101.2.2 yamt 453: } else {
454: tmp.ph_page = page;
455: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
456: }
457: }
1.3 pk 458:
1.101.2.2 yamt 459: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.101.2.4 yamt 460: ((char *)ph->ph_page <= (char *)v &&
461: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 462: return ph;
1.3 pk 463: }
464:
1.101 thorpej 465: static void
466: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
467: {
468: struct pool_item_header *ph;
469:
470: while ((ph = LIST_FIRST(pq)) != NULL) {
471: LIST_REMOVE(ph, ph_pagelist);
472: pool_allocator_free(pp, ph->ph_page);
1.101.2.6 yamt 473: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 474: pool_put(pp->pr_phpool, ph);
475: }
476: }
477:
1.3 pk 478: /*
479: * Remove a page from the pool.
480: */
1.101.2.1 yamt 481: static inline void
1.61 chs 482: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
483: struct pool_pagelist *pq)
1.3 pk 484: {
485:
1.101.2.6 yamt 486: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 487:
1.3 pk 488: /*
1.7 thorpej 489: * If the page was idle, decrement the idle page count.
1.3 pk 490: */
1.6 thorpej 491: if (ph->ph_nmissing == 0) {
492: #ifdef DIAGNOSTIC
493: if (pp->pr_nidle == 0)
494: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 495: if (pp->pr_nitems < pp->pr_itemsperpage)
496: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 497: #endif
498: pp->pr_nidle--;
499: }
1.7 thorpej 500:
1.20 thorpej 501: pp->pr_nitems -= pp->pr_itemsperpage;
502:
1.7 thorpej 503: /*
1.101 thorpej 504: * Unlink the page from the pool and queue it for release.
1.7 thorpej 505: */
1.88 chs 506: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 507: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
508: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 509: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
510:
1.7 thorpej 511: pp->pr_npages--;
512: pp->pr_npagefree++;
1.6 thorpej 513:
1.88 chs 514: pool_update_curpage(pp);
1.3 pk 515: }
516:
1.101.2.3 yamt 517: static bool
1.101.2.1 yamt 518: pa_starved_p(struct pool_allocator *pa)
519: {
520:
521: if (pa->pa_backingmap != NULL) {
522: return vm_map_starved_p(pa->pa_backingmap);
523: }
1.101.2.3 yamt 524: return false;
1.101.2.1 yamt 525: }
526:
527: static int
528: pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
529: {
530: struct pool *pp = obj;
531: struct pool_allocator *pa = pp->pr_alloc;
532:
533: KASSERT(&pp->pr_reclaimerentry == ce);
534: pool_reclaim(pp);
535: if (!pa_starved_p(pa)) {
536: return CALLBACK_CHAIN_ABORT;
537: }
538: return CALLBACK_CHAIN_CONTINUE;
539: }
540:
541: static void
542: pool_reclaim_register(struct pool *pp)
543: {
544: struct vm_map *map = pp->pr_alloc->pa_backingmap;
545: int s;
546:
547: if (map == NULL) {
548: return;
549: }
550:
551: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
552: callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
553: &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
554: splx(s);
555: }
556:
557: static void
558: pool_reclaim_unregister(struct pool *pp)
559: {
560: struct vm_map *map = pp->pr_alloc->pa_backingmap;
561: int s;
562:
563: if (map == NULL) {
564: return;
565: }
566:
567: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
568: callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
569: &pp->pr_reclaimerentry);
570: splx(s);
571: }
572:
573: static void
574: pa_reclaim_register(struct pool_allocator *pa)
575: {
576: struct vm_map *map = *pa->pa_backingmapptr;
577: struct pool *pp;
578:
579: KASSERT(pa->pa_backingmap == NULL);
580: if (map == NULL) {
581: SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
582: return;
583: }
584: pa->pa_backingmap = map;
585: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
586: pool_reclaim_register(pp);
587: }
588: }
589:
1.3 pk 590: /*
1.94 simonb 591: * Initialize all the pools listed in the "pools" link set.
592: */
593: void
1.101.2.1 yamt 594: pool_subsystem_init(void)
1.94 simonb 595: {
1.101.2.1 yamt 596: struct pool_allocator *pa;
1.94 simonb 597: __link_set_decl(pools, struct link_pool_init);
598: struct link_pool_init * const *pi;
599:
1.101.2.6 yamt 600: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
601: cv_init(&pool_busy, "poolbusy");
602:
1.94 simonb 603: __link_set_foreach(pi, pools)
604: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
605: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
1.101.2.4 yamt 606: (*pi)->palloc, (*pi)->ipl);
1.101.2.1 yamt 607:
608: while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
609: KASSERT(pa->pa_backingmapptr != NULL);
610: KASSERT(*pa->pa_backingmapptr != NULL);
611: SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
612: pa_reclaim_register(pa);
613: }
1.101.2.6 yamt 614:
615: pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
616: 0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
617:
618: pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
619: 0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
1.94 simonb 620: }
621:
622: /*
1.3 pk 623: * Initialize the given pool resource structure.
624: *
625: * We export this routine to allow other kernel parts to declare
626: * static pools that must be initialized before malloc() is available.
627: */
628: void
1.42 thorpej 629: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.101.2.4 yamt 630: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 631: {
1.101.2.1 yamt 632: struct pool *pp1;
1.92 enami 633: size_t trysize, phsize;
1.101.2.6 yamt 634: int off, slack;
1.99 yamt 635:
1.101.2.1 yamt 636: #ifdef DEBUG
637: /*
638: * Check that the pool hasn't already been initialised and
639: * added to the list of all pools.
640: */
1.101.2.8 yamt 641: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
1.101.2.1 yamt 642: if (pp == pp1)
643: panic("pool_init: pool %s already initialised",
644: wchan);
645: }
646: #endif
647:
1.25 thorpej 648: #ifdef POOL_DIAGNOSTIC
649: /*
650: * Always log if POOL_DIAGNOSTIC is defined.
651: */
652: if (pool_logsize != 0)
653: flags |= PR_LOGGING;
654: #endif
655:
1.66 thorpej 656: if (palloc == NULL)
657: palloc = &pool_allocator_kmem;
1.101.2.1 yamt 658: #ifdef POOL_SUBPAGE
659: if (size > palloc->pa_pagesz) {
660: if (palloc == &pool_allocator_kmem)
661: palloc = &pool_allocator_kmem_fullpage;
662: else if (palloc == &pool_allocator_nointr)
663: palloc = &pool_allocator_nointr_fullpage;
664: }
1.66 thorpej 665: #endif /* POOL_SUBPAGE */
666: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.101.2.1 yamt 667: if (palloc->pa_pagesz == 0)
1.66 thorpej 668: palloc->pa_pagesz = PAGE_SIZE;
669:
670: TAILQ_INIT(&palloc->pa_list);
671:
1.101.2.6 yamt 672: mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
1.66 thorpej 673: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
674: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.101.2.1 yamt 675:
676: if (palloc->pa_backingmapptr != NULL) {
677: pa_reclaim_register(palloc);
678: }
1.66 thorpej 679: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 680: }
1.3 pk 681:
682: if (align == 0)
683: align = ALIGN(1);
1.14 thorpej 684:
1.101.2.2 yamt 685: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 686: size = sizeof(struct pool_item);
1.3 pk 687:
1.78 thorpej 688: size = roundup(size, align);
1.66 thorpej 689: #ifdef DIAGNOSTIC
690: if (size > palloc->pa_pagesz)
1.101.2.2 yamt 691: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 692: #endif
1.35 pk 693:
1.3 pk 694: /*
695: * Initialize the pool structure.
696: */
1.88 chs 697: LIST_INIT(&pp->pr_emptypages);
698: LIST_INIT(&pp->pr_fullpages);
699: LIST_INIT(&pp->pr_partpages);
1.101.2.6 yamt 700: pp->pr_cache = NULL;
1.3 pk 701: pp->pr_curpage = NULL;
702: pp->pr_npages = 0;
703: pp->pr_minitems = 0;
704: pp->pr_minpages = 0;
705: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 706: pp->pr_roflags = flags;
707: pp->pr_flags = 0;
1.35 pk 708: pp->pr_size = size;
1.3 pk 709: pp->pr_align = align;
710: pp->pr_wchan = wchan;
1.66 thorpej 711: pp->pr_alloc = palloc;
1.20 thorpej 712: pp->pr_nitems = 0;
713: pp->pr_nout = 0;
714: pp->pr_hardlimit = UINT_MAX;
715: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 716: pp->pr_hardlimit_ratecap.tv_sec = 0;
717: pp->pr_hardlimit_ratecap.tv_usec = 0;
718: pp->pr_hardlimit_warning_last.tv_sec = 0;
719: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 720: pp->pr_drain_hook = NULL;
721: pp->pr_drain_hook_arg = NULL;
1.101.2.3 yamt 722: pp->pr_freecheck = NULL;
1.3 pk 723:
724: /*
725: * Decide whether to put the page header off page to avoid
1.92 enami 726: * wasting too large a part of the page or too big an item.
727: * Off-page page headers go on a splay tree, so we can match
728: * a returned item with its header based on the page address.
729: * We use 1/16 of the page size and about 8 times the page
730: * header size as the threshold (XXX: tune).
731: *
732: * However, we'll put the header into the page if we can put
733: * it without wasting any items.
734: *
735: * Silently enforce `0 <= ioff < align'.
1.3 pk 736: */
1.92 enami 737: pp->pr_itemoffset = ioff %= align;
738: /* See the comment below about reserved bytes. */
739: trysize = palloc->pa_pagesz - ((align - ioff) % align);
740: phsize = ALIGN(sizeof(struct pool_item_header));
1.101.2.2 yamt 741: if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 742: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
743: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 744: /* Use the end of the page for the page header */
1.20 thorpej 745: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 746: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 747: } else {
1.3 pk 748: /* The page header will be taken from our page header pool */
749: pp->pr_phoffset = 0;
1.66 thorpej 750: off = palloc->pa_pagesz;
1.88 chs 751: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 752: }
1.1 pk 753:
1.3 pk 754: /*
755: * Alignment is to take place at `ioff' within the item. This means
756: * we must reserve up to `align - 1' bytes on the page to allow
757: * appropriate positioning of each item.
758: */
759: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 760: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 761: if ((pp->pr_roflags & PR_NOTOUCH)) {
762: int idx;
763:
764: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
765: idx++) {
766: /* nothing */
767: }
768: if (idx >= PHPOOL_MAX) {
769: /*
770: * if you see this panic, consider tweaking
771: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
772: */
773: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
774: pp->pr_wchan, pp->pr_itemsperpage);
775: }
776: pp->pr_phpool = &phpool[idx];
777: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
778: pp->pr_phpool = &phpool[0];
779: }
780: #if defined(DIAGNOSTIC)
781: else {
782: pp->pr_phpool = NULL;
783: }
784: #endif
1.3 pk 785:
786: /*
787: * Use the slack between the chunks and the page header
788: * for "cache coloring".
789: */
790: slack = off - pp->pr_itemsperpage * pp->pr_size;
791: pp->pr_maxcolor = (slack / align) * align;
792: pp->pr_curcolor = 0;
793:
794: pp->pr_nget = 0;
795: pp->pr_nfail = 0;
796: pp->pr_nput = 0;
797: pp->pr_npagealloc = 0;
798: pp->pr_npagefree = 0;
1.1 pk 799: pp->pr_hiwat = 0;
1.8 thorpej 800: pp->pr_nidle = 0;
1.101.2.6 yamt 801: pp->pr_refcnt = 0;
1.3 pk 802:
1.59 thorpej 803: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 804: if (flags & PR_LOGGING) {
805: if (kmem_map == NULL ||
806: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
807: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 808: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 809: pp->pr_curlogentry = 0;
810: pp->pr_logsize = pool_logsize;
811: }
1.59 thorpej 812: #endif
1.25 thorpej 813:
814: pp->pr_entered_file = NULL;
815: pp->pr_entered_line = 0;
1.3 pk 816:
1.101.2.7 yamt 817: /*
818: * XXXAD hack to prevent IP input processing from blocking.
819: */
820: if (ipl == IPL_SOFTNET) {
821: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
822: } else {
823: mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
824: }
1.101.2.6 yamt 825: cv_init(&pp->pr_cv, wchan);
826: pp->pr_ipl = ipl;
1.1 pk 827:
1.3 pk 828: /*
1.43 thorpej 829: * Initialize private page header pool and cache magazine pool if we
830: * haven't done so yet.
1.23 thorpej 831: * XXX LOCKING.
1.3 pk 832: */
1.97 yamt 833: if (phpool[0].pr_size == 0) {
834: int idx;
835: for (idx = 0; idx < PHPOOL_MAX; idx++) {
836: static char phpool_names[PHPOOL_MAX][6+1+6+1];
837: int nelem;
838: size_t sz;
839:
840: nelem = PHPOOL_FREELIST_NELEM(idx);
841: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
842: "phpool-%d", nelem);
843: sz = sizeof(struct pool_item_header);
844: if (nelem) {
1.101.2.6 yamt 845: sz = offsetof(struct pool_item_header,
846: ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
1.97 yamt 847: }
848: pool_init(&phpool[idx], sz, 0, 0, 0,
1.101.2.4 yamt 849: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.97 yamt 850: }
1.62 bjh21 851: #ifdef POOL_SUBPAGE
852: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.101.2.4 yamt 853: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
1.62 bjh21 854: #endif
1.101.2.8 yamt 855:
856: size = sizeof(pcg_t) +
857: (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
858: pool_init(&pcg_normal_pool, size, CACHE_LINE_SIZE, 0, 0,
859: "pcgnormal", &pool_allocator_meta, IPL_VM);
860:
861: size = sizeof(pcg_t) +
862: (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
863: pool_init(&pcg_large_pool, size, CACHE_LINE_SIZE, 0, 0,
864: "pcglarge", &pool_allocator_meta, IPL_VM);
1.1 pk 865: }
866:
1.101.2.8 yamt 867: /* Insert into the list of all pools. */
868: if (__predict_true(!cold))
1.101.2.6 yamt 869: mutex_enter(&pool_head_lock);
1.101.2.8 yamt 870: TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
871: if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
872: break;
873: }
874: if (pp1 == NULL)
875: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
876: else
877: TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
878: if (__predict_true(!cold))
1.101.2.6 yamt 879: mutex_exit(&pool_head_lock);
880:
881: /* Insert this into the list of pools using this allocator. */
1.101.2.8 yamt 882: if (__predict_true(!cold))
1.101.2.6 yamt 883: mutex_enter(&palloc->pa_lock);
1.101.2.8 yamt 884: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
885: if (__predict_true(!cold))
1.101.2.6 yamt 886: mutex_exit(&palloc->pa_lock);
1.66 thorpej 887:
1.101.2.1 yamt 888: pool_reclaim_register(pp);
1.1 pk 889: }
890:
891: /*
892: * De-commission a pool resource.
893: */
894: void
1.42 thorpej 895: pool_destroy(struct pool *pp)
1.1 pk 896: {
1.101 thorpej 897: struct pool_pagelist pq;
1.3 pk 898: struct pool_item_header *ph;
1.43 thorpej 899:
1.101 thorpej 900: /* Remove from global pool list */
1.101.2.6 yamt 901: mutex_enter(&pool_head_lock);
902: while (pp->pr_refcnt != 0)
903: cv_wait(&pool_busy, &pool_head_lock);
1.101.2.8 yamt 904: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.101 thorpej 905: if (drainpp == pp)
906: drainpp = NULL;
1.101.2.6 yamt 907: mutex_exit(&pool_head_lock);
1.101 thorpej 908:
909: /* Remove this pool from its allocator's list of pools. */
1.101.2.1 yamt 910: pool_reclaim_unregister(pp);
1.101.2.6 yamt 911: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 912: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.101.2.6 yamt 913: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 914:
1.101.2.6 yamt 915: mutex_enter(&pp->pr_lock);
1.101 thorpej 916:
1.101.2.6 yamt 917: KASSERT(pp->pr_cache == NULL);
1.3 pk 918:
919: #ifdef DIAGNOSTIC
1.20 thorpej 920: if (pp->pr_nout != 0) {
1.25 thorpej 921: pr_printlog(pp, NULL, printf);
1.80 provos 922: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 923: pp->pr_nout);
1.3 pk 924: }
925: #endif
1.1 pk 926:
1.101 thorpej 927: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
928: KASSERT(LIST_EMPTY(&pp->pr_partpages));
929:
1.3 pk 930: /* Remove all pages */
1.101 thorpej 931: LIST_INIT(&pq);
1.88 chs 932: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 933: pr_rmpage(pp, ph, &pq);
934:
1.101.2.6 yamt 935: mutex_exit(&pp->pr_lock);
1.3 pk 936:
1.101 thorpej 937: pr_pagelist_free(pp, &pq);
1.3 pk 938:
1.59 thorpej 939: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 940: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 941: free(pp->pr_log, M_TEMP);
1.59 thorpej 942: #endif
1.101.2.6 yamt 943:
944: cv_destroy(&pp->pr_cv);
945: mutex_destroy(&pp->pr_lock);
1.1 pk 946: }
947:
1.68 thorpej 948: void
949: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
950: {
951:
952: /* XXX no locking -- must be used just after pool_init() */
953: #ifdef DIAGNOSTIC
954: if (pp->pr_drain_hook != NULL)
955: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
956: #endif
957: pp->pr_drain_hook = fn;
958: pp->pr_drain_hook_arg = arg;
959: }
960:
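/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * pool owner registering a drain hook right after pool_init(), as the
 * "no locking" comment above requires.  It reuses the made-up foo_pool
 * from the sketch near the top of the file; foo_drain() and
 * foo_startup() are made-up names as well.
 */
#if 0
static void
foo_drain(void *arg, int flags)
{

	/*
	 * The pool is under memory pressure; release any
	 * subsystem-private caches of foo_pool items here.
	 */
}

static void
foo_startup(void)
{

	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
	    "foopl", &pool_allocator_nointr, IPL_NONE);
	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
}
#endif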
1.88 chs 961: static struct pool_item_header *
1.101.2.4 yamt 962: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 963: {
964: struct pool_item_header *ph;
965:
966: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.101.2.4 yamt 967: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.101.2.6 yamt 968: else
1.97 yamt 969: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 970:
971: return (ph);
972: }
1.1 pk 973:
974: /*
1.101.2.6 yamt 975: * Grab an item from the pool.
1.1 pk 976: */
1.3 pk 977: void *
1.59 thorpej 978: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 979: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 980: #else
981: pool_get(struct pool *pp, int flags)
982: #endif
1.1 pk 983: {
984: struct pool_item *pi;
1.3 pk 985: struct pool_item_header *ph;
1.55 thorpej 986: void *v;
1.1 pk 987:
1.2 pk 988: #ifdef DIAGNOSTIC
1.95 atatat 989: if (__predict_false(pp->pr_itemsperpage == 0))
990: panic("pool_get: pool %p: pr_itemsperpage is zero, "
991: "pool not initialized?", pp);
1.84 thorpej 992: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 993: (flags & PR_WAITOK) != 0))
1.77 matt 994: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 995:
1.101.2.1 yamt 996: #endif /* DIAGNOSTIC */
1.58 thorpej 997: #ifdef LOCKDEBUG
998: if (flags & PR_WAITOK)
1.101.2.12! yamt 999: ASSERT_SLEEPABLE();
1.56 sommerfe 1000: #endif
1.1 pk 1001:
1.101.2.6 yamt 1002: mutex_enter(&pp->pr_lock);
1.25 thorpej 1003: pr_enter(pp, file, line);
1.20 thorpej 1004:
1005: startover:
1006: /*
1007: * Check to see if we've reached the hard limit. If we have,
1008: * and we can wait, then wait until an item has been returned to
1009: * the pool.
1010: */
1011: #ifdef DIAGNOSTIC
1.34 thorpej 1012: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 1013: pr_leave(pp);
1.101.2.6 yamt 1014: mutex_exit(&pp->pr_lock);
1.20 thorpej 1015: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
1016: }
1017: #endif
1.34 thorpej 1018: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 1019: if (pp->pr_drain_hook != NULL) {
1020: /*
1021: * Since the drain hook is going to free things
1022: * back to the pool, unlock, call the hook, re-lock,
1023: * and check the hardlimit condition again.
1024: */
1025: pr_leave(pp);
1.101.2.6 yamt 1026: mutex_exit(&pp->pr_lock);
1.68 thorpej 1027: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.101.2.6 yamt 1028: mutex_enter(&pp->pr_lock);
1.68 thorpej 1029: pr_enter(pp, file, line);
1030: if (pp->pr_nout < pp->pr_hardlimit)
1031: goto startover;
1032: }
1033:
1.29 sommerfe 1034: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 1035: /*
1036: * XXX: A warning isn't logged in this case. Should
1037: * it be?
1038: */
1039: pp->pr_flags |= PR_WANTED;
1.25 thorpej 1040: pr_leave(pp);
1.101.2.6 yamt 1041: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.25 thorpej 1042: pr_enter(pp, file, line);
1.20 thorpej 1043: goto startover;
1044: }
1.31 thorpej 1045:
1046: /*
1047: * Log a message that the hard limit has been hit.
1048: */
1049: if (pp->pr_hardlimit_warning != NULL &&
1050: ratecheck(&pp->pr_hardlimit_warning_last,
1051: &pp->pr_hardlimit_ratecap))
1052: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 1053:
1054: pp->pr_nfail++;
1055:
1.25 thorpej 1056: pr_leave(pp);
1.101.2.6 yamt 1057: mutex_exit(&pp->pr_lock);
1.20 thorpej 1058: return (NULL);
1059: }
1060:
1.3 pk 1061: /*
1062: * The convention we use is that if `curpage' is not NULL, then
1063: * it points at a non-empty bucket. In particular, `curpage'
1064: * never points at a page header which has PR_PHINPAGE set and
1065: * has no items in its bucket.
1066: */
1.20 thorpej 1067: if ((ph = pp->pr_curpage) == NULL) {
1.101.2.1 yamt 1068: int error;
1069:
1.20 thorpej 1070: #ifdef DIAGNOSTIC
1071: if (pp->pr_nitems != 0) {
1.101.2.6 yamt 1072: mutex_exit(&pp->pr_lock);
1.20 thorpej 1073: printf("pool_get: %s: curpage NULL, nitems %u\n",
1074: pp->pr_wchan, pp->pr_nitems);
1.80 provos 1075: panic("pool_get: nitems inconsistent");
1.20 thorpej 1076: }
1077: #endif
1078:
1.21 thorpej 1079: /*
1080: * Call the back-end page allocator for more memory.
1081: * Release the pool lock, as the back-end page allocator
1082: * may block.
1083: */
1.25 thorpej 1084: pr_leave(pp);
1.101.2.1 yamt 1085: error = pool_grow(pp, flags);
1086: pr_enter(pp, file, line);
1087: if (error != 0) {
1.21 thorpej 1088: /*
1.55 thorpej 1089: * We were unable to allocate a page or item
1090: * header, but we released the lock during
1091: * allocation, so perhaps items were freed
1092: * back to the pool. Check for this case.
1.21 thorpej 1093: */
1094: if (pp->pr_curpage != NULL)
1095: goto startover;
1.15 pk 1096:
1.101.2.1 yamt 1097: pp->pr_nfail++;
1.25 thorpej 1098: pr_leave(pp);
1.101.2.6 yamt 1099: mutex_exit(&pp->pr_lock);
1.101.2.1 yamt 1100: return (NULL);
1.1 pk 1101: }
1.3 pk 1102:
1.20 thorpej 1103: /* Start the allocation process over. */
1104: goto startover;
1.3 pk 1105: }
1.97 yamt 1106: if (pp->pr_roflags & PR_NOTOUCH) {
1107: #ifdef DIAGNOSTIC
1108: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1109: pr_leave(pp);
1.101.2.6 yamt 1110: mutex_exit(&pp->pr_lock);
1.97 yamt 1111: panic("pool_get: %s: page empty", pp->pr_wchan);
1112: }
1113: #endif
1114: v = pr_item_notouch_get(pp, ph);
1115: #ifdef POOL_DIAGNOSTIC
1116: pr_log(pp, v, PRLOG_GET, file, line);
1117: #endif
1118: } else {
1.101.2.1 yamt 1119: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 1120: if (__predict_false(v == NULL)) {
1121: pr_leave(pp);
1.101.2.6 yamt 1122: mutex_exit(&pp->pr_lock);
1.97 yamt 1123: panic("pool_get: %s: page empty", pp->pr_wchan);
1124: }
1.20 thorpej 1125: #ifdef DIAGNOSTIC
1.97 yamt 1126: if (__predict_false(pp->pr_nitems == 0)) {
1127: pr_leave(pp);
1.101.2.6 yamt 1128: mutex_exit(&pp->pr_lock);
1.97 yamt 1129: printf("pool_get: %s: items on itemlist, nitems %u\n",
1130: pp->pr_wchan, pp->pr_nitems);
1131: panic("pool_get: nitems inconsistent");
1132: }
1.65 enami 1133: #endif
1.56 sommerfe 1134:
1.65 enami 1135: #ifdef POOL_DIAGNOSTIC
1.97 yamt 1136: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 1137: #endif
1.3 pk 1138:
1.65 enami 1139: #ifdef DIAGNOSTIC
1.97 yamt 1140: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1141: pr_printlog(pp, pi, printf);
1142: panic("pool_get(%s): free list modified: "
1143: "magic=%x; page %p; item addr %p\n",
1144: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1145: }
1.3 pk 1146: #endif
1147:
1.97 yamt 1148: /*
1149: * Remove from item list.
1150: */
1.101.2.1 yamt 1151: LIST_REMOVE(pi, pi_list);
1.97 yamt 1152: }
1.20 thorpej 1153: pp->pr_nitems--;
1154: pp->pr_nout++;
1.6 thorpej 1155: if (ph->ph_nmissing == 0) {
1156: #ifdef DIAGNOSTIC
1.34 thorpej 1157: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 1158: panic("pool_get: nidle inconsistent");
1159: #endif
1160: pp->pr_nidle--;
1.88 chs 1161:
1162: /*
1163: * This page was previously empty. Move it to the list of
1164: * partially-full pages. This page is already curpage.
1165: */
1166: LIST_REMOVE(ph, ph_pagelist);
1167: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 1168: }
1.3 pk 1169: ph->ph_nmissing++;
1.97 yamt 1170: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1171: #ifdef DIAGNOSTIC
1.97 yamt 1172: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.101.2.1 yamt 1173: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1174: pr_leave(pp);
1.101.2.6 yamt 1175: mutex_exit(&pp->pr_lock);
1.21 thorpej 1176: panic("pool_get: %s: nmissing inconsistent",
1177: pp->pr_wchan);
1178: }
1179: #endif
1.3 pk 1180: /*
1.88 chs 1181: * This page is now full. Move it to the full list
1182: * and select a new current page.
1.3 pk 1183: */
1.88 chs 1184: LIST_REMOVE(ph, ph_pagelist);
1185: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1186: pool_update_curpage(pp);
1.1 pk 1187: }
1.3 pk 1188:
1189: pp->pr_nget++;
1.101.2.1 yamt 1190: pr_leave(pp);
1.20 thorpej 1191:
1192: /*
1193: * If we have a low water mark and we are now below that low
1194: * water mark, add more items to the pool.
1195: */
1.53 thorpej 1196: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1197: /*
1198: * XXX: Should we log a warning? Should we set up a timeout
1199: * to try again in a second or so? The latter could break
1200: * a caller's assumptions about interrupt protection, etc.
1201: */
1202: }
1203:
1.101.2.6 yamt 1204: mutex_exit(&pp->pr_lock);
1.101.2.3 yamt 1205: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
1206: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 1207: return (v);
1208: }
1209:
1210: /*
1.43 thorpej 1211: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1212: */
1.43 thorpej 1213: static void
1.101 thorpej 1214: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1215: {
1216: struct pool_item *pi = v;
1.3 pk 1217: struct pool_item_header *ph;
1218:
1.101.2.6 yamt 1219: KASSERT(mutex_owned(&pp->pr_lock));
1.101.2.3 yamt 1220: FREECHECK_IN(&pp->pr_freecheck, v);
1.101.2.6 yamt 1221: LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1.61 chs 1222:
1.30 thorpej 1223: #ifdef DIAGNOSTIC
1.34 thorpej 1224: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1225: printf("pool %s: putting with none out\n",
1226: pp->pr_wchan);
1227: panic("pool_put");
1228: }
1229: #endif
1.3 pk 1230:
1.101.2.2 yamt 1231: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.25 thorpej 1232: pr_printlog(pp, NULL, printf);
1.3 pk 1233: panic("pool_put: %s: page header missing", pp->pr_wchan);
1234: }
1.28 thorpej 1235:
1.3 pk 1236: /*
1237: * Return to item list.
1238: */
1.97 yamt 1239: if (pp->pr_roflags & PR_NOTOUCH) {
1240: pr_item_notouch_put(pp, ph, v);
1241: } else {
1.2 pk 1242: #ifdef DIAGNOSTIC
1.97 yamt 1243: pi->pi_magic = PI_MAGIC;
1.3 pk 1244: #endif
1.32 chs 1245: #ifdef DEBUG
1.97 yamt 1246: {
1247: int i, *ip = v;
1.32 chs 1248:
1.97 yamt 1249: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1250: *ip++ = PI_MAGIC;
1251: }
1.32 chs 1252: }
1253: #endif
1254:
1.101.2.1 yamt 1255: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1256: }
1.79 thorpej 1257: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1258: ph->ph_nmissing--;
1259: pp->pr_nput++;
1.20 thorpej 1260: pp->pr_nitems++;
1261: pp->pr_nout--;
1.3 pk 1262:
1263: /* Cancel "pool empty" condition if it exists */
1264: if (pp->pr_curpage == NULL)
1265: pp->pr_curpage = ph;
1266:
1267: if (pp->pr_flags & PR_WANTED) {
1268: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1269: if (ph->ph_nmissing == 0)
1270: pp->pr_nidle++;
1.101.2.6 yamt 1271: cv_broadcast(&pp->pr_cv);
1.3 pk 1272: return;
1273: }
1274:
1275: /*
1.88 chs 1276: * If this page is now empty, do one of two things:
1.21 thorpej 1277: *
1.88 chs 1278: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1279: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1280: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1281: * CLAIM.
1.21 thorpej 1282: *
1.88 chs 1283: * (2) Otherwise, move the page to the empty page list.
1284: *
1285: * Either way, select a new current page (so we use a partially-full
1286: * page if one is available).
1.3 pk 1287: */
1288: if (ph->ph_nmissing == 0) {
1.6 thorpej 1289: pp->pr_nidle++;
1.90 thorpej 1290: if (pp->pr_npages > pp->pr_minpages &&
1.101.2.12! yamt 1291: pp->pr_npages > pp->pr_maxpages) {
1.101 thorpej 1292: pr_rmpage(pp, ph, pq);
1.3 pk 1293: } else {
1.88 chs 1294: LIST_REMOVE(ph, ph_pagelist);
1295: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1296:
1.21 thorpej 1297: /*
1298: * Update the timestamp on the page. A page must
1299: * be idle for some period of time before it can
1300: * be reclaimed by the pagedaemon. This minimizes
1301: * ping-pong'ing for memory.
1.101.2.11 yamt 1302: *
1303: * note for 64-bit time_t: truncating to 32-bit is not
1304: * a problem for our usage.
1.21 thorpej 1305: */
1.101.2.11 yamt 1306: ph->ph_time = time_uptime;
1.1 pk 1307: }
1.88 chs 1308: pool_update_curpage(pp);
1.1 pk 1309: }
1.88 chs 1310:
1.21 thorpej 1311: /*
1.88 chs 1312: * If the page was previously completely full, move it to the
1313: * partially-full list and make it the current page. The next
1314: * allocation will get the item from this page, instead of
1315: * further fragmenting the pool.
1.21 thorpej 1316: */
1317: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1318: LIST_REMOVE(ph, ph_pagelist);
1319: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1320: pp->pr_curpage = ph;
1321: }
1.43 thorpej 1322: }
1323:
1324: /*
1.101.2.6 yamt 1325: * Return resource to the pool.
1.43 thorpej 1326: */
1.59 thorpej 1327: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1328: void
1329: _pool_put(struct pool *pp, void *v, const char *file, long line)
1330: {
1.101 thorpej 1331: struct pool_pagelist pq;
1332:
1333: LIST_INIT(&pq);
1.43 thorpej 1334:
1.101.2.6 yamt 1335: mutex_enter(&pp->pr_lock);
1.43 thorpej 1336: pr_enter(pp, file, line);
1337:
1.56 sommerfe 1338: pr_log(pp, v, PRLOG_PUT, file, line);
1339:
1.101 thorpej 1340: pool_do_put(pp, v, &pq);
1.21 thorpej 1341:
1.25 thorpej 1342: pr_leave(pp);
1.101.2.6 yamt 1343: mutex_exit(&pp->pr_lock);
1.101 thorpej 1344:
1.101.2.1 yamt 1345: pr_pagelist_free(pp, &pq);
1.1 pk 1346: }
1.57 sommerfe 1347: #undef pool_put
1.59 thorpej 1348: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1349:
1.56 sommerfe 1350: void
1351: pool_put(struct pool *pp, void *v)
1352: {
1.101 thorpej 1353: struct pool_pagelist pq;
1354:
1355: LIST_INIT(&pq);
1.56 sommerfe 1356:
1.101.2.6 yamt 1357: mutex_enter(&pp->pr_lock);
1.101 thorpej 1358: pool_do_put(pp, v, &pq);
1.101.2.6 yamt 1359: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1360:
1.101.2.1 yamt 1361: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1362: }
1.57 sommerfe 1363:
1.59 thorpej 1364: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1365: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1366: #endif
1.74 thorpej 1367:
1368: /*
1.101.2.1 yamt 1369: * pool_grow: grow a pool by a page.
1370: *
1371: * => called with pool locked.
1372: * => unlock and relock the pool.
1373: * => return with pool locked.
1374: */
1375:
1376: static int
1377: pool_grow(struct pool *pp, int flags)
1378: {
1379: struct pool_item_header *ph = NULL;
1380: char *cp;
1381:
1.101.2.6 yamt 1382: mutex_exit(&pp->pr_lock);
1.101.2.1 yamt 1383: cp = pool_allocator_alloc(pp, flags);
1384: if (__predict_true(cp != NULL)) {
1385: ph = pool_alloc_item_header(pp, cp, flags);
1386: }
1387: if (__predict_false(cp == NULL || ph == NULL)) {
1388: if (cp != NULL) {
1389: pool_allocator_free(pp, cp);
1390: }
1.101.2.6 yamt 1391: mutex_enter(&pp->pr_lock);
1.101.2.1 yamt 1392: return ENOMEM;
1393: }
1394:
1.101.2.6 yamt 1395: mutex_enter(&pp->pr_lock);
1.101.2.1 yamt 1396: pool_prime_page(pp, cp, ph);
1397: pp->pr_npagealloc++;
1398: return 0;
1399: }
1400:
1401: /*
1.74 thorpej 1402: * Add N items to the pool.
1403: */
1404: int
1405: pool_prime(struct pool *pp, int n)
1406: {
1.75 simonb 1407: int newpages;
1.101.2.1 yamt 1408: int error = 0;
1.74 thorpej 1409:
1.101.2.6 yamt 1410: mutex_enter(&pp->pr_lock);
1.74 thorpej 1411:
1412: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1413:
1414: while (newpages-- > 0) {
1.101.2.1 yamt 1415: error = pool_grow(pp, PR_NOWAIT);
1416: if (error) {
1.74 thorpej 1417: break;
1418: }
1419: pp->pr_minpages++;
1420: }
1421:
1422: if (pp->pr_minpages >= pp->pr_maxpages)
1423: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1424:
1.101.2.6 yamt 1425: mutex_exit(&pp->pr_lock);
1.101.2.1 yamt 1426: return error;
1.74 thorpej 1427: }
1.55 thorpej 1428:
1429: /*
1.3 pk 1430: * Add a page worth of items to the pool.
1.21 thorpej 1431: *
1432: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1433: */
1.55 thorpej 1434: static void
1.101.2.4 yamt 1435: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1436: {
1437: struct pool_item *pi;
1.101.2.4 yamt 1438: void *cp = storage;
1.101.2.3 yamt 1439: const unsigned int align = pp->pr_align;
1440: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1441: int n;
1.36 pk 1442:
1.101.2.6 yamt 1443: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1444:
1.66 thorpej 1445: #ifdef DIAGNOSTIC
1.101.2.2 yamt 1446: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1.101.2.10 yamt 1447: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1448: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1449: #endif
1.3 pk 1450:
1451: /*
1452: * Insert page header.
1453: */
1.88 chs 1454: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.101.2.1 yamt 1455: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1456: ph->ph_page = storage;
1457: ph->ph_nmissing = 0;
1.101.2.11 yamt 1458: ph->ph_time = time_uptime;
1.88 chs 1459: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1460: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1461:
1.6 thorpej 1462: pp->pr_nidle++;
1463:
1.3 pk 1464: /*
1465: * Color this page.
1466: */
1.101.2.8 yamt 1467: ph->ph_off = pp->pr_curcolor;
1468: cp = (char *)cp + ph->ph_off;
1.3 pk 1469: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1470: pp->pr_curcolor = 0;
1471:
1472: /*
1473: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1474: */
1475: if (ioff != 0)
1.101.2.4 yamt 1476: cp = (char *)cp + align - ioff;
1.3 pk 1477:
1.101.2.3 yamt 1478: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1479:
1.3 pk 1480: /*
1481: * Insert remaining chunks on the bucket list.
1482: */
1483: n = pp->pr_itemsperpage;
1.20 thorpej 1484: pp->pr_nitems += n;
1.3 pk 1485:
1.97 yamt 1486: if (pp->pr_roflags & PR_NOTOUCH) {
1.101.2.6 yamt 1487: pr_item_notouch_init(pp, ph);
1.97 yamt 1488: } else {
1489: while (n--) {
1490: pi = (struct pool_item *)cp;
1.78 thorpej 1491:
1.97 yamt 1492: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1493:
1.97 yamt 1494: /* Insert on page list */
1.101.2.1 yamt 1495: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1496: #ifdef DIAGNOSTIC
1.97 yamt 1497: pi->pi_magic = PI_MAGIC;
1.3 pk 1498: #endif
1.101.2.4 yamt 1499: cp = (char *)cp + pp->pr_size;
1.101.2.3 yamt 1500:
1501: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1502: }
1.3 pk 1503: }
1504:
1505: /*
1506: * If the pool was depleted, point at the new page.
1507: */
1508: if (pp->pr_curpage == NULL)
1509: pp->pr_curpage = ph;
1510:
1511: if (++pp->pr_npages > pp->pr_hiwat)
1512: pp->pr_hiwat = pp->pr_npages;
1513: }
1514:
1.20 thorpej 1515: /*
1.52 thorpej 1516: * Used by pool_get() when nitems drops below the low water mark, in
1.88 chs 1517: * order to catch pr_nitems back up to the low water mark.
1.20 thorpej 1518: *
1.21 thorpej 1519: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1520: *
1.73 thorpej 1521: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1522: * with it locked.
1523: */
1524: static int
1.42 thorpej 1525: pool_catchup(struct pool *pp)
1.20 thorpej 1526: {
1527: int error = 0;
1528:
1.54 thorpej 1529: while (POOL_NEEDS_CATCHUP(pp)) {
1.101.2.1 yamt 1530: error = pool_grow(pp, PR_NOWAIT);
1531: if (error) {
1.20 thorpej 1532: break;
1533: }
1534: }
1.101.2.1 yamt 1535: return error;
1.20 thorpej 1536: }
1537:
1.88 chs 1538: static void
1539: pool_update_curpage(struct pool *pp)
1540: {
1541:
1542: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1543: if (pp->pr_curpage == NULL) {
1544: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1545: }
1546: }
1547:
1.3 pk 1548: void
1.42 thorpej 1549: pool_setlowat(struct pool *pp, int n)
1.3 pk 1550: {
1.15 pk 1551:
1.101.2.6 yamt 1552: mutex_enter(&pp->pr_lock);
1.21 thorpej 1553:
1.3 pk 1554: pp->pr_minitems = n;
1.15 pk 1555: pp->pr_minpages = (n == 0)
1556: ? 0
1.18 thorpej 1557: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1558:
1559: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1560: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1561: /*
1562: * XXX: Should we log a warning? Should we set up a timeout
1563: * to try again in a second or so? The latter could break
1564: * a caller's assumptions about interrupt protection, etc.
1565: */
1566: }
1.21 thorpej 1567:
1.101.2.6 yamt 1568: mutex_exit(&pp->pr_lock);
1.3 pk 1569: }
1570:
1571: void
1.42 thorpej 1572: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1573: {
1.15 pk 1574:
1.101.2.6 yamt 1575: mutex_enter(&pp->pr_lock);
1.21 thorpej 1576:
1.15 pk 1577: pp->pr_maxpages = (n == 0)
1578: ? 0
1.18 thorpej 1579: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1580:
1.101.2.6 yamt 1581: mutex_exit(&pp->pr_lock);
1.3 pk 1582: }
1583:
1.20 thorpej 1584: void
1.42 thorpej 1585: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1586: {
1587:
1.101.2.6 yamt 1588: mutex_enter(&pp->pr_lock);
1.20 thorpej 1589:
1590: pp->pr_hardlimit = n;
1591: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1592: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1593: pp->pr_hardlimit_warning_last.tv_sec = 0;
1594: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1595:
1596: /*
1.21 thorpej 1597: * In-line version of pool_sethiwat(), because we don't want to
1598: * release the lock.
1.20 thorpej 1599: */
1600: pp->pr_maxpages = (n == 0)
1601: ? 0
1602: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1603:
1.101.2.6 yamt 1604: mutex_exit(&pp->pr_lock);
1.20 thorpej 1605: }
1.3 pk 1606:
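/*
 * Illustrative sketch (not part of the original source): tuning the
 * hypothetical foo_pool from the earlier sketch with the three knobs
 * above.  All counts are in items; the routines convert them to page
 * counts internally.
 */
#if 0
static void
foo_tune(void)
{

	pool_setlowat(&foo_pool, 16);	/* keep roughly 16 items primed */
	pool_sethiwat(&foo_pool, 1024);	/* let reclaim trim pages above this */
	pool_sethardlimit(&foo_pool, 4096,
	    "WARNING: foo_pool limit reached", 60);	/* warn at most once/min */
}
#endif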
1607: /*
1608: * Release all complete pages that have not been used recently.
1609: */
1.66 thorpej 1610: int
1.59 thorpej 1611: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1612: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1613: #else
1614: pool_reclaim(struct pool *pp)
1615: #endif
1.3 pk 1616: {
1617: struct pool_item_header *ph, *phnext;
1.61 chs 1618: struct pool_pagelist pq;
1.101.2.11 yamt 1619: uint32_t curtime;
1.101.2.6 yamt 1620: bool klock;
1621: int rv;
1.3 pk 1622:
1.68 thorpej 1623: if (pp->pr_drain_hook != NULL) {
1624: /*
1625: * The drain hook must be called with the pool unlocked.
1626: */
1627: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1628: }
1629:
1.101.2.6 yamt 1630: /*
1631: * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks,
1632: * and we are called from the pagedaemon without kernel_lock,
1633: * take the kernel lock here. Does not apply to IPL_SOFTBIO.
1634: */
1635: if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1636: pp->pr_ipl == IPL_SOFTSERIAL) {
1637: KERNEL_LOCK(1, NULL);
1638: klock = true;
1639: } else
1640: klock = false;
1641:
1642: /* Reclaim items from the pool's cache (if any). */
1643: if (pp->pr_cache != NULL)
1644: pool_cache_invalidate(pp->pr_cache);
1645:
1646: if (mutex_tryenter(&pp->pr_lock) == 0) {
1647: if (klock) {
1648: KERNEL_UNLOCK_ONE(NULL);
1649: }
1.66 thorpej 1650: return (0);
1.101.2.6 yamt 1651: }
1.25 thorpej 1652: pr_enter(pp, file, line);
1.68 thorpej 1653:
1.88 chs 1654: LIST_INIT(&pq);
1.43 thorpej 1655:
1.101.2.11 yamt 1656: curtime = time_uptime;
1.21 thorpej 1657:
1.88 chs 1658: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1659: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1660:
1661: /* Check our minimum page claim */
1662: if (pp->pr_npages <= pp->pr_minpages)
1663: break;
1664:
1.88 chs 1665: KASSERT(ph->ph_nmissing == 0);
1.101.2.11 yamt 1666: if (curtime - ph->ph_time < pool_inactive_time
1.101.2.1 yamt 1667: && !pa_starved_p(pp->pr_alloc))
1.88 chs 1668: continue;
1.21 thorpej 1669:
1.88 chs 1670: /*
1671: * If freeing this page would put us below
1672: * the low water mark, stop now.
1673: */
1674: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1675: pp->pr_minitems)
1676: break;
1.21 thorpej 1677:
1.88 chs 1678: pr_rmpage(pp, ph, &pq);
1.3 pk 1679: }
1680:
1.25 thorpej 1681: pr_leave(pp);
1.101.2.6 yamt 1682: mutex_exit(&pp->pr_lock);
1.66 thorpej 1683:
1.101.2.6 yamt 1684: if (LIST_EMPTY(&pq))
1685: rv = 0;
1686: else {
1687: pr_pagelist_free(pp, &pq);
1688: rv = 1;
1689: }
1690:
1691: if (klock) {
1692: KERNEL_UNLOCK_ONE(NULL);
1693: }
1694:
1695: return (rv);
1.3 pk 1696: }
1697:
1698: /*
1.101.2.6 yamt 1699:  * Drain pools, one at a time.  This is a two-stage process;
1700:  * drain_start kicks off a cross-call to drain CPU-level caches
1701:  * if the pool has an associated pool_cache.  drain_end waits
1702:  * for those cross-calls to finish, and then drains the cache
1703:  * (if any) and the pool.
1.101.2.4 yamt 1704:  *
1.101.2.6 yamt 1705:  * Note that this must never be called from interrupt context.
1.3 pk 1706: */
1707: void
1.101.2.6 yamt 1708: pool_drain_start(struct pool **ppp, uint64_t *wp)
1.3 pk 1709: {
1710: struct pool *pp;
1.101.2.6 yamt 1711:
1.101.2.8 yamt 1712: KASSERT(!TAILQ_EMPTY(&pool_head));
1.3 pk 1713:
1.61 chs 1714: pp = NULL;
1.101.2.6 yamt 1715:
1716: /* Find next pool to drain, and add a reference. */
1717: mutex_enter(&pool_head_lock);
1718: do {
1719: if (drainpp == NULL) {
1.101.2.8 yamt 1720: drainpp = TAILQ_FIRST(&pool_head);
1.101.2.6 yamt 1721: }
1722: if (drainpp != NULL) {
1723: pp = drainpp;
1.101.2.8 yamt 1724: drainpp = TAILQ_NEXT(pp, pr_poollist);
1.101.2.6 yamt 1725: }
1726: /*
1727: * Skip completely idle pools. We depend on at least
1728: * one pool in the system being active.
1729: */
1730: } while (pp == NULL || pp->pr_npages == 0);
1731: pp->pr_refcnt++;
1732: mutex_exit(&pool_head_lock);
1733:
1734: 	/* If there is a pool_cache, drain CPU-level caches. */
1735: *ppp = pp;
1736: if (pp->pr_cache != NULL) {
1737: *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
1738: pp->pr_cache, NULL);
1739: }
1740: }
1741:
1742: void
1743: pool_drain_end(struct pool *pp, uint64_t where)
1744: {
1745:
1746: if (pp == NULL)
1747: return;
1748:
1749: KASSERT(pp->pr_refcnt > 0);
1750:
1751: /* Wait for remote draining to complete. */
1752: if (pp->pr_cache != NULL)
1753: xc_wait(where);
1754:
1755: 	/* Drain the cache (if any) and the pool. */
1756: pool_reclaim(pp);
1757:
1758: 	/* Finally, release the reference taken in pool_drain_start(). */
1759: mutex_enter(&pool_head_lock);
1760: pp->pr_refcnt--;
1761: cv_broadcast(&pool_busy);
1762: mutex_exit(&pool_head_lock);
1.3 pk 1763: }
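/*
 * Editorial example (not part of the original source): the intended
 * calling pattern for the two-stage drain above, as used by a
 * pagedaemon-style caller.  example_drain_one_pool() is a hypothetical
 * wrapper; a real caller may overlap other reclamation work between
 * the two stages.
 */
static void
example_drain_one_pool(void)
{
	struct pool *pp;
	uint64_t where;

	/* Stage 1: pick a pool and start draining its per-CPU caches. */
	pool_drain_start(&pp, &where);

	/* Stage 2: wait for the cross-call, then drain the cache and pool. */
	pool_drain_end(pp, where);
}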
1764:
1765: /*
1766: * Diagnostic helpers.
1767: */
1768: void
1.42 thorpej 1769: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1770: {
1771:
1.25 thorpej 1772: pool_print1(pp, modif, printf);
1.21 thorpej 1773: }
1774:
1.25 thorpej 1775: void
1.101.2.1 yamt 1776: pool_printall(const char *modif, void (*pr)(const char *, ...))
1777: {
1778: struct pool *pp;
1779:
1.101.2.8 yamt 1780: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1.101.2.1 yamt 1781: pool_printit(pp, modif, pr);
1782: }
1783: }
1784:
1785: void
1.42 thorpej 1786: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1787: {
1788:
1789: if (pp == NULL) {
1790: (*pr)("Must specify a pool to print.\n");
1791: return;
1792: }
1793:
1794: pool_print1(pp, modif, pr);
1795: }
1796:
1.21 thorpej 1797: static void
1.97 yamt 1798: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1799: void (*pr)(const char *, ...))
1.88 chs 1800: {
1801: struct pool_item_header *ph;
1802: #ifdef DIAGNOSTIC
1803: struct pool_item *pi;
1804: #endif
1805:
1806: LIST_FOREACH(ph, pl, ph_pagelist) {
1.101.2.11 yamt 1807: (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1808: ph->ph_page, ph->ph_nmissing, ph->ph_time);
1.88 chs 1809: #ifdef DIAGNOSTIC
1.97 yamt 1810: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.101.2.1 yamt 1811: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1812: if (pi->pi_magic != PI_MAGIC) {
1813: (*pr)("\t\t\titem %p, magic 0x%x\n",
1814: pi, pi->pi_magic);
1815: }
1.88 chs 1816: }
1817: }
1818: #endif
1819: }
1820: }
1821:
1822: static void
1.42 thorpej 1823: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1824: {
1.25 thorpej 1825: struct pool_item_header *ph;
1.101.2.6 yamt 1826: pool_cache_t pc;
1827: pcg_t *pcg;
1828: pool_cache_cpu_t *cc;
1829: uint64_t cpuhit, cpumiss;
1.44 thorpej 1830: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1831: char c;
1832:
1833: while ((c = *modif++) != '\0') {
1834: if (c == 'l')
1835: print_log = 1;
1836: if (c == 'p')
1837: print_pagelist = 1;
1.44 thorpej 1838: if (c == 'c')
1839: print_cache = 1;
1.25 thorpej 1840: }
1841:
1.101.2.6 yamt 1842: if ((pc = pp->pr_cache) != NULL) {
1843: (*pr)("POOL CACHE");
1844: } else {
1845: (*pr)("POOL");
1846: }
1847:
1848: (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1.25 thorpej 1849: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1850: pp->pr_roflags);
1.66 thorpej 1851: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1852: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1853: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1854: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1855: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1856:
1.101.2.6 yamt 1857: (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1.25 thorpej 1858: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1859: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1860: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1861:
1862: if (print_pagelist == 0)
1863: goto skip_pagelist;
1864:
1.88 chs 1865: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1866: (*pr)("\n\tempty page list:\n");
1.97 yamt 1867: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1868: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1869: (*pr)("\n\tfull page list:\n");
1.97 yamt 1870: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1871: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1872: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1873: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1874:
1.25 thorpej 1875: if (pp->pr_curpage == NULL)
1876: (*pr)("\tno current page\n");
1877: else
1878: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1879:
1880: skip_pagelist:
1881: if (print_log == 0)
1882: goto skip_log;
1883:
1884: (*pr)("\n");
1885: if ((pp->pr_roflags & PR_LOGGING) == 0)
1886: (*pr)("\tno log\n");
1.101.2.2 yamt 1887: else {
1.25 thorpej 1888: pr_printlog(pp, NULL, pr);
1.101.2.2 yamt 1889: }
1.3 pk 1890:
1.25 thorpej 1891: skip_log:
1.44 thorpej 1892:
1.101.2.1 yamt 1893: #define PR_GROUPLIST(pcg) \
1894: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1.101.2.8 yamt 1895: for (i = 0; i < pcg->pcg_size; i++) { \
1.101.2.1 yamt 1896: if (pcg->pcg_objects[i].pcgo_pa != \
1897: POOL_PADDR_INVALID) { \
1898: (*pr)("\t\t\t%p, 0x%llx\n", \
1899: pcg->pcg_objects[i].pcgo_va, \
1900: (unsigned long long) \
1901: pcg->pcg_objects[i].pcgo_pa); \
1902: } else { \
1903: (*pr)("\t\t\t%p\n", \
1904: pcg->pcg_objects[i].pcgo_va); \
1905: } \
1906: }
1907:
1.101.2.6 yamt 1908: if (pc != NULL) {
1909: cpuhit = 0;
1910: cpumiss = 0;
1911: for (i = 0; i < MAXCPUS; i++) {
1912: if ((cc = pc->pc_cpus[i]) == NULL)
1913: continue;
1914: cpuhit += cc->cc_hits;
1915: cpumiss += cc->cc_misses;
1916: }
1917: (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1918: (*pr)("\tcache layer hits %llu misses %llu\n",
1919: pc->pc_hits, pc->pc_misses);
1920: (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1921: pc->pc_hits + pc->pc_misses - pc->pc_contended,
1922: pc->pc_contended);
1923: (*pr)("\tcache layer empty groups %u full groups %u\n",
1924: pc->pc_nempty, pc->pc_nfull);
1925: if (print_cache) {
1926: (*pr)("\tfull cache groups:\n");
1927: for (pcg = pc->pc_fullgroups; pcg != NULL;
1928: pcg = pcg->pcg_next) {
1929: PR_GROUPLIST(pcg);
1930: }
1931: (*pr)("\tempty cache groups:\n");
1932: for (pcg = pc->pc_emptygroups; pcg != NULL;
1933: pcg = pcg->pcg_next) {
1934: PR_GROUPLIST(pcg);
1935: }
1.44 thorpej 1936: }
1937: }
1.101.2.1 yamt 1938: #undef PR_GROUPLIST
1.44 thorpej 1939:
1.88 chs 1940: pr_enter_check(pp, pr);
1941: }
1942:
1943: static int
1944: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1945: {
1946: struct pool_item *pi;
1.101.2.4 yamt 1947: void *page;
1.88 chs 1948: int n;
1949:
1.101.2.2 yamt 1950: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.101.2.4 yamt 1951: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.101.2.2 yamt 1952: if (page != ph->ph_page &&
1953: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1954: if (label != NULL)
1955: printf("%s: ", label);
1956: printf("pool(%p:%s): page inconsistency: page %p;"
1957: " at page head addr %p (p %p)\n", pp,
1958: pp->pr_wchan, ph->ph_page,
1959: ph, page);
1960: return 1;
1961: }
1.88 chs 1962: }
1.3 pk 1963:
1.97 yamt 1964: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1965: return 0;
1966:
1.101.2.1 yamt 1967: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1968: pi != NULL;
1.101.2.1 yamt 1969: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1970:
1971: #ifdef DIAGNOSTIC
1972: if (pi->pi_magic != PI_MAGIC) {
1973: if (label != NULL)
1974: printf("%s: ", label);
1975: printf("pool(%s): free list modified: magic=%x;"
1.101.2.2 yamt 1976: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1977: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.101.2.2 yamt 1978: n, pi);
1.88 chs 1979: panic("pool");
1980: }
1981: #endif
1.101.2.2 yamt 1982: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1983: continue;
1984: }
1.101.2.4 yamt 1985: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1986: if (page == ph->ph_page)
1987: continue;
1988:
1989: if (label != NULL)
1990: printf("%s: ", label);
1991: printf("pool(%p:%s): page inconsistency: page %p;"
1992: " item ordinal %d; addr %p (p %p)\n", pp,
1993: pp->pr_wchan, ph->ph_page,
1994: n, pi, page);
1995: return 1;
1996: }
1997: return 0;
1.3 pk 1998: }
1999:
1.88 chs 2000:
1.3 pk 2001: int
1.42 thorpej 2002: pool_chk(struct pool *pp, const char *label)
1.3 pk 2003: {
2004: struct pool_item_header *ph;
2005: int r = 0;
2006:
1.101.2.6 yamt 2007: mutex_enter(&pp->pr_lock);
1.88 chs 2008: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2009: r = pool_chk_page(pp, label, ph);
2010: if (r) {
2011: goto out;
2012: }
2013: }
2014: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2015: r = pool_chk_page(pp, label, ph);
2016: if (r) {
1.3 pk 2017: goto out;
2018: }
1.88 chs 2019: }
2020: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2021: r = pool_chk_page(pp, label, ph);
2022: if (r) {
1.3 pk 2023: goto out;
2024: }
2025: }
1.88 chs 2026:
1.3 pk 2027: out:
1.101.2.6 yamt 2028: mutex_exit(&pp->pr_lock);
1.3 pk 2029: return (r);
1.43 thorpej 2030: }
2031:
2032: /*
2033: * pool_cache_init:
2034: *
2035: * Initialize a pool cache.
1.101.2.6 yamt 2036: */
2037: pool_cache_t
2038: pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
2039: const char *wchan, struct pool_allocator *palloc, int ipl,
2040: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
2041: {
2042: pool_cache_t pc;
2043:
2044: pc = pool_get(&cache_pool, PR_WAITOK);
2045: if (pc == NULL)
2046: return NULL;
2047:
2048: pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
2049: palloc, ipl, ctor, dtor, arg);
2050:
2051: return pc;
2052: }
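/*
 * Editorial example (not part of the original source): a hypothetical
 * "foo" subsystem creating a cache of constructed objects.  struct foo,
 * foo_ctor(), foo_dtor() and foo_init() are illustrative names only;
 * <sys/pool.h> and <sys/mutex.h> are assumed to be included.
 */
struct foo {
	kmutex_t	f_lock;
	/* ... other subsystem-specific fields ... */
};

static pool_cache_t foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{
	struct foo *f = obj;

	/* Expensive, one-time initialization; return 0 on success. */
	mutex_init(&f->f_lock, MUTEX_DEFAULT, IPL_NONE);
	return 0;
}

static void
foo_dtor(void *arg, void *obj)
{
	struct foo *f = obj;

	mutex_destroy(&f->f_lock);
}

void
foo_init(void)
{

	/*
	 * A NULL allocator together with IPL_NONE selects
	 * pool_allocator_nointr (see pool_cache_bootstrap() below).
	 */
	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0, "foopl",
	    NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
}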
2053:
2054: /*
2055: * pool_cache_bootstrap:
1.43 thorpej 2056: *
1.101.2.6 yamt 2057: * Kernel-private version of pool_cache_init(). The caller
2058: * provides initial storage.
1.43 thorpej 2059: */
2060: void
1.101.2.6 yamt 2061: pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
2062: u_int align_offset, u_int flags, const char *wchan,
2063: struct pool_allocator *palloc, int ipl,
2064: int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1.43 thorpej 2065: void *arg)
2066: {
1.101.2.6 yamt 2067: CPU_INFO_ITERATOR cii;
1.101.2.8 yamt 2068: pool_cache_t pc1;
1.101.2.6 yamt 2069: struct cpu_info *ci;
2070: struct pool *pp;
2071:
2072: pp = &pc->pc_pool;
2073: if (palloc == NULL && ipl == IPL_NONE)
2074: palloc = &pool_allocator_nointr;
2075: pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1.43 thorpej 2076:
1.101.2.7 yamt 2077: /*
2078: * XXXAD hack to prevent IP input processing from blocking.
2079: */
2080: if (ipl == IPL_SOFTNET) {
2081: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
2082: } else {
2083: mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
2084: }
1.43 thorpej 2085:
1.101.2.6 yamt 2086: if (ctor == NULL) {
2087: ctor = (int (*)(void *, void *, int))nullop;
2088: }
2089: if (dtor == NULL) {
2090: dtor = (void (*)(void *, void *))nullop;
2091: }
1.43 thorpej 2092:
1.101.2.6 yamt 2093: pc->pc_emptygroups = NULL;
2094: pc->pc_fullgroups = NULL;
2095: pc->pc_partgroups = NULL;
1.43 thorpej 2096: pc->pc_ctor = ctor;
2097: pc->pc_dtor = dtor;
2098: pc->pc_arg = arg;
1.101.2.6 yamt 2099: pc->pc_hits = 0;
1.48 thorpej 2100: pc->pc_misses = 0;
1.101.2.6 yamt 2101: pc->pc_nempty = 0;
2102: pc->pc_npart = 0;
2103: pc->pc_nfull = 0;
2104: pc->pc_contended = 0;
2105: pc->pc_refcnt = 0;
2106: pc->pc_freecheck = NULL;
2107:
1.101.2.8 yamt 2108: if ((flags & PR_LARGECACHE) != 0) {
2109: pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
2110: } else {
2111: pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
2112: }
2113:
1.101.2.6 yamt 2114: /* Allocate per-CPU caches. */
2115: memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
2116: pc->pc_ncpu = 0;
1.101.2.8 yamt 2117: if (ncpu < 2) {
1.101.2.7 yamt 2118: /* XXX For sparc: boot CPU is not attached yet. */
2119: pool_cache_cpu_init1(curcpu(), pc);
2120: } else {
2121: for (CPU_INFO_FOREACH(cii, ci)) {
2122: pool_cache_cpu_init1(ci, pc);
2123: }
1.101.2.6 yamt 2124: }
1.101.2.8 yamt 2125:
2126: 	/* Add to the list of all pool caches. */
2127: if (__predict_true(!cold))
1.101.2.6 yamt 2128: mutex_enter(&pool_head_lock);
1.101.2.8 yamt 2129: TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
2130: if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
2131: break;
1.101.2.6 yamt 2132: }
1.101.2.8 yamt 2133: if (pc1 == NULL)
2134: TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
2135: else
2136: TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
2137: if (__predict_true(!cold))
2138: mutex_exit(&pool_head_lock);
2139:
2140: membar_sync();
2141: pp->pr_cache = pc;
1.43 thorpej 2142: }
2143:
2144: /*
2145: * pool_cache_destroy:
2146: *
2147: * Destroy a pool cache.
2148: */
2149: void
1.101.2.6 yamt 2150: pool_cache_destroy(pool_cache_t pc)
1.43 thorpej 2151: {
1.101.2.6 yamt 2152: struct pool *pp = &pc->pc_pool;
2153: pool_cache_cpu_t *cc;
2154: pcg_t *pcg;
2155: int i;
2156:
2157: /* Remove it from the global list. */
2158: mutex_enter(&pool_head_lock);
2159: while (pc->pc_refcnt != 0)
2160: cv_wait(&pool_busy, &pool_head_lock);
1.101.2.8 yamt 2161: TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1.101.2.6 yamt 2162: mutex_exit(&pool_head_lock);
1.43 thorpej 2163:
2164: /* First, invalidate the entire cache. */
2165: pool_cache_invalidate(pc);
2166:
1.101.2.6 yamt 2167: /* Disassociate it from the pool. */
2168: mutex_enter(&pp->pr_lock);
2169: pp->pr_cache = NULL;
2170: mutex_exit(&pp->pr_lock);
2171:
2172: /* Destroy per-CPU data */
2173: for (i = 0; i < MAXCPUS; i++) {
2174: if ((cc = pc->pc_cpus[i]) == NULL)
2175: continue;
2176: if ((pcg = cc->cc_current) != NULL) {
2177: pcg->pcg_next = NULL;
2178: pool_cache_invalidate_groups(pc, pcg);
2179: }
2180: if ((pcg = cc->cc_previous) != NULL) {
2181: pcg->pcg_next = NULL;
2182: pool_cache_invalidate_groups(pc, pcg);
2183: }
2184: if (cc != &pc->pc_cpu0)
2185: pool_put(&cache_cpu_pool, cc);
2186: }
2187:
2188: /* Finally, destroy it. */
2189: mutex_destroy(&pc->pc_lock);
2190: pool_destroy(pp);
2191: pool_put(&cache_pool, pc);
1.43 thorpej 2192: }
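/*
 * Editorial example (not part of the original source): tearing down the
 * hypothetical foo_cache from the sketch after pool_cache_init() above,
 * e.g. at module or subsystem detach time.
 */
static void
foo_fini(void)
{

	/*
	 * Destructs all cached objects, frees the per-CPU state and
	 * destroys the underlying pool.
	 */
	pool_cache_destroy(foo_cache);
	foo_cache = NULL;
}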
2193:
1.101.2.6 yamt 2194: /*
2195: * pool_cache_cpu_init1:
2196: *
2197: * Called for each pool_cache whenever a new CPU is attached.
2198: */
2199: static void
2200: pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1.43 thorpej 2201: {
1.101.2.6 yamt 2202: pool_cache_cpu_t *cc;
1.101.2.7 yamt 2203: int index;
2204:
2205: index = ci->ci_index;
1.43 thorpej 2206:
1.101.2.7 yamt 2207: KASSERT(index < MAXCPUS);
1.101.2.6 yamt 2208: KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
1.43 thorpej 2209:
1.101.2.7 yamt 2210: if ((cc = pc->pc_cpus[index]) != NULL) {
2211: KASSERT(cc->cc_cpuindex == index);
1.101.2.6 yamt 2212: return;
2213: }
2214:
2215: /*
2216: * The first CPU is 'free'. This needs to be the case for
2217: * bootstrap - we may not be able to allocate yet.
2218: */
2219: if (pc->pc_ncpu == 0) {
2220: cc = &pc->pc_cpu0;
2221: pc->pc_ncpu = 1;
2222: } else {
2223: mutex_enter(&pc->pc_lock);
2224: pc->pc_ncpu++;
2225: mutex_exit(&pc->pc_lock);
2226: cc = pool_get(&cache_cpu_pool, PR_WAITOK);
2227: }
2228:
2229: cc->cc_ipl = pc->pc_pool.pr_ipl;
2230: cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
2231: cc->cc_cache = pc;
1.101.2.7 yamt 2232: cc->cc_cpuindex = index;
1.101.2.6 yamt 2233: cc->cc_hits = 0;
2234: cc->cc_misses = 0;
2235: cc->cc_current = NULL;
2236: cc->cc_previous = NULL;
2237:
1.101.2.7 yamt 2238: pc->pc_cpus[index] = cc;
1.43 thorpej 2239: }
2240:
1.101.2.6 yamt 2241: /*
2242: * pool_cache_cpu_init:
2243: *
2244: * Called whenever a new CPU is attached.
2245: */
2246: void
2247: pool_cache_cpu_init(struct cpu_info *ci)
1.43 thorpej 2248: {
1.101.2.6 yamt 2249: pool_cache_t pc;
1.43 thorpej 2250:
1.101.2.6 yamt 2251: mutex_enter(&pool_head_lock);
1.101.2.8 yamt 2252: TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1.101.2.6 yamt 2253: pc->pc_refcnt++;
2254: mutex_exit(&pool_head_lock);
1.43 thorpej 2255:
1.101.2.6 yamt 2256: pool_cache_cpu_init1(ci, pc);
2257:
2258: mutex_enter(&pool_head_lock);
2259: pc->pc_refcnt--;
2260: cv_broadcast(&pool_busy);
2261: }
2262: mutex_exit(&pool_head_lock);
2263: }
2264:
2265: /*
2266: * pool_cache_reclaim:
2267: *
2268: * Reclaim memory from a pool cache.
2269: */
2270: bool
2271: pool_cache_reclaim(pool_cache_t pc)
2272: {
2273:
2274: return pool_reclaim(&pc->pc_pool);
1.43 thorpej 2275: }
2276:
1.101.2.1 yamt 2277: static void
1.101.2.6 yamt 2278: pool_cache_destruct_object1(pool_cache_t pc, void *object)
1.101.2.1 yamt 2279: {
2280:
1.101.2.6 yamt 2281: (*pc->pc_dtor)(pc->pc_arg, object);
2282: pool_put(&pc->pc_pool, object);
1.101.2.1 yamt 2283: }
2284:
1.43 thorpej 2285: /*
1.101.2.6 yamt 2286: * pool_cache_destruct_object:
1.43 thorpej 2287: *
1.101.2.6 yamt 2288: * Force destruction of an object and its release back into
2289: * the pool.
1.43 thorpej 2290: */
1.101.2.6 yamt 2291: void
2292: pool_cache_destruct_object(pool_cache_t pc, void *object)
1.43 thorpej 2293: {
1.58 thorpej 2294:
1.101.2.6 yamt 2295: FREECHECK_IN(&pc->pc_freecheck, object);
1.43 thorpej 2296:
1.101.2.6 yamt 2297: pool_cache_destruct_object1(pc, object);
2298: }
1.43 thorpej 2299:
1.101.2.6 yamt 2300: /*
2301: * pool_cache_invalidate_groups:
2302: *
2303: * Invalidate a chain of groups and destruct all objects.
2304: */
2305: static void
2306: pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
2307: {
2308: void *object;
2309: pcg_t *next;
2310: int i;
1.101.2.3 yamt 2311:
1.101.2.6 yamt 2312: for (; pcg != NULL; pcg = next) {
2313: next = pcg->pcg_next;
1.43 thorpej 2314:
1.101.2.6 yamt 2315: for (i = 0; i < pcg->pcg_avail; i++) {
2316: object = pcg->pcg_objects[i].pcgo_va;
2317: pool_cache_destruct_object1(pc, object);
2318: }
1.43 thorpej 2319:
1.101.2.8 yamt 2320: if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
2321: pool_put(&pcg_large_pool, pcg);
2322: } else {
2323: KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
2324: pool_put(&pcg_normal_pool, pcg);
2325: }
1.101.2.1 yamt 2326: }
1.43 thorpej 2327: }
2328:
2329: /*
1.101.2.6 yamt 2330: * pool_cache_invalidate:
1.43 thorpej 2331: *
1.101.2.6 yamt 2332: * Invalidate a pool cache (destruct and release all of the
2333: * cached objects). Does not reclaim objects from the pool.
1.43 thorpej 2334: */
2335: void
1.101.2.6 yamt 2336: pool_cache_invalidate(pool_cache_t pc)
1.43 thorpej 2337: {
1.101.2.6 yamt 2338: pcg_t *full, *empty, *part;
1.43 thorpej 2339:
1.101.2.6 yamt 2340: mutex_enter(&pc->pc_lock);
2341: full = pc->pc_fullgroups;
2342: empty = pc->pc_emptygroups;
2343: part = pc->pc_partgroups;
2344: pc->pc_fullgroups = NULL;
2345: pc->pc_emptygroups = NULL;
2346: pc->pc_partgroups = NULL;
2347: pc->pc_nfull = 0;
2348: pc->pc_nempty = 0;
2349: pc->pc_npart = 0;
2350: mutex_exit(&pc->pc_lock);
2351:
2352: pool_cache_invalidate_groups(pc, full);
2353: pool_cache_invalidate_groups(pc, empty);
2354: pool_cache_invalidate_groups(pc, part);
2355: }
2356:
2357: void
2358: pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2359: {
2360:
2361: pool_set_drain_hook(&pc->pc_pool, fn, arg);
2362: }
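/*
 * Editorial example (not part of the original source): registering a
 * drain hook on the hypothetical foo_cache from the sketch after
 * pool_cache_init() above.  The hook is invoked with the pool unlocked
 * when the backend allocator runs dry (see pool_reclaim() above and
 * pool_allocator_alloc() below).  foo_drain() and foo_register_drain()
 * are illustrative names.
 */
static void
foo_drain(void *arg, int flags)
{

	/*
	 * flags is PR_NOWAIT or PR_WAITOK.  Release any objects the
	 * subsystem is merely caching speculatively.
	 */
}

static void
foo_register_drain(void)
{

	pool_cache_set_drain_hook(foo_cache, foo_drain, NULL);
}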
2363:
2364: void
2365: pool_cache_setlowat(pool_cache_t pc, int n)
2366: {
2367:
2368: pool_setlowat(&pc->pc_pool, n);
2369: }
2370:
2371: void
2372: pool_cache_sethiwat(pool_cache_t pc, int n)
2373: {
2374:
2375: pool_sethiwat(&pc->pc_pool, n);
2376: }
2377:
2378: void
2379: pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2380: {
2381:
2382: pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2383: }
2384:
2385: static inline pool_cache_cpu_t *
2386: pool_cache_cpu_enter(pool_cache_t pc, int *s)
2387: {
2388: pool_cache_cpu_t *cc;
2389:
2390: /*
2391: * Prevent other users of the cache from accessing our
2392: * CPU-local data. To avoid touching shared state, we
2393: 	 * pull the necessary information from CPU-local data.
2394: */
1.101.2.7 yamt 2395: crit_enter();
2396: cc = pc->pc_cpus[curcpu()->ci_index];
1.101.2.6 yamt 2397: KASSERT(cc->cc_cache == pc);
1.101.2.7 yamt 2398: if (cc->cc_ipl != IPL_NONE) {
1.101.2.6 yamt 2399: *s = splraiseipl(cc->cc_iplcookie);
2400: }
2401: KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
1.43 thorpej 2402:
1.101.2.6 yamt 2403: return cc;
2404: }
2405:
2406: static inline void
2407: pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
2408: {
2409:
2410: /* No longer need exclusive access to the per-CPU data. */
1.101.2.7 yamt 2411: if (cc->cc_ipl != IPL_NONE) {
1.101.2.6 yamt 2412: splx(*s);
1.101.2.1 yamt 2413: }
1.101.2.7 yamt 2414: crit_exit();
1.101.2.6 yamt 2415: }
2416:
2417: #if __GNUC_PREREQ__(3, 0)
2418: __attribute ((noinline))
2419: #endif
2420: pool_cache_cpu_t *
2421: pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
2422: paddr_t *pap, int flags)
2423: {
2424: pcg_t *pcg, *cur;
2425: uint64_t ncsw;
2426: pool_cache_t pc;
2427: void *object;
2428:
2429: pc = cc->cc_cache;
2430: cc->cc_misses++;
2431:
2432: /*
2433: 	 * Nothing was available locally.  Try to grab a group
2434: * from the cache.
2435: */
2436: if (!mutex_tryenter(&pc->pc_lock)) {
2437: ncsw = curlwp->l_ncsw;
2438: mutex_enter(&pc->pc_lock);
2439: pc->pc_contended++;
1.43 thorpej 2440:
2441: /*
1.101.2.6 yamt 2442: * If we context switched while locking, then
2443: * our view of the per-CPU data is invalid:
2444: * retry.
1.43 thorpej 2445: */
1.101.2.6 yamt 2446: if (curlwp->l_ncsw != ncsw) {
2447: mutex_exit(&pc->pc_lock);
2448: pool_cache_cpu_exit(cc, s);
2449: return pool_cache_cpu_enter(pc, s);
2450: }
2451: }
1.43 thorpej 2452:
1.101.2.6 yamt 2453: if ((pcg = pc->pc_fullgroups) != NULL) {
2454: /*
2455: * If there's a full group, release our empty
2456: * group back to the cache. Install the full
2457: * group as cc_current and return.
2458: */
2459: if ((cur = cc->cc_current) != NULL) {
2460: KASSERT(cur->pcg_avail == 0);
2461: cur->pcg_next = pc->pc_emptygroups;
2462: pc->pc_emptygroups = cur;
2463: pc->pc_nempty++;
1.101.2.1 yamt 2464: }
1.101.2.8 yamt 2465: KASSERT(pcg->pcg_avail == pcg->pcg_size);
1.101.2.6 yamt 2466: cc->cc_current = pcg;
2467: pc->pc_fullgroups = pcg->pcg_next;
2468: pc->pc_hits++;
2469: pc->pc_nfull--;
2470: mutex_exit(&pc->pc_lock);
2471: return cc;
1.43 thorpej 2472: }
2473:
1.101.2.6 yamt 2474: /*
2475: * Nothing available locally or in cache. Take the slow
2476: * path: fetch a new object from the pool and construct
2477: * it.
2478: */
2479: pc->pc_misses++;
2480: mutex_exit(&pc->pc_lock);
2481: pool_cache_cpu_exit(cc, s);
1.43 thorpej 2482:
1.101.2.6 yamt 2483: object = pool_get(&pc->pc_pool, flags);
2484: *objectp = object;
2485: if (object == NULL)
2486: return NULL;
2487:
2488: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
2489: pool_put(&pc->pc_pool, object);
2490: *objectp = NULL;
2491: return NULL;
1.101.2.1 yamt 2492: }
1.51 thorpej 2493:
1.101.2.6 yamt 2494: KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2495: (pc->pc_pool.pr_align - 1)) == 0);
2496:
2497: if (pap != NULL) {
2498: #ifdef POOL_VTOPHYS
2499: *pap = POOL_VTOPHYS(object);
2500: #else
2501: *pap = POOL_PADDR_INVALID;
2502: #endif
2503: }
1.51 thorpej 2504:
1.101.2.6 yamt 2505: FREECHECK_OUT(&pc->pc_freecheck, object);
2506: return NULL;
1.43 thorpej 2507: }
2508:
1.101.2.4 yamt 2509: /*
1.101.2.6 yamt 2510: * pool_cache_get{,_paddr}:
1.101.2.4 yamt 2511: *
1.101.2.6 yamt 2512: * Get an object from a pool cache (optionally returning
2513: * the physical address of the object).
1.101.2.4 yamt 2514: */
1.101.2.6 yamt 2515: void *
2516: pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
1.101.2.1 yamt 2517: {
1.101.2.6 yamt 2518: pool_cache_cpu_t *cc;
2519: pcg_t *pcg;
1.101.2.1 yamt 2520: void *object;
1.101.2.6 yamt 2521: int s;
1.101.2.1 yamt 2522:
1.101.2.6 yamt 2523: #ifdef LOCKDEBUG
2524: if (flags & PR_WAITOK)
1.101.2.12! yamt 2525: ASSERT_SLEEPABLE();
1.101.2.6 yamt 2526: #endif
1.101.2.4 yamt 2527:
1.101.2.6 yamt 2528: cc = pool_cache_cpu_enter(pc, &s);
2529: do {
2530: 		/* Try to allocate an object from the current group. */
2531: pcg = cc->cc_current;
2532: if (pcg != NULL && pcg->pcg_avail > 0) {
2533: object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2534: if (pap != NULL)
2535: *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
1.101.2.9 yamt 2536: #if defined(DIAGNOSTIC)
1.101.2.6 yamt 2537: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
1.101.2.9 yamt 2538: #endif /* defined(DIAGNOSTIC) */
1.101.2.8 yamt 2539: KASSERT(pcg->pcg_avail <= pcg->pcg_size);
1.101.2.6 yamt 2540: KASSERT(object != NULL);
2541: cc->cc_hits++;
2542: pool_cache_cpu_exit(cc, &s);
2543: FREECHECK_OUT(&pc->pc_freecheck, object);
2544: return object;
1.101.2.1 yamt 2545: }
1.101.2.4 yamt 2546:
1.101.2.6 yamt 2547: /*
2548: * That failed. If the previous group isn't empty, swap
2549: * it with the current group and allocate from there.
2550: */
2551: pcg = cc->cc_previous;
2552: if (pcg != NULL && pcg->pcg_avail > 0) {
2553: cc->cc_previous = cc->cc_current;
2554: cc->cc_current = pcg;
2555: continue;
2556: }
2557:
2558: /*
2559: * Can't allocate from either group: try the slow path.
2560: * If get_slow() allocated an object for us, or if
2561: * no more objects are available, it will return NULL.
2562: * Otherwise, we need to retry.
2563: */
2564: cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
2565: } while (cc != NULL);
2566:
2567: return object;
1.101.2.1 yamt 2568: }
2569:
1.101.2.6 yamt 2570: #if __GNUC_PREREQ__(3, 0)
2571: __attribute ((noinline))
2572: #endif
2573: pool_cache_cpu_t *
2574: pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
1.101.2.1 yamt 2575: {
1.101.2.6 yamt 2576: pcg_t *pcg, *cur;
2577: uint64_t ncsw;
2578: pool_cache_t pc;
1.101.2.8 yamt 2579: u_int nobj;
1.101.2.1 yamt 2580:
1.101.2.6 yamt 2581: pc = cc->cc_cache;
2582: cc->cc_misses++;
1.101.2.1 yamt 2583:
1.101.2.6 yamt 2584: /*
2585: * No free slots locally. Try to grab an empty, unused
2586: * group from the cache.
2587: */
2588: if (!mutex_tryenter(&pc->pc_lock)) {
2589: ncsw = curlwp->l_ncsw;
2590: mutex_enter(&pc->pc_lock);
2591: pc->pc_contended++;
1.101.2.1 yamt 2592:
1.101.2.6 yamt 2593: /*
2594: * If we context switched while locking, then
2595: * our view of the per-CPU data is invalid:
2596: * retry.
2597: */
2598: if (curlwp->l_ncsw != ncsw) {
2599: mutex_exit(&pc->pc_lock);
2600: pool_cache_cpu_exit(cc, s);
2601: return pool_cache_cpu_enter(pc, s);
2602: }
2603: }
2604:
2605: if ((pcg = pc->pc_emptygroups) != NULL) {
2606: /*
2607: 		 * If there's an empty group, release our full
2608: * group back to the cache. Install the empty
1.101.2.8 yamt 2609: * group and return.
1.101.2.6 yamt 2610: */
2611: KASSERT(pcg->pcg_avail == 0);
2612: pc->pc_emptygroups = pcg->pcg_next;
1.101.2.8 yamt 2613: if (cc->cc_previous == NULL) {
2614: cc->cc_previous = pcg;
2615: } else {
2616: if ((cur = cc->cc_current) != NULL) {
2617: KASSERT(cur->pcg_avail == pcg->pcg_size);
2618: cur->pcg_next = pc->pc_fullgroups;
2619: pc->pc_fullgroups = cur;
2620: pc->pc_nfull++;
2621: }
2622: cc->cc_current = pcg;
2623: }
1.101.2.6 yamt 2624: pc->pc_hits++;
2625: pc->pc_nempty--;
2626: mutex_exit(&pc->pc_lock);
2627: return cc;
2628: }
2629:
2630: /*
2631: * Nothing available locally or in cache. Take the
2632: * slow path and try to allocate a new group that we
2633: * can release to.
2634: */
2635: pc->pc_misses++;
2636: mutex_exit(&pc->pc_lock);
2637: pool_cache_cpu_exit(cc, s);
2638:
2639: /*
2640: * If we can't allocate a new group, just throw the
2641: * object away.
2642: */
1.101.2.8 yamt 2643: nobj = pc->pc_pcgsize;
2644: if (pool_cache_disable) {
2645: pcg = NULL;
2646: } else if (nobj == PCG_NOBJECTS_LARGE) {
2647: pcg = pool_get(&pcg_large_pool, PR_NOWAIT);
2648: } else {
2649: pcg = pool_get(&pcg_normal_pool, PR_NOWAIT);
2650: }
1.101.2.6 yamt 2651: if (pcg == NULL) {
2652: pool_cache_destruct_object(pc, object);
2653: return NULL;
2654: }
2655: pcg->pcg_avail = 0;
1.101.2.8 yamt 2656: pcg->pcg_size = nobj;
1.101.2.6 yamt 2657:
2658: /*
2659: * Add the empty group to the cache and try again.
2660: */
2661: mutex_enter(&pc->pc_lock);
2662: pcg->pcg_next = pc->pc_emptygroups;
2663: pc->pc_emptygroups = pcg;
2664: pc->pc_nempty++;
2665: mutex_exit(&pc->pc_lock);
2666:
2667: return pool_cache_cpu_enter(pc, s);
2668: }
1.101.2.1 yamt 2669:
1.43 thorpej 2670: /*
1.101.2.6 yamt 2671: * pool_cache_put{,_paddr}:
1.43 thorpej 2672: *
1.101.2.6 yamt 2673: * Put an object back to the pool cache (optionally caching the
2674: * physical address of the object).
1.43 thorpej 2675: */
1.101 thorpej 2676: void
1.101.2.6 yamt 2677: pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
1.43 thorpej 2678: {
1.101.2.6 yamt 2679: pool_cache_cpu_t *cc;
2680: pcg_t *pcg;
2681: int s;
1.101 thorpej 2682:
1.101.2.6 yamt 2683: FREECHECK_IN(&pc->pc_freecheck, object);
1.43 thorpej 2684:
1.101.2.6 yamt 2685: cc = pool_cache_cpu_enter(pc, &s);
2686: do {
2687: /* If the current group isn't full, release it there. */
2688: pcg = cc->cc_current;
1.101.2.8 yamt 2689: if (pcg != NULL && pcg->pcg_avail < pcg->pcg_size) {
1.101.2.6 yamt 2690: pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2691: pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2692: pcg->pcg_avail++;
2693: cc->cc_hits++;
2694: pool_cache_cpu_exit(cc, &s);
2695: return;
2696: }
1.43 thorpej 2697:
1.101.2.6 yamt 2698: /*
2699: * That failed. If the previous group is empty, swap
2700: * it with the current group and try again.
2701: */
2702: pcg = cc->cc_previous;
2703: if (pcg != NULL && pcg->pcg_avail == 0) {
2704: cc->cc_previous = cc->cc_current;
2705: cc->cc_current = pcg;
2706: continue;
2707: }
1.43 thorpej 2708:
1.101.2.6 yamt 2709: /*
2710: * Can't free to either group: try the slow path.
2711: * If put_slow() releases the object for us, it
2712: * will return NULL. Otherwise we need to retry.
2713: */
2714: cc = pool_cache_put_slow(cc, &s, object, pa);
2715: } while (cc != NULL);
1.43 thorpej 2716: }
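/*
 * Editorial example (not part of the original source): allocation and
 * release paired with the hypothetical foo_cache sketched after
 * pool_cache_init() above.  Callers normally use the pool_cache_get()
 * and pool_cache_put() convenience wrappers; the _paddr variants are
 * spelled out here to match the functions above.
 */
struct foo *
foo_alloc(void)
{

	/* PR_WAITOK: may sleep; pass NULL if the paddr is not needed. */
	return pool_cache_get_paddr(foo_cache, PR_WAITOK, NULL);
}

void
foo_free(struct foo *f)
{

	pool_cache_put_paddr(foo_cache, f, POOL_PADDR_INVALID);
}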
2717:
2718: /*
1.101.2.6 yamt 2719: * pool_cache_xcall:
1.43 thorpej 2720: *
1.101.2.6 yamt 2721: * Transfer objects from the per-CPU cache to the global cache.
2722: * Run within a cross-call thread.
1.43 thorpej 2723: */
2724: static void
1.101.2.6 yamt 2725: pool_cache_xcall(pool_cache_t pc)
1.43 thorpej 2726: {
1.101.2.6 yamt 2727: pool_cache_cpu_t *cc;
2728: pcg_t *prev, *cur, **list;
2729: int s = 0; /* XXXgcc */
2730:
2731: cc = pool_cache_cpu_enter(pc, &s);
2732: cur = cc->cc_current;
2733: cc->cc_current = NULL;
2734: prev = cc->cc_previous;
2735: cc->cc_previous = NULL;
2736: pool_cache_cpu_exit(cc, &s);
2737:
2738: /*
2739: * XXXSMP Go to splvm to prevent kernel_lock from being taken,
2740: * because locks at IPL_SOFTXXX are still spinlocks. Does not
2741: * apply to IPL_SOFTBIO. Cross-call threads do not take the
2742: * kernel_lock.
1.101 thorpej 2743: */
1.101.2.6 yamt 2744: s = splvm();
2745: mutex_enter(&pc->pc_lock);
2746: if (cur != NULL) {
1.101.2.8 yamt 2747: if (cur->pcg_avail == cur->pcg_size) {
1.101.2.6 yamt 2748: list = &pc->pc_fullgroups;
2749: pc->pc_nfull++;
2750: } else if (cur->pcg_avail == 0) {
2751: list = &pc->pc_emptygroups;
2752: pc->pc_nempty++;
2753: } else {
2754: list = &pc->pc_partgroups;
2755: pc->pc_npart++;
2756: }
2757: cur->pcg_next = *list;
2758: *list = cur;
2759: }
2760: if (prev != NULL) {
1.101.2.8 yamt 2761: if (prev->pcg_avail == prev->pcg_size) {
1.101.2.6 yamt 2762: list = &pc->pc_fullgroups;
2763: pc->pc_nfull++;
2764: } else if (prev->pcg_avail == 0) {
2765: list = &pc->pc_emptygroups;
2766: pc->pc_nempty++;
2767: } else {
2768: list = &pc->pc_partgroups;
2769: pc->pc_npart++;
2770: }
2771: prev->pcg_next = *list;
2772: *list = prev;
2773: }
2774: mutex_exit(&pc->pc_lock);
2775: splx(s);
1.3 pk 2776: }
1.66 thorpej 2777:
2778: /*
2779: * Pool backend allocators.
2780: *
2781: * Each pool has a backend allocator that handles allocation, deallocation,
2782: * and any additional draining that might be needed.
2783: *
2784: * We provide two standard allocators:
2785: *
2786: * pool_allocator_kmem - the default when no allocator is specified
2787: *
2788: * pool_allocator_nointr - used for pools that will not be accessed
2789: * in interrupt context.
2790: */
2791: void *pool_page_alloc(struct pool *, int);
2792: void pool_page_free(struct pool *, void *);
2793:
1.101.2.1 yamt 2794: #ifdef POOL_SUBPAGE
2795: struct pool_allocator pool_allocator_kmem_fullpage = {
2796: pool_page_alloc, pool_page_free, 0,
2797: .pa_backingmapptr = &kmem_map,
2798: };
2799: #else
1.66 thorpej 2800: struct pool_allocator pool_allocator_kmem = {
2801: pool_page_alloc, pool_page_free, 0,
1.101.2.1 yamt 2802: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2803: };
1.101.2.1 yamt 2804: #endif
1.66 thorpej 2805:
2806: void *pool_page_alloc_nointr(struct pool *, int);
2807: void pool_page_free_nointr(struct pool *, void *);
2808:
1.101.2.1 yamt 2809: #ifdef POOL_SUBPAGE
2810: struct pool_allocator pool_allocator_nointr_fullpage = {
2811: pool_page_alloc_nointr, pool_page_free_nointr, 0,
2812: .pa_backingmapptr = &kernel_map,
2813: };
2814: #else
1.66 thorpej 2815: struct pool_allocator pool_allocator_nointr = {
2816: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.101.2.1 yamt 2817: .pa_backingmapptr = &kernel_map,
1.66 thorpej 2818: };
1.101.2.1 yamt 2819: #endif
1.66 thorpej 2820:
2821: #ifdef POOL_SUBPAGE
2822: void *pool_subpage_alloc(struct pool *, int);
2823: void pool_subpage_free(struct pool *, void *);
2824:
1.101.2.1 yamt 2825: struct pool_allocator pool_allocator_kmem = {
2826: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
2827: .pa_backingmapptr = &kmem_map,
2828: };
2829:
2830: void *pool_subpage_alloc_nointr(struct pool *, int);
2831: void pool_subpage_free_nointr(struct pool *, void *);
2832:
2833: struct pool_allocator pool_allocator_nointr = {
2834: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
2835: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2836: };
2837: #endif /* POOL_SUBPAGE */
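/*
 * Editorial example (not part of the original source): the shape of a
 * custom backend allocator for a pool whose pages come from somewhere
 * other than kmem_map/kernel_map.  my_submap, my_page_alloc(),
 * my_page_free() and my_pool_allocator are hypothetical; a pool would
 * then be created by passing &my_pool_allocator to pool_init() or
 * pool_cache_init().
 */
static struct vm_map *my_submap;	/* hypothetical dedicated submap */

static void *
my_page_alloc(struct pool *pp, int flags)
{
	bool waitok = (flags & PR_WAITOK) ? true : false;

	return (void *)uvm_km_alloc_poolpage(my_submap, waitok);
}

static void
my_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage(my_submap, (vaddr_t)v);
}

struct pool_allocator my_pool_allocator = {
	my_page_alloc, my_page_free, 0,	/* pa_pagesz 0 => default page size */
};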
2838:
1.101.2.1 yamt 2839: static void *
2840: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2841: {
1.101.2.1 yamt 2842: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2843: void *res;
2844:
1.101.2.1 yamt 2845: res = (*pa->pa_alloc)(pp, flags);
2846: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2847: /*
1.101.2.1 yamt 2848: * We only run the drain hook here if PR_NOWAIT.
2849: * In other cases, the hook will be run in
2850: * pool_reclaim().
1.66 thorpej 2851: */
1.101.2.1 yamt 2852: if (pp->pr_drain_hook != NULL) {
2853: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2854: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2855: }
1.101.2.1 yamt 2856: }
2857: return res;
1.66 thorpej 2858: }
2859:
1.101.2.1 yamt 2860: static void
1.66 thorpej 2861: pool_allocator_free(struct pool *pp, void *v)
2862: {
2863: struct pool_allocator *pa = pp->pr_alloc;
2864:
2865: (*pa->pa_free)(pp, v);
2866: }
2867:
2868: void *
2869: pool_page_alloc(struct pool *pp, int flags)
2870: {
1.101.2.3 yamt 2871: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2872:
1.100 yamt 2873: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2874: }
2875:
2876: void
2877: pool_page_free(struct pool *pp, void *v)
2878: {
2879:
1.98 yamt 2880: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2881: }
2882:
2883: static void *
2884: pool_page_alloc_meta(struct pool *pp, int flags)
2885: {
1.101.2.3 yamt 2886: bool waitok = (flags & PR_WAITOK) ? true : false;
1.98 yamt 2887:
1.100 yamt 2888: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2889: }
2890:
2891: static void
2892: pool_page_free_meta(struct pool *pp, void *v)
2893: {
2894:
1.100 yamt 2895: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2896: }
2897:
2898: #ifdef POOL_SUBPAGE
2899: /* Sub-page allocator, for machines with large hardware pages. */
2900: void *
2901: pool_subpage_alloc(struct pool *pp, int flags)
2902: {
1.101.2.6 yamt 2903: return pool_get(&psppool, flags);
1.66 thorpej 2904: }
2905:
2906: void
2907: pool_subpage_free(struct pool *pp, void *v)
2908: {
2909: pool_put(&psppool, v);
2910: }
2911:
2912: /* We don't provide a real nointr allocator. Maybe later. */
2913: void *
1.101.2.1 yamt 2914: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2915: {
2916:
2917: return (pool_subpage_alloc(pp, flags));
2918: }
2919:
2920: void
1.101.2.1 yamt 2921: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2922: {
2923:
2924: pool_subpage_free(pp, v);
2925: }
1.101.2.1 yamt 2926: #endif /* POOL_SUBPAGE */
1.66 thorpej 2927: void *
2928: pool_page_alloc_nointr(struct pool *pp, int flags)
2929: {
1.101.2.3 yamt 2930: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2931:
1.100 yamt 2932: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2933: }
2934:
2935: void
2936: pool_page_free_nointr(struct pool *pp, void *v)
2937: {
2938:
1.98 yamt 2939: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2940: }
1.101.2.8 yamt 2941:
2942: #if defined(DDB)
2943: static bool
2944: pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2945: {
2946:
2947: return (uintptr_t)ph->ph_page <= addr &&
2948: addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2949: }
2950:
2951: static bool
2952: pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2953: {
2954:
2955: return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2956: }
2957:
2958: static bool
2959: pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2960: {
2961: int i;
2962:
2963: if (pcg == NULL) {
2964: return false;
2965: }
2966: for (i = 0; i < pcg->pcg_avail; i++) {
2967: if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2968: return true;
2969: }
2970: }
2971: return false;
2972: }
2973:
2974: static bool
2975: pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2976: {
2977:
2978: if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2979: unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2980: pool_item_bitmap_t *bitmap =
2981: ph->ph_bitmap + (idx / BITMAP_SIZE);
2982: pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2983:
2984: return (*bitmap & mask) == 0;
2985: } else {
2986: struct pool_item *pi;
2987:
2988: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2989: if (pool_in_item(pp, pi, addr)) {
2990: return false;
2991: }
2992: }
2993: return true;
2994: }
2995: }
2996:
2997: void
2998: pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2999: {
3000: struct pool *pp;
3001:
3002: TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
3003: struct pool_item_header *ph;
3004: uintptr_t item;
3005: bool allocated = true;
3006: bool incache = false;
3007: bool incpucache = false;
3008: char cpucachestr[32];
3009:
3010: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
3011: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
3012: if (pool_in_page(pp, ph, addr)) {
3013: goto found;
3014: }
3015: }
3016: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
3017: if (pool_in_page(pp, ph, addr)) {
3018: allocated =
3019: pool_allocated(pp, ph, addr);
3020: goto found;
3021: }
3022: }
3023: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
3024: if (pool_in_page(pp, ph, addr)) {
3025: allocated = false;
3026: goto found;
3027: }
3028: }
3029: continue;
3030: } else {
3031: ph = pr_find_pagehead_noalign(pp, (void *)addr);
3032: if (ph == NULL || !pool_in_page(pp, ph, addr)) {
3033: continue;
3034: }
3035: allocated = pool_allocated(pp, ph, addr);
3036: }
3037: found:
3038: if (allocated && pp->pr_cache) {
3039: pool_cache_t pc = pp->pr_cache;
3040: struct pool_cache_group *pcg;
3041: int i;
3042:
3043: for (pcg = pc->pc_fullgroups; pcg != NULL;
3044: pcg = pcg->pcg_next) {
3045: if (pool_in_cg(pp, pcg, addr)) {
3046: incache = true;
3047: goto print;
3048: }
3049: }
3050: for (i = 0; i < MAXCPUS; i++) {
3051: pool_cache_cpu_t *cc;
3052:
3053: if ((cc = pc->pc_cpus[i]) == NULL) {
3054: continue;
3055: }
3056: if (pool_in_cg(pp, cc->cc_current, addr) ||
3057: pool_in_cg(pp, cc->cc_previous, addr)) {
3058: struct cpu_info *ci =
3059: cpu_lookup_byindex(i);
3060:
3061: incpucache = true;
3062: snprintf(cpucachestr,
3063: sizeof(cpucachestr),
3064: "cached by CPU %u",
1.101.2.12! yamt 3065: ci->ci_index);
1.101.2.8 yamt 3066: goto print;
3067: }
3068: }
3069: }
3070: print:
3071: item = (uintptr_t)ph->ph_page + ph->ph_off;
3072: item = item + rounddown(addr - item, pp->pr_size);
3073: (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
3074: 		    (void *)addr, (void *)item, (size_t)(addr - item),
3075: pp->pr_wchan,
3076: incpucache ? cpucachestr :
3077: incache ? "cached" : allocated ? "allocated" : "free");
3078: }
3079: }
3080: #endif /* defined(DDB) */