Annotation of src/sys/kern/subr_pool.c, Revision 1.116
1.116 ! simonb 1: /* $NetBSD: subr_pool.c,v 1.115 2006/04/15 04:47:11 christos Exp $ */
1.1 pk 2:
3: /*-
1.43 thorpej 4: * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.116 ! simonb 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.115 2006/04/15 04:47:11 christos Exp $");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.3 pk 56:
57: #include <uvm/uvm.h>
58:
1.1 pk 59: /*
60: * Pool resource management utility.
1.3 pk 61: *
1.88 chs 62: * Memory is allocated in pages which are split into pieces according to
63: * the pool item size. Each page is kept on one of three lists in the
64: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
65: * for empty, full and partially-full pages respectively. The individual
66: * pool items are on a linked list headed by `ph_itemlist' in each page
67: * header. The memory for building the page list is either taken from
68: * the allocated pages themselves (for small pool items) or taken from
69: * an internal pool of page headers (`phpool').
1.1 pk 70: */
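/*
 * An illustrative usage sketch (the pool name and item type below are
 * hypothetical, not part of this file):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL);		(NULL selects pool_allocator_kmem)
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */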
71:
1.3 pk 72: /* List of all pools */
1.102 chs 73: LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
1.3 pk 74:
75: /* Private pool for page header structures */
1.97 yamt 76: #define PHPOOL_MAX 8
77: static struct pool phpool[PHPOOL_MAX];
78: #define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx)))
1.3 pk 79:
1.62 bjh21 80: #ifdef POOL_SUBPAGE
81: /* Pool of subpages for use by normal pools. */
82: static struct pool psppool;
83: #endif
84:
1.98 yamt 85: static void *pool_page_alloc_meta(struct pool *, int);
86: static void pool_page_free_meta(struct pool *, void *);
87:
88: /* allocator for pool metadata */
89: static struct pool_allocator pool_allocator_meta = {
90: pool_page_alloc_meta, pool_page_free_meta
91: };
92:
1.3 pk 93: /* # of seconds to retain page after last use */
94: int pool_inactive_time = 10;
95:
96: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 97: static struct pool *drainpp;
98:
99: /* This spin lock protects both pool_head and drainpp. */
100: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 101:
1.99 yamt 102: typedef uint8_t pool_item_freelist_t;
103:
1.3 pk 104: struct pool_item_header {
105: /* Page headers */
1.88 chs 106: LIST_ENTRY(pool_item_header)
1.3 pk 107: ph_pagelist; /* pool page list */
1.88 chs 108: SPLAY_ENTRY(pool_item_header)
109: ph_node; /* Off-page page headers */
1.3 pk 110: caddr_t ph_page; /* this page's address */
111: struct timeval ph_time; /* last referenced */
1.97 yamt 112: union {
113: /* !PR_NOTOUCH */
114: struct {
1.102 chs 115: LIST_HEAD(, pool_item)
1.97 yamt 116: phu_itemlist; /* chunk list for this page */
117: } phu_normal;
118: /* PR_NOTOUCH */
119: struct {
120: uint16_t
121: phu_off; /* start offset in page */
1.99 yamt 122: pool_item_freelist_t
1.97 yamt 123: phu_firstfree; /* first free item */
1.99 yamt 124: /*
125: * XXX it might be better to use
126: * a simple bitmap and ffs(3)
127: */
1.97 yamt 128: } phu_notouch;
129: } ph_u;
130: uint16_t ph_nmissing; /* # of chunks in use */
1.3 pk 131: };
1.97 yamt 132: #define ph_itemlist ph_u.phu_normal.phu_itemlist
133: #define ph_off ph_u.phu_notouch.phu_off
134: #define ph_firstfree ph_u.phu_notouch.phu_firstfree
1.3 pk 135:
1.1 pk 136: struct pool_item {
1.3 pk 137: #ifdef DIAGNOSTIC
1.82 thorpej 138: u_int pi_magic;
1.33 chs 139: #endif
1.82 thorpej 140: #define PI_MAGIC 0xdeadbeefU
1.3 pk 141: /* Other entries use only this list entry */
1.102 chs 142: LIST_ENTRY(pool_item) pi_list;
1.3 pk 143: };
144:
1.53 thorpej 145: #define POOL_NEEDS_CATCHUP(pp) \
146: ((pp)->pr_nitems < (pp)->pr_minitems)
147:
1.43 thorpej 148: /*
149: * Pool cache management.
150: *
151: * Pool caches provide a way for constructed objects to be cached by the
152: * pool subsystem. This can lead to performance improvements by avoiding
153: * needless object construction/destruction, which is deferred until
154: * absolutely necessary.
155: *
156: * Caches are grouped into cache groups. Each cache group references
157: * up to 16 constructed objects. When a cache allocates an object
158: * from the pool, it calls the object's constructor and places it into
159: * a cache group. When a cache group frees an object back to the pool,
160: * it first calls the object's destructor. This allows the object to
161: * persist in constructed form while freed to the cache.
162: *
163: * Multiple caches may exist for each pool. This allows a single
164: * object type to have multiple constructed forms. The pool references
165: * each cache, so that when a pool is drained by the pagedaemon, it can
166: * drain each individual cache as well. Each time a cache is drained,
167: * the most idle cache group is freed to the pool in its entirety.
168: *
169: * Pool caches are laid on top of pools. By layering them, we can avoid
170: * the complexity of cache management for pools which would not benefit
171: * from it.
172: */
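/*
 * A minimal pool_cache sketch, assuming the five-argument
 * pool_cache_init() of this vintage (names hypothetical; the ctor and
 * dtor may be NULL when no construction is needed):
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, f);
 */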
173:
174: /* The cache group pool. */
175: static struct pool pcgpool;
1.3 pk 176:
1.102 chs 177: static void pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
178: struct pool_cache_grouplist *);
179: static void pcg_grouplist_free(struct pool_cache_grouplist *);
1.3 pk 180:
1.42 thorpej 181: static int pool_catchup(struct pool *);
1.55 thorpej 182: static void pool_prime_page(struct pool *, caddr_t,
183: struct pool_item_header *);
1.88 chs 184: static void pool_update_curpage(struct pool *);
1.66 thorpej 185:
1.113 yamt 186: static int pool_grow(struct pool *, int);
1.66 thorpej 187: void *pool_allocator_alloc(struct pool *, int);
188: void pool_allocator_free(struct pool *, void *);
1.3 pk 189:
1.97 yamt 190: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 191: void (*)(const char *, ...));
1.42 thorpej 192: static void pool_print1(struct pool *, const char *,
193: void (*)(const char *, ...));
1.3 pk 194:
1.88 chs 195: static int pool_chk_page(struct pool *, const char *,
196: struct pool_item_header *);
197:
1.3 pk 198: /*
1.52 thorpej 199: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 200: */
201: struct pool_log {
202: const char *pl_file;
203: long pl_line;
204: int pl_action;
1.25 thorpej 205: #define PRLOG_GET 1
206: #define PRLOG_PUT 2
1.3 pk 207: void *pl_addr;
1.1 pk 208: };
209:
1.86 matt 210: #ifdef POOL_DIAGNOSTIC
1.3 pk 211: /* Number of entries in pool log buffers */
1.17 thorpej 212: #ifndef POOL_LOGSIZE
213: #define POOL_LOGSIZE 10
214: #endif
215:
216: int pool_logsize = POOL_LOGSIZE;
1.1 pk 217:
1.110 perry 218: static inline void
1.42 thorpej 219: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 220: {
221: int n = pp->pr_curlogentry;
222: struct pool_log *pl;
223:
1.20 thorpej 224: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 225: return;
226:
227: /*
228: * Fill in the current entry. Wrap around and overwrite
229: * the oldest entry if necessary.
230: */
231: pl = &pp->pr_log[n];
232: pl->pl_file = file;
233: pl->pl_line = line;
234: pl->pl_action = action;
235: pl->pl_addr = v;
236: if (++n >= pp->pr_logsize)
237: n = 0;
238: pp->pr_curlogentry = n;
239: }
240:
241: static void
1.42 thorpej 242: pr_printlog(struct pool *pp, struct pool_item *pi,
243: void (*pr)(const char *, ...))
1.3 pk 244: {
245: int i = pp->pr_logsize;
246: int n = pp->pr_curlogentry;
247:
1.20 thorpej 248: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 249: return;
250:
251: /*
252: * Print all entries in this pool's log.
253: */
254: while (i-- > 0) {
255: struct pool_log *pl = &pp->pr_log[n];
256: if (pl->pl_action != 0) {
1.25 thorpej 257: if (pi == NULL || pi == pl->pl_addr) {
258: (*pr)("\tlog entry %d:\n", i);
259: (*pr)("\t\taction = %s, addr = %p\n",
260: pl->pl_action == PRLOG_GET ? "get" : "put",
261: pl->pl_addr);
262: (*pr)("\t\tfile: %s at line %lu\n",
263: pl->pl_file, pl->pl_line);
264: }
1.3 pk 265: }
266: if (++n >= pp->pr_logsize)
267: n = 0;
268: }
269: }
1.25 thorpej 270:
1.110 perry 271: static inline void
1.42 thorpej 272: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 273: {
274:
1.34 thorpej 275: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 276: printf("pool %s: reentrancy at file %s line %ld\n",
277: pp->pr_wchan, file, line);
278: printf(" previous entry at file %s line %ld\n",
279: pp->pr_entered_file, pp->pr_entered_line);
280: panic("pr_enter");
281: }
282:
283: pp->pr_entered_file = file;
284: pp->pr_entered_line = line;
285: }
286:
1.110 perry 287: static inline void
1.42 thorpej 288: pr_leave(struct pool *pp)
1.25 thorpej 289: {
290:
1.34 thorpej 291: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 292: printf("pool %s not entered?\n", pp->pr_wchan);
293: panic("pr_leave");
294: }
295:
296: pp->pr_entered_file = NULL;
297: pp->pr_entered_line = 0;
298: }
299:
1.110 perry 300: static inline void
1.42 thorpej 301: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 302: {
303:
304: if (pp->pr_entered_file != NULL)
305: (*pr)("\n\tcurrently entered from file %s line %ld\n",
306: pp->pr_entered_file, pp->pr_entered_line);
307: }
1.3 pk 308: #else
1.25 thorpej 309: #define pr_log(pp, v, action, file, line)
310: #define pr_printlog(pp, pi, pr)
311: #define pr_enter(pp, file, line)
312: #define pr_leave(pp)
313: #define pr_enter_check(pp, pr)
1.59 thorpej 314: #endif /* POOL_DIAGNOSTIC */
1.3 pk 315:
1.110 perry 316: static inline int
1.97 yamt 317: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
318: const void *v)
319: {
320: const char *cp = v;
321: int idx;
322:
323: KASSERT(pp->pr_roflags & PR_NOTOUCH);
324: idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
325: KASSERT(idx < pp->pr_itemsperpage);
326: return idx;
327: }
328:
1.99 yamt 329: #define PR_FREELIST_ALIGN(p) \
330: roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
331: #define PR_FREELIST(ph) ((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
332: #define PR_INDEX_USED ((pool_item_freelist_t)-1)
333: #define PR_INDEX_EOL ((pool_item_freelist_t)-2)
1.97 yamt 334:
1.110 perry 335: static inline void
1.97 yamt 336: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
337: void *obj)
338: {
339: int idx = pr_item_notouch_index(pp, ph, obj);
1.99 yamt 340: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 341:
342: KASSERT(freelist[idx] == PR_INDEX_USED);
343: freelist[idx] = ph->ph_firstfree;
344: ph->ph_firstfree = idx;
345: }
346:
1.110 perry 347: static inline void *
1.97 yamt 348: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
349: {
350: int idx = ph->ph_firstfree;
1.99 yamt 351: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 352:
353: KASSERT(freelist[idx] != PR_INDEX_USED);
354: ph->ph_firstfree = freelist[idx];
355: freelist[idx] = PR_INDEX_USED;
356:
357: return ph->ph_page + ph->ph_off + idx * pp->pr_size;
358: }
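/*
 * A worked example of the PR_NOTOUCH free list (indices illustrative):
 * on a four-item page with items 0 and 2 free and items 1 and 3
 * allocated, ph_firstfree == 0 and the freelist array reads
 *
 *	{ 2, PR_INDEX_USED, PR_INDEX_EOL, PR_INDEX_USED }
 *
 * i.e. each free slot holds the index of the next free item, and
 * allocated slots are marked PR_INDEX_USED.
 */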
359:
1.110 perry 360: static inline int
1.88 chs 361: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
362: {
363: if (a->ph_page < b->ph_page)
364: return (-1);
365: else if (a->ph_page > b->ph_page)
366: return (1);
367: else
368: return (0);
369: }
370:
371: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
372: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
373:
1.3 pk 374: /*
375: * Return the pool page header based on page address.
376: */
1.110 perry 377: static inline struct pool_item_header *
1.42 thorpej 378: pr_find_pagehead(struct pool *pp, caddr_t page)
1.3 pk 379: {
1.88 chs 380: struct pool_item_header *ph, tmp;
1.3 pk 381:
1.20 thorpej 382: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 383: return ((struct pool_item_header *)(page + pp->pr_phoffset));
384:
1.88 chs 385: tmp.ph_page = page;
386: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
387: return ph;
1.3 pk 388: }
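/*
 * Example (addresses illustrative): with pa_pagesz == 4096,
 * pa_pagemask == ~0xfff, so callers mask an item at 0xc0de1234 down
 * to the page base 0xc0de1000, which either embeds its header at
 * pr_phoffset (PR_PHINPAGE) or keys the splay tree lookup above.
 */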
389:
1.101 thorpej 390: static void
391: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
392: {
393: struct pool_item_header *ph;
394: int s;
395:
396: while ((ph = LIST_FIRST(pq)) != NULL) {
397: LIST_REMOVE(ph, ph_pagelist);
398: pool_allocator_free(pp, ph->ph_page);
399: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
400: s = splvm();
401: pool_put(pp->pr_phpool, ph);
402: splx(s);
403: }
404: }
405: }
406:
1.3 pk 407: /*
408: * Remove a page from the pool.
409: */
1.110 perry 410: static inline void
1.61 chs 411: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
412: struct pool_pagelist *pq)
1.3 pk 413: {
414:
1.101 thorpej 415: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1.91 yamt 416:
1.3 pk 417: /*
1.7 thorpej 418: * If the page was idle, decrement the idle page count.
1.3 pk 419: */
1.6 thorpej 420: if (ph->ph_nmissing == 0) {
421: #ifdef DIAGNOSTIC
422: if (pp->pr_nidle == 0)
423: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 424: if (pp->pr_nitems < pp->pr_itemsperpage)
425: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 426: #endif
427: pp->pr_nidle--;
428: }
1.7 thorpej 429:
1.20 thorpej 430: pp->pr_nitems -= pp->pr_itemsperpage;
431:
1.7 thorpej 432: /*
1.101 thorpej 433: * Unlink the page from the pool and queue it for release.
1.7 thorpej 434: */
1.88 chs 435: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 436: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
437: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 438: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
439:
1.7 thorpej 440: pp->pr_npages--;
441: pp->pr_npagefree++;
1.6 thorpej 442:
1.88 chs 443: pool_update_curpage(pp);
1.3 pk 444: }
445:
446: /*
1.94 simonb 447: * Initialize all the pools listed in the "pools" link set.
448: */
449: void
450: link_pool_init(void)
451: {
452: __link_set_decl(pools, struct link_pool_init);
453: struct link_pool_init * const *pi;
454:
455: __link_set_foreach(pi, pools)
456: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
457: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
458: (*pi)->palloc);
459: }
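/*
 * Pools enter the "pools" link set via the POOL_INIT() macro in
 * <sys/pool.h>; a sketch, with a hypothetical pool name and type:
 *
 *	POOL_INIT(foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *
 * link_pool_init() then calls pool_init() on each entry early in
 * boot, before malloc() is available.
 */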
460:
461: /*
1.3 pk 462: * Initialize the given pool resource structure.
463: *
464: * We export this routine to allow other kernel parts to declare
465: * static pools that must be initialized before malloc() is available.
466: */
467: void
1.42 thorpej 468: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.66 thorpej 469: const char *wchan, struct pool_allocator *palloc)
1.3 pk 470: {
1.116 ! simonb 471: #ifdef DEBUG
! 472: struct pool *pp1;
! 473: #endif
1.92 enami 474: size_t trysize, phsize;
1.116 ! simonb 475: int off, slack, s;
1.3 pk 476:
1.99 yamt 477: KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
478: PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
479:
1.116 ! simonb 480: #ifdef DEBUG
! 481: /*
! 482: * Check that the pool hasn't already been initialised and
! 483: * added to the list of all pools.
! 484: */
! 485: LIST_FOREACH(pp1, &pool_head, pr_poollist) {
! 486: if (pp == pp1)
! 487: panic("pool_init: pool %s already initialised",
! 488: wchan);
! 489: }
! 490: #endif
! 491:
1.25 thorpej 492: #ifdef POOL_DIAGNOSTIC
493: /*
494: * Always log if POOL_DIAGNOSTIC is defined.
495: */
496: if (pool_logsize != 0)
497: flags |= PR_LOGGING;
498: #endif
499:
1.66 thorpej 500: if (palloc == NULL)
501: palloc = &pool_allocator_kmem;
1.112 bjh21 502: #ifdef POOL_SUBPAGE
503: if (size > palloc->pa_pagesz) {
504: if (palloc == &pool_allocator_kmem)
505: palloc = &pool_allocator_kmem_fullpage;
506: else if (palloc == &pool_allocator_nointr)
507: palloc = &pool_allocator_nointr_fullpage;
508: }
1.66 thorpej 509: #endif /* POOL_SUBPAGE */
510: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.112 bjh21 511: if (palloc->pa_pagesz == 0)
1.66 thorpej 512: palloc->pa_pagesz = PAGE_SIZE;
513:
514: TAILQ_INIT(&palloc->pa_list);
515:
516: simple_lock_init(&palloc->pa_slock);
517: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
518: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
519: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 520: }
1.3 pk 521:
522: if (align == 0)
523: align = ALIGN(1);
1.14 thorpej 524:
525: if (size < sizeof(struct pool_item))
526: size = sizeof(struct pool_item);
1.3 pk 527:
1.78 thorpej 528: size = roundup(size, align);
1.66 thorpej 529: #ifdef DIAGNOSTIC
530: if (size > palloc->pa_pagesz)
1.35 pk 531: panic("pool_init: pool item size (%lu) too large",
532: (u_long)size);
1.66 thorpej 533: #endif
1.35 pk 534:
1.3 pk 535: /*
536: * Initialize the pool structure.
537: */
1.88 chs 538: LIST_INIT(&pp->pr_emptypages);
539: LIST_INIT(&pp->pr_fullpages);
540: LIST_INIT(&pp->pr_partpages);
1.102 chs 541: LIST_INIT(&pp->pr_cachelist);
1.3 pk 542: pp->pr_curpage = NULL;
543: pp->pr_npages = 0;
544: pp->pr_minitems = 0;
545: pp->pr_minpages = 0;
546: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 547: pp->pr_roflags = flags;
548: pp->pr_flags = 0;
1.35 pk 549: pp->pr_size = size;
1.3 pk 550: pp->pr_align = align;
551: pp->pr_wchan = wchan;
1.66 thorpej 552: pp->pr_alloc = palloc;
1.20 thorpej 553: pp->pr_nitems = 0;
554: pp->pr_nout = 0;
555: pp->pr_hardlimit = UINT_MAX;
556: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 557: pp->pr_hardlimit_ratecap.tv_sec = 0;
558: pp->pr_hardlimit_ratecap.tv_usec = 0;
559: pp->pr_hardlimit_warning_last.tv_sec = 0;
560: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 561: pp->pr_drain_hook = NULL;
562: pp->pr_drain_hook_arg = NULL;
1.3 pk 563:
564: /*
565: * Decide whether to put the page header off page to avoid
1.92 enami 566: * wasting too large a part of the page, or for items that are too big.
567: * Off-page page headers go into a splay tree, so we can match
568: * a returned item with its header based on the page address.
569: * We use 1/16 of the page size and about 8 times the item
570: * size as the threshold (XXX: tune)
571: *
572: * However, we'll put the header into the page if we can put
573: * it without wasting any items.
574: *
575: * Silently enforce `0 <= ioff < align'.
1.3 pk 576: */
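	/*
	 * A worked example with illustrative numbers: assuming a
	 * 4096-byte page and a 48-byte aligned page header, items
	 * smaller than MIN(4096 / 16, 48 << 3) == 256 bytes keep their
	 * header in-page; bigger items do too whenever the header fits
	 * in the page's slack without costing an item. PR_NOTOUCH
	 * pools always take the off-page path.
	 */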
1.92 enami 577: pp->pr_itemoffset = ioff %= align;
578: /* See the comment below about reserved bytes. */
579: trysize = palloc->pa_pagesz - ((align - ioff) % align);
580: phsize = ALIGN(sizeof(struct pool_item_header));
1.97 yamt 581: if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
582: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
583: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 584: /* Use the end of the page for the page header */
1.20 thorpej 585: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 586: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 587: } else {
1.3 pk 588: /* The page header will be taken from our page header pool */
589: pp->pr_phoffset = 0;
1.66 thorpej 590: off = palloc->pa_pagesz;
1.88 chs 591: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 592: }
1.1 pk 593:
1.3 pk 594: /*
595: * Alignment is to take place at `ioff' within the item. This means
596: * we must reserve up to `align - 1' bytes on the page to allow
597: * appropriate positioning of each item.
598: */
599: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 600: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 601: if ((pp->pr_roflags & PR_NOTOUCH)) {
602: int idx;
603:
604: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
605: idx++) {
606: /* nothing */
607: }
608: if (idx >= PHPOOL_MAX) {
609: /*
610: * if you see this panic, consider tweaking
611: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
612: */
613: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
614: pp->pr_wchan, pp->pr_itemsperpage);
615: }
616: pp->pr_phpool = &phpool[idx];
617: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
618: pp->pr_phpool = &phpool[0];
619: }
620: #if defined(DIAGNOSTIC)
621: else {
622: pp->pr_phpool = NULL;
623: }
624: #endif
1.3 pk 625:
626: /*
627: * Use the slack between the chunks and the page header
628: * for "cache coloring".
629: */
630: slack = off - pp->pr_itemsperpage * pp->pr_size;
631: pp->pr_maxcolor = (slack / align) * align;
632: pp->pr_curcolor = 0;
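	/*
	 * A worked example with illustrative numbers: a 4096-byte page
	 * with a 48-byte in-page header (off == 4048) holds 15 items
	 * of 256 bytes, leaving slack == 208; with align == 64 this
	 * gives pr_maxcolor == 192, so successive pages start their
	 * items at offsets 0, 64, 128, 192, 0, ... to spread
	 * cache-line usage.
	 */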
633:
634: pp->pr_nget = 0;
635: pp->pr_nfail = 0;
636: pp->pr_nput = 0;
637: pp->pr_npagealloc = 0;
638: pp->pr_npagefree = 0;
1.1 pk 639: pp->pr_hiwat = 0;
1.8 thorpej 640: pp->pr_nidle = 0;
1.3 pk 641:
1.59 thorpej 642: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 643: if (flags & PR_LOGGING) {
644: if (kmem_map == NULL ||
645: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
646: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 647: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 648: pp->pr_curlogentry = 0;
649: pp->pr_logsize = pool_logsize;
650: }
1.59 thorpej 651: #endif
1.25 thorpej 652:
653: pp->pr_entered_file = NULL;
654: pp->pr_entered_line = 0;
1.3 pk 655:
1.21 thorpej 656: simple_lock_init(&pp->pr_slock);
1.1 pk 657:
1.3 pk 658: /*
1.43 thorpej 659: * Initialize private page header pool and cache magazine pool if we
660: * haven't done so yet.
1.23 thorpej 661: * XXX LOCKING.
1.3 pk 662: */
1.97 yamt 663: if (phpool[0].pr_size == 0) {
664: int idx;
665: for (idx = 0; idx < PHPOOL_MAX; idx++) {
666: static char phpool_names[PHPOOL_MAX][6+1+6+1];
667: int nelem;
668: size_t sz;
669:
670: nelem = PHPOOL_FREELIST_NELEM(idx);
671: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
672: "phpool-%d", nelem);
673: sz = sizeof(struct pool_item_header);
674: if (nelem) {
675: sz = PR_FREELIST_ALIGN(sz)
1.99 yamt 676: + nelem * sizeof(pool_item_freelist_t);
1.97 yamt 677: }
678: pool_init(&phpool[idx], sz, 0, 0, 0,
1.98 yamt 679: phpool_names[idx], &pool_allocator_meta);
1.97 yamt 680: }
1.62 bjh21 681: #ifdef POOL_SUBPAGE
682: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.98 yamt 683: PR_RECURSIVE, "psppool", &pool_allocator_meta);
1.62 bjh21 684: #endif
1.43 thorpej 685: pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.98 yamt 686: 0, "pcgpool", &pool_allocator_meta);
1.1 pk 687: }
688:
1.23 thorpej 689: /* Insert into the list of all pools. */
690: simple_lock(&pool_head_slock);
1.102 chs 691: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
1.23 thorpej 692: simple_unlock(&pool_head_slock);
1.66 thorpej 693:
694: /* Insert this into the list of pools using this allocator. */
1.93 dbj 695: s = splvm();
1.66 thorpej 696: simple_lock(&palloc->pa_slock);
697: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
698: simple_unlock(&palloc->pa_slock);
1.93 dbj 699: splx(s);
1.1 pk 700: }
701:
702: /*
703: * De-commission a pool resource.
704: */
705: void
1.42 thorpej 706: pool_destroy(struct pool *pp)
1.1 pk 707: {
1.101 thorpej 708: struct pool_pagelist pq;
1.3 pk 709: struct pool_item_header *ph;
1.93 dbj 710: int s;
1.43 thorpej 711:
1.101 thorpej 712: /* Remove from global pool list */
713: simple_lock(&pool_head_slock);
1.102 chs 714: LIST_REMOVE(pp, pr_poollist);
1.101 thorpej 715: if (drainpp == pp)
716: drainpp = NULL;
717: simple_unlock(&pool_head_slock);
718:
719: /* Remove this pool from its allocator's list of pools. */
1.93 dbj 720: s = splvm();
1.66 thorpej 721: simple_lock(&pp->pr_alloc->pa_slock);
722: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
723: simple_unlock(&pp->pr_alloc->pa_slock);
1.93 dbj 724: splx(s);
1.66 thorpej 725:
1.101 thorpej 726: s = splvm();
727: simple_lock(&pp->pr_slock);
728:
1.102 chs 729: KASSERT(LIST_EMPTY(&pp->pr_cachelist));
1.3 pk 730:
731: #ifdef DIAGNOSTIC
1.20 thorpej 732: if (pp->pr_nout != 0) {
1.25 thorpej 733: pr_printlog(pp, NULL, printf);
1.80 provos 734: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 735: pp->pr_nout);
1.3 pk 736: }
737: #endif
1.1 pk 738:
1.101 thorpej 739: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
740: KASSERT(LIST_EMPTY(&pp->pr_partpages));
741:
1.3 pk 742: /* Remove all pages */
1.101 thorpej 743: LIST_INIT(&pq);
1.88 chs 744: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 745: pr_rmpage(pp, ph, &pq);
746:
747: simple_unlock(&pp->pr_slock);
748: splx(s);
1.3 pk 749:
1.101 thorpej 750: pr_pagelist_free(pp, &pq);
1.3 pk 751:
1.59 thorpej 752: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 753: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 754: free(pp->pr_log, M_TEMP);
1.59 thorpej 755: #endif
1.1 pk 756: }
757:
1.68 thorpej 758: void
759: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
760: {
761:
762: /* XXX no locking -- must be used just after pool_init() */
763: #ifdef DIAGNOSTIC
764: if (pp->pr_drain_hook != NULL)
765: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
766: #endif
767: pp->pr_drain_hook = fn;
768: pp->pr_drain_hook_arg = arg;
769: }
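/*
 * Sketch of a hypothetical client: a subsystem that can shed cached
 * objects registers a hook right after pool_init(),
 *
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 *
 * where foo_drain(void *arg, int flags) frees items back to the pool.
 * Both pool_get() (at the hard limit) and pool_reclaim() invoke the
 * hook with the pool unlocked.
 */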
770:
1.88 chs 771: static struct pool_item_header *
1.55 thorpej 772: pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
773: {
774: struct pool_item_header *ph;
775: int s;
776:
777: LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
778:
779: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
780: ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
781: else {
1.85 pk 782: s = splvm();
1.97 yamt 783: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 784: splx(s);
785: }
786:
787: return (ph);
788: }
1.1 pk 789:
790: /*
1.3 pk 791: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 792: */
1.3 pk 793: void *
1.59 thorpej 794: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 795: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 796: #else
797: pool_get(struct pool *pp, int flags)
798: #endif
1.1 pk 799: {
800: struct pool_item *pi;
1.3 pk 801: struct pool_item_header *ph;
1.55 thorpej 802: void *v;
1.1 pk 803:
1.2 pk 804: #ifdef DIAGNOSTIC
1.95 atatat 805: if (__predict_false(pp->pr_itemsperpage == 0))
806: panic("pool_get: pool %p: pr_itemsperpage is zero, "
807: "pool not initialized?", pp);
1.84 thorpej 808: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 809: (flags & PR_WAITOK) != 0))
1.77 matt 810: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 811:
1.102 chs 812: #endif /* DIAGNOSTIC */
1.58 thorpej 813: #ifdef LOCKDEBUG
814: if (flags & PR_WAITOK)
815: simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
1.102 chs 816: SCHED_ASSERT_UNLOCKED();
1.56 sommerfe 817: #endif
1.1 pk 818:
1.21 thorpej 819: simple_lock(&pp->pr_slock);
1.25 thorpej 820: pr_enter(pp, file, line);
1.20 thorpej 821:
822: startover:
823: /*
824: * Check to see if we've reached the hard limit. If we have,
825: * and we can wait, then wait until an item has been returned to
826: * the pool.
827: */
828: #ifdef DIAGNOSTIC
1.34 thorpej 829: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 830: pr_leave(pp);
1.21 thorpej 831: simple_unlock(&pp->pr_slock);
1.20 thorpej 832: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
833: }
834: #endif
1.34 thorpej 835: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 836: if (pp->pr_drain_hook != NULL) {
837: /*
838: * Since the drain hook is going to free things
839: * back to the pool, unlock, call the hook, re-lock,
840: * and check the hardlimit condition again.
841: */
842: pr_leave(pp);
843: simple_unlock(&pp->pr_slock);
844: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
845: simple_lock(&pp->pr_slock);
846: pr_enter(pp, file, line);
847: if (pp->pr_nout < pp->pr_hardlimit)
848: goto startover;
849: }
850:
1.29 sommerfe 851: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 852: /*
853: * XXX: A warning isn't logged in this case. Should
854: * it be?
855: */
856: pp->pr_flags |= PR_WANTED;
1.25 thorpej 857: pr_leave(pp);
1.40 sommerfe 858: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 859: pr_enter(pp, file, line);
1.20 thorpej 860: goto startover;
861: }
1.31 thorpej 862:
863: /*
864: * Log a message that the hard limit has been hit.
865: */
866: if (pp->pr_hardlimit_warning != NULL &&
867: ratecheck(&pp->pr_hardlimit_warning_last,
868: &pp->pr_hardlimit_ratecap))
869: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 870:
871: pp->pr_nfail++;
872:
1.25 thorpej 873: pr_leave(pp);
1.21 thorpej 874: simple_unlock(&pp->pr_slock);
1.20 thorpej 875: return (NULL);
876: }
877:
1.3 pk 878: /*
879: * The convention we use is that if `curpage' is not NULL, then
880: * it points at a non-empty bucket. In particular, `curpage'
881: * never points at a page header which has PR_PHINPAGE set and
882: * has no items in its bucket.
883: */
1.20 thorpej 884: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 885: int error;
886:
1.20 thorpej 887: #ifdef DIAGNOSTIC
888: if (pp->pr_nitems != 0) {
1.21 thorpej 889: simple_unlock(&pp->pr_slock);
1.20 thorpej 890: printf("pool_get: %s: curpage NULL, nitems %u\n",
891: pp->pr_wchan, pp->pr_nitems);
1.80 provos 892: panic("pool_get: nitems inconsistent");
1.20 thorpej 893: }
894: #endif
895:
1.21 thorpej 896: /*
897: * Call the back-end page allocator for more memory.
898: * Release the pool lock, as the back-end page allocator
899: * may block.
900: */
1.25 thorpej 901: pr_leave(pp);
1.113 yamt 902: error = pool_grow(pp, flags);
903: pr_enter(pp, file, line);
904: if (error != 0) {
1.21 thorpej 905: /*
1.55 thorpej 906: * We were unable to allocate a page or item
907: * header, but we released the lock during
908: * allocation, so perhaps items were freed
909: * back to the pool. Check for this case.
1.21 thorpej 910: */
911: if (pp->pr_curpage != NULL)
912: goto startover;
1.15 pk 913:
1.3 pk 914: if ((flags & PR_WAITOK) == 0) {
915: pp->pr_nfail++;
1.25 thorpej 916: pr_leave(pp);
1.21 thorpej 917: simple_unlock(&pp->pr_slock);
1.1 pk 918: return (NULL);
1.3 pk 919: }
920:
1.15 pk 921: /*
922: * Wait for items to be returned to this pool.
1.21 thorpej 923: *
1.109 christos 924: * Wake up once a second and try again,
925: * as the check in pool_cache_put_paddr() is racy.
1.15 pk 926: */
1.1 pk 927: pp->pr_flags |= PR_WANTED;
1.66 thorpej 928: /* PA_WANTED is already set on the allocator. */
1.25 thorpej 929: pr_leave(pp);
1.109 christos 930: ltsleep(pp, PSWP, pp->pr_wchan, hz, &pp->pr_slock);
1.25 thorpej 931: pr_enter(pp, file, line);
1.1 pk 932: }
1.3 pk 933:
1.20 thorpej 934: /* Start the allocation process over. */
935: goto startover;
1.3 pk 936: }
1.97 yamt 937: if (pp->pr_roflags & PR_NOTOUCH) {
938: #ifdef DIAGNOSTIC
939: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
940: pr_leave(pp);
941: simple_unlock(&pp->pr_slock);
942: panic("pool_get: %s: page empty", pp->pr_wchan);
943: }
944: #endif
945: v = pr_item_notouch_get(pp, ph);
946: #ifdef POOL_DIAGNOSTIC
947: pr_log(pp, v, PRLOG_GET, file, line);
948: #endif
949: } else {
1.102 chs 950: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 951: if (__predict_false(v == NULL)) {
952: pr_leave(pp);
953: simple_unlock(&pp->pr_slock);
954: panic("pool_get: %s: page empty", pp->pr_wchan);
955: }
1.20 thorpej 956: #ifdef DIAGNOSTIC
1.97 yamt 957: if (__predict_false(pp->pr_nitems == 0)) {
958: pr_leave(pp);
959: simple_unlock(&pp->pr_slock);
960: printf("pool_get: %s: items on itemlist, nitems %u\n",
961: pp->pr_wchan, pp->pr_nitems);
962: panic("pool_get: nitems inconsistent");
963: }
1.65 enami 964: #endif
1.56 sommerfe 965:
1.65 enami 966: #ifdef POOL_DIAGNOSTIC
1.97 yamt 967: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 968: #endif
1.3 pk 969:
1.65 enami 970: #ifdef DIAGNOSTIC
1.97 yamt 971: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
972: pr_printlog(pp, pi, printf);
973: panic("pool_get(%s): free list modified: "
974: "magic=%x; page %p; item addr %p\n",
975: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
976: }
1.3 pk 977: #endif
978:
1.97 yamt 979: /*
980: * Remove from item list.
981: */
1.102 chs 982: LIST_REMOVE(pi, pi_list);
1.97 yamt 983: }
1.20 thorpej 984: pp->pr_nitems--;
985: pp->pr_nout++;
1.6 thorpej 986: if (ph->ph_nmissing == 0) {
987: #ifdef DIAGNOSTIC
1.34 thorpej 988: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 989: panic("pool_get: nidle inconsistent");
990: #endif
991: pp->pr_nidle--;
1.88 chs 992:
993: /*
994: * This page was previously empty. Move it to the list of
995: * partially-full pages. This page is already curpage.
996: */
997: LIST_REMOVE(ph, ph_pagelist);
998: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 999: }
1.3 pk 1000: ph->ph_nmissing++;
1.97 yamt 1001: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1002: #ifdef DIAGNOSTIC
1.97 yamt 1003: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 1004: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1005: pr_leave(pp);
1.21 thorpej 1006: simple_unlock(&pp->pr_slock);
1007: panic("pool_get: %s: nmissing inconsistent",
1008: pp->pr_wchan);
1009: }
1010: #endif
1.3 pk 1011: /*
1.88 chs 1012: * This page is now full. Move it to the full list
1013: * and select a new current page.
1.3 pk 1014: */
1.88 chs 1015: LIST_REMOVE(ph, ph_pagelist);
1016: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1017: pool_update_curpage(pp);
1.1 pk 1018: }
1.3 pk 1019:
1020: pp->pr_nget++;
1.111 christos 1021: pr_leave(pp);
1.20 thorpej 1022:
1023: /*
1024: * If we have a low water mark and we are now below that low
1025: * water mark, add more items to the pool.
1026: */
1.53 thorpej 1027: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1028: /*
1029: * XXX: Should we log a warning? Should we set up a timeout
1030: * to try again in a second or so? The latter could break
1031: * a caller's assumptions about interrupt protection, etc.
1032: */
1033: }
1034:
1.21 thorpej 1035: simple_unlock(&pp->pr_slock);
1.1 pk 1036: return (v);
1037: }
1038:
1039: /*
1.43 thorpej 1040: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1041: */
1.43 thorpej 1042: static void
1.101 thorpej 1043: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1044: {
1045: struct pool_item *pi = v;
1.3 pk 1046: struct pool_item_header *ph;
1047: caddr_t page;
1.21 thorpej 1048: int s;
1.3 pk 1049:
1.61 chs 1050: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1.102 chs 1051: SCHED_ASSERT_UNLOCKED();
1.61 chs 1052:
1.66 thorpej 1053: page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1.1 pk 1054:
1.30 thorpej 1055: #ifdef DIAGNOSTIC
1.34 thorpej 1056: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1057: printf("pool %s: putting with none out\n",
1058: pp->pr_wchan);
1059: panic("pool_put");
1060: }
1061: #endif
1.3 pk 1062:
1.34 thorpej 1063: if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25 thorpej 1064: pr_printlog(pp, NULL, printf);
1.3 pk 1065: panic("pool_put: %s: page header missing", pp->pr_wchan);
1066: }
1.28 thorpej 1067:
1068: #ifdef LOCKDEBUG
1069: /*
1070: * Check if we're freeing a locked simple lock.
1071: */
1072: simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
1073: #endif
1.3 pk 1074:
1075: /*
1076: * Return to item list.
1077: */
1.97 yamt 1078: if (pp->pr_roflags & PR_NOTOUCH) {
1079: pr_item_notouch_put(pp, ph, v);
1080: } else {
1.2 pk 1081: #ifdef DIAGNOSTIC
1.97 yamt 1082: pi->pi_magic = PI_MAGIC;
1.3 pk 1083: #endif
1.32 chs 1084: #ifdef DEBUG
1.97 yamt 1085: {
1086: int i, *ip = v;
1.32 chs 1087:
1.97 yamt 1088: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1089: *ip++ = PI_MAGIC;
1090: }
1.32 chs 1091: }
1092: #endif
1093:
1.102 chs 1094: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1095: }
1.79 thorpej 1096: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1097: ph->ph_nmissing--;
1098: pp->pr_nput++;
1.20 thorpej 1099: pp->pr_nitems++;
1100: pp->pr_nout--;
1.3 pk 1101:
1102: /* Cancel "pool empty" condition if it exists */
1103: if (pp->pr_curpage == NULL)
1104: pp->pr_curpage = ph;
1105:
1106: if (pp->pr_flags & PR_WANTED) {
1107: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1108: if (ph->ph_nmissing == 0)
1109: pp->pr_nidle++;
1.3 pk 1110: wakeup((caddr_t)pp);
1111: return;
1112: }
1113:
1114: /*
1.88 chs 1115: * If this page is now empty, do one of two things:
1.21 thorpej 1116: *
1.88 chs 1117: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1118: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1119: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1120: * CLAIM.
1.21 thorpej 1121: *
1.88 chs 1122: * (2) Otherwise, move the page to the empty page list.
1123: *
1124: * Either way, select a new current page (so we use a partially-full
1125: * page if one is available).
1.3 pk 1126: */
1127: if (ph->ph_nmissing == 0) {
1.6 thorpej 1128: pp->pr_nidle++;
1.90 thorpej 1129: if (pp->pr_npages > pp->pr_minpages &&
1130: (pp->pr_npages > pp->pr_maxpages ||
1131: (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
1.101 thorpej 1132: pr_rmpage(pp, ph, pq);
1.3 pk 1133: } else {
1.88 chs 1134: LIST_REMOVE(ph, ph_pagelist);
1135: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1136:
1.21 thorpej 1137: /*
1138: * Update the timestamp on the page. A page must
1139: * be idle for some period of time before it can
1140: * be reclaimed by the pagedaemon. This minimizes
1141: * ping-pong'ing for memory.
1142: */
1143: s = splclock();
1144: ph->ph_time = mono_time;
1145: splx(s);
1.1 pk 1146: }
1.88 chs 1147: pool_update_curpage(pp);
1.1 pk 1148: }
1.88 chs 1149:
1.21 thorpej 1150: /*
1.88 chs 1151: * If the page was previously completely full, move it to the
1152: * partially-full list and make it the current page. The next
1153: * allocation will get the item from this page, instead of
1154: * further fragmenting the pool.
1.21 thorpej 1155: */
1156: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1157: LIST_REMOVE(ph, ph_pagelist);
1158: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1159: pp->pr_curpage = ph;
1160: }
1.43 thorpej 1161: }
1162:
1163: /*
1164: * Return resource to the pool; must be called at appropriate spl level
1165: */
1.59 thorpej 1166: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1167: void
1168: _pool_put(struct pool *pp, void *v, const char *file, long line)
1169: {
1.101 thorpej 1170: struct pool_pagelist pq;
1171:
1172: LIST_INIT(&pq);
1.43 thorpej 1173:
1174: simple_lock(&pp->pr_slock);
1175: pr_enter(pp, file, line);
1176:
1.56 sommerfe 1177: pr_log(pp, v, PRLOG_PUT, file, line);
1178:
1.101 thorpej 1179: pool_do_put(pp, v, &pq);
1.21 thorpej 1180:
1.25 thorpej 1181: pr_leave(pp);
1.21 thorpej 1182: simple_unlock(&pp->pr_slock);
1.101 thorpej 1183:
1.102 chs 1184: pr_pagelist_free(pp, &pq);
1.1 pk 1185: }
1.57 sommerfe 1186: #undef pool_put
1.59 thorpej 1187: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1188:
1.56 sommerfe 1189: void
1190: pool_put(struct pool *pp, void *v)
1191: {
1.101 thorpej 1192: struct pool_pagelist pq;
1193:
1194: LIST_INIT(&pq);
1.56 sommerfe 1195:
1196: simple_lock(&pp->pr_slock);
1.101 thorpej 1197: pool_do_put(pp, v, &pq);
1198: simple_unlock(&pp->pr_slock);
1.56 sommerfe 1199:
1.102 chs 1200: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1201: }
1.57 sommerfe 1202:
1.59 thorpej 1203: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1204: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1205: #endif
1.74 thorpej 1206:
1207: /*
1.113 yamt 1208: * pool_grow: grow a pool by a page.
1209: *
1210: * => called with pool locked.
1211: * => unlock and relock the pool.
1212: * => return with pool locked.
1213: */
1214:
1215: static int
1216: pool_grow(struct pool *pp, int flags)
1217: {
1218: struct pool_item_header *ph = NULL;
1219: char *cp;
1220:
1221: simple_unlock(&pp->pr_slock);
1222: cp = pool_allocator_alloc(pp, flags);
1223: if (__predict_true(cp != NULL)) {
1224: ph = pool_alloc_item_header(pp, cp, flags);
1225: }
1226: if (__predict_false(cp == NULL || ph == NULL)) {
1227: if (cp != NULL) {
1228: pool_allocator_free(pp, cp);
1229: }
1230: simple_lock(&pp->pr_slock);
1231: return ENOMEM;
1232: }
1233:
1234: simple_lock(&pp->pr_slock);
1235: pool_prime_page(pp, cp, ph);
1236: pp->pr_npagealloc++;
1237: return 0;
1238: }
1239:
1240: /*
1.74 thorpej 1241: * Add N items to the pool.
1242: */
1243: int
1244: pool_prime(struct pool *pp, int n)
1245: {
1.75 simonb 1246: int newpages;
1.113 yamt 1247: int error = 0;
1.74 thorpej 1248:
1249: simple_lock(&pp->pr_slock);
1250:
1251: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1252:
1253: while (newpages-- > 0) {
1.113 yamt 1254: error = pool_grow(pp, PR_NOWAIT);
1255: if (error) {
1.74 thorpej 1256: break;
1257: }
1258: pp->pr_minpages++;
1259: }
1260:
1261: if (pp->pr_minpages >= pp->pr_maxpages)
1262: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1263:
1264: simple_unlock(&pp->pr_slock);
1.113 yamt 1265: return error;
1.74 thorpej 1266: }
1.55 thorpej 1267:
1268: /*
1.3 pk 1269: * Add a page's worth of items to the pool.
1.21 thorpej 1270: *
1271: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1272: */
1.55 thorpej 1273: static void
1274: pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1.3 pk 1275: {
1276: struct pool_item *pi;
1277: caddr_t cp = storage;
1278: unsigned int align = pp->pr_align;
1279: unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1280: int n;
1.89 yamt 1281: int s;
1.36 pk 1282:
1.91 yamt 1283: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1284:
1.66 thorpej 1285: #ifdef DIAGNOSTIC
1286: if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1287: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1288: #endif
1.3 pk 1289:
1290: /*
1291: * Insert page header.
1292: */
1.88 chs 1293: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1294: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1295: ph->ph_page = storage;
1296: ph->ph_nmissing = 0;
1.89 yamt 1297: s = splclock();
1298: ph->ph_time = mono_time;
1299: splx(s);
1.88 chs 1300: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1301: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1302:
1.6 thorpej 1303: pp->pr_nidle++;
1304:
1.3 pk 1305: /*
1306: * Color this page.
1307: */
1308: cp = (caddr_t)(cp + pp->pr_curcolor);
1309: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1310: pp->pr_curcolor = 0;
1311:
1312: /*
1313: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1314: */
1315: if (ioff != 0)
1316: cp = (caddr_t)(cp + (align - ioff));
1317:
1318: /*
1319: * Insert remaining chunks on the bucket list.
1320: */
1321: n = pp->pr_itemsperpage;
1.20 thorpej 1322: pp->pr_nitems += n;
1.3 pk 1323:
1.97 yamt 1324: if (pp->pr_roflags & PR_NOTOUCH) {
1.99 yamt 1325: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 1326: int i;
1327:
1.99 yamt 1328: ph->ph_off = cp - storage;
1.97 yamt 1329: ph->ph_firstfree = 0;
1330: for (i = 0; i < n - 1; i++)
1331: freelist[i] = i + 1;
1332: freelist[n - 1] = PR_INDEX_EOL;
1333: } else {
1334: while (n--) {
1335: pi = (struct pool_item *)cp;
1.78 thorpej 1336:
1.97 yamt 1337: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1338:
1.97 yamt 1339: /* Insert on page list */
1.102 chs 1340: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1341: #ifdef DIAGNOSTIC
1.97 yamt 1342: pi->pi_magic = PI_MAGIC;
1.3 pk 1343: #endif
1.97 yamt 1344: cp = (caddr_t)(cp + pp->pr_size);
1345: }
1.3 pk 1346: }
1347:
1348: /*
1349: * If the pool was depleted, point at the new page.
1350: */
1351: if (pp->pr_curpage == NULL)
1352: pp->pr_curpage = ph;
1353:
1354: if (++pp->pr_npages > pp->pr_hiwat)
1355: pp->pr_hiwat = pp->pr_npages;
1356: }
1357:
1.20 thorpej 1358: /*
1.52 thorpej 1359: * Used by pool_get() when nitems drops below the low water mark; it
1.88 chs 1360: * brings pr_nitems back up to the low water mark.
1.20 thorpej 1361: *
1.21 thorpej 1362: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1363: *
1.73 thorpej 1364: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1365: * with it locked.
1366: */
1367: static int
1.42 thorpej 1368: pool_catchup(struct pool *pp)
1.20 thorpej 1369: {
1370: int error = 0;
1371:
1.54 thorpej 1372: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1373: error = pool_grow(pp, PR_NOWAIT);
1374: if (error) {
1.20 thorpej 1375: break;
1376: }
1377: }
1.113 yamt 1378: return error;
1.20 thorpej 1379: }
1380:
1.88 chs 1381: static void
1382: pool_update_curpage(struct pool *pp)
1383: {
1384:
1385: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1386: if (pp->pr_curpage == NULL) {
1387: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1388: }
1389: }
1390:
1.3 pk 1391: void
1.42 thorpej 1392: pool_setlowat(struct pool *pp, int n)
1.3 pk 1393: {
1.15 pk 1394:
1.21 thorpej 1395: simple_lock(&pp->pr_slock);
1396:
1.3 pk 1397: pp->pr_minitems = n;
1.15 pk 1398: pp->pr_minpages = (n == 0)
1399: ? 0
1.18 thorpej 1400: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1401:
1402: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1403: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1404: /*
1405: * XXX: Should we log a warning? Should we set up a timeout
1406: * to try again in a second or so? The latter could break
1407: * a caller's assumptions about interrupt protection, etc.
1408: */
1409: }
1.21 thorpej 1410:
1411: simple_unlock(&pp->pr_slock);
1.3 pk 1412: }
1413:
1414: void
1.42 thorpej 1415: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1416: {
1.15 pk 1417:
1.21 thorpej 1418: simple_lock(&pp->pr_slock);
1419:
1.15 pk 1420: pp->pr_maxpages = (n == 0)
1421: ? 0
1.18 thorpej 1422: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1423:
1424: simple_unlock(&pp->pr_slock);
1.3 pk 1425: }
1426:
1.20 thorpej 1427: void
1.42 thorpej 1428: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1429: {
1430:
1.21 thorpej 1431: simple_lock(&pp->pr_slock);
1.20 thorpej 1432:
1433: pp->pr_hardlimit = n;
1434: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1435: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1436: pp->pr_hardlimit_warning_last.tv_sec = 0;
1437: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1438:
1439: /*
1.21 thorpej 1440: * In-line version of pool_sethiwat(), because we don't want to
1441: * release the lock.
1.20 thorpej 1442: */
1443: pp->pr_maxpages = (n == 0)
1444: ? 0
1445: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1446:
1447: simple_unlock(&pp->pr_slock);
1.20 thorpej 1448: }
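/*
 * A hypothetical tuning sequence (all values illustrative):
 *
 *	pool_setlowat(&foo_pool, 16);	keep at least 16 items primed
 *	pool_sethiwat(&foo_pool, 64);	free idle pages beyond ~64 items
 *	pool_sethardlimit(&foo_pool, 1024,
 *	    "WARNING: foo_pool limit reached", 60);
 *
 * The hard limit caps outstanding items at 1024 and rate-limits the
 * warning to at most one per 60 seconds.
 */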
1.3 pk 1449:
1450: /*
1451: * Release all complete pages that have not been used recently.
1452: */
1.66 thorpej 1453: int
1.59 thorpej 1454: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1455: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1456: #else
1457: pool_reclaim(struct pool *pp)
1458: #endif
1.3 pk 1459: {
1460: struct pool_item_header *ph, *phnext;
1.43 thorpej 1461: struct pool_cache *pc;
1.61 chs 1462: struct pool_pagelist pq;
1.102 chs 1463: struct pool_cache_grouplist pcgl;
1464: struct timeval curtime, diff;
1.21 thorpej 1465: int s;
1.3 pk 1466:
1.68 thorpej 1467: if (pp->pr_drain_hook != NULL) {
1468: /*
1469: * The drain hook must be called with the pool unlocked.
1470: */
1471: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1472: }
1473:
1.21 thorpej 1474: if (simple_lock_try(&pp->pr_slock) == 0)
1.66 thorpej 1475: return (0);
1.25 thorpej 1476: pr_enter(pp, file, line);
1.68 thorpej 1477:
1.88 chs 1478: LIST_INIT(&pq);
1.102 chs 1479: LIST_INIT(&pcgl);
1.3 pk 1480:
1.43 thorpej 1481: /*
1482: * Reclaim items from the pool's caches.
1483: */
1.102 chs 1484: LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1485: pool_cache_reclaim(pc, &pq, &pcgl);
1.43 thorpej 1486:
1.21 thorpej 1487: s = splclock();
1488: curtime = mono_time;
1489: splx(s);
1490:
1.88 chs 1491: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1492: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1493:
1494: /* Check our minimum page claim */
1495: if (pp->pr_npages <= pp->pr_minpages)
1496: break;
1497:
1.88 chs 1498: KASSERT(ph->ph_nmissing == 0);
1499: timersub(&curtime, &ph->ph_time, &diff);
1500: if (diff.tv_sec < pool_inactive_time)
1501: continue;
1.21 thorpej 1502:
1.88 chs 1503: /*
1504: * If freeing this page would put us below
1505: * the low water mark, stop now.
1506: */
1507: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1508: pp->pr_minitems)
1509: break;
1.21 thorpej 1510:
1.88 chs 1511: pr_rmpage(pp, ph, &pq);
1.3 pk 1512: }
1513:
1.25 thorpej 1514: pr_leave(pp);
1.21 thorpej 1515: simple_unlock(&pp->pr_slock);
1.102 chs 1516: if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
1517: return 0;
1.66 thorpej 1518:
1.101 thorpej 1519: pr_pagelist_free(pp, &pq);
1.102 chs 1520: pcg_grouplist_free(&pcgl);
1.66 thorpej 1521: return (1);
1.3 pk 1522: }
1523:
1524: /*
1525: * Drain pools, one at a time.
1.21 thorpej 1526: *
1527: * Note, we must never be called from an interrupt context.
1.3 pk 1528: */
1529: void
1.42 thorpej 1530: pool_drain(void *arg)
1.3 pk 1531: {
1532: struct pool *pp;
1.23 thorpej 1533: int s;
1.3 pk 1534:
1.61 chs 1535: pp = NULL;
1.49 thorpej 1536: s = splvm();
1.23 thorpej 1537: simple_lock(&pool_head_slock);
1.61 chs 1538: if (drainpp == NULL) {
1.102 chs 1539: drainpp = LIST_FIRST(&pool_head);
1.61 chs 1540: }
1541: if (drainpp) {
1542: pp = drainpp;
1.102 chs 1543: drainpp = LIST_NEXT(pp, pr_poollist);
1.61 chs 1544: }
1545: simple_unlock(&pool_head_slock);
1.115 christos 1546: if (pp)
1547: pool_reclaim(pp);
1.61 chs 1548: splx(s);
1.3 pk 1549: }
1550:
1551: /*
1552: * Diagnostic helpers.
1553: */
1554: void
1.42 thorpej 1555: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1556: {
1557: int s;
1558:
1.49 thorpej 1559: s = splvm();
1.25 thorpej 1560: if (simple_lock_try(&pp->pr_slock) == 0) {
1561: printf("pool %s is locked; try again later\n",
1562: pp->pr_wchan);
1563: splx(s);
1564: return;
1565: }
1566: pool_print1(pp, modif, printf);
1.21 thorpej 1567: simple_unlock(&pp->pr_slock);
1568: splx(s);
1569: }
1570:
1.25 thorpej 1571: void
1.108 yamt 1572: pool_printall(const char *modif, void (*pr)(const char *, ...))
1573: {
1574: struct pool *pp;
1575:
1576: if (simple_lock_try(&pool_head_slock) == 0) {
1577: (*pr)("WARNING: pool_head_slock is locked\n");
1578: } else {
1579: simple_unlock(&pool_head_slock);
1580: }
1581:
1582: LIST_FOREACH(pp, &pool_head, pr_poollist) {
1583: pool_printit(pp, modif, pr);
1584: }
1585: }
1586:
1587: void
1.42 thorpej 1588: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1589: {
1590:
1591: if (pp == NULL) {
1592: (*pr)("Must specify a pool to print.\n");
1593: return;
1594: }
1595:
1596: /*
1597: * Called from DDB; interrupts should be blocked, and all
1598: * other processors should be paused. We can skip locking
1599: * the pool in this case.
1600: *
1601: * We do a simple_lock_try() just to print the lock
1602: * status, however.
1603: */
1604:
1605: if (simple_lock_try(&pp->pr_slock) == 0)
1606: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1607: else
1.107 yamt 1608: simple_unlock(&pp->pr_slock);
1.25 thorpej 1609:
1610: pool_print1(pp, modif, pr);
1611: }
1612:
1.21 thorpej 1613: static void
1.97 yamt 1614: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1615: void (*pr)(const char *, ...))
1.88 chs 1616: {
1617: struct pool_item_header *ph;
1618: #ifdef DIAGNOSTIC
1619: struct pool_item *pi;
1620: #endif
1621:
1622: LIST_FOREACH(ph, pl, ph_pagelist) {
1623: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1624: ph->ph_page, ph->ph_nmissing,
1625: (u_long)ph->ph_time.tv_sec,
1626: (u_long)ph->ph_time.tv_usec);
1627: #ifdef DIAGNOSTIC
1.97 yamt 1628: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1629: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1630: if (pi->pi_magic != PI_MAGIC) {
1631: (*pr)("\t\t\titem %p, magic 0x%x\n",
1632: pi, pi->pi_magic);
1633: }
1.88 chs 1634: }
1635: }
1636: #endif
1637: }
1638: }
1639:
1640: static void
1.42 thorpej 1641: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1642: {
1.25 thorpej 1643: struct pool_item_header *ph;
1.44 thorpej 1644: struct pool_cache *pc;
1645: struct pool_cache_group *pcg;
1646: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1647: char c;
1648:
1649: while ((c = *modif++) != '\0') {
1650: if (c == 'l')
1651: print_log = 1;
1652: if (c == 'p')
1653: print_pagelist = 1;
1.44 thorpej 1654: if (c == 'c')
1655: print_cache = 1;
1.25 thorpej 1656: }
1657:
1658: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1659: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1660: pp->pr_roflags);
1.66 thorpej 1661: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1662: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1663: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1664: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1665: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1666:
1667: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1668: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1669: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1670: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1671:
1672: if (print_pagelist == 0)
1673: goto skip_pagelist;
1674:
1.88 chs 1675: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1676: (*pr)("\n\tempty page list:\n");
1.97 yamt 1677: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1678: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1679: (*pr)("\n\tfull page list:\n");
1.97 yamt 1680: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1681: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1682: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1683: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1684:
1.25 thorpej 1685: if (pp->pr_curpage == NULL)
1686: (*pr)("\tno current page\n");
1687: else
1688: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1689:
1690: skip_pagelist:
1691: if (print_log == 0)
1692: goto skip_log;
1693:
1694: (*pr)("\n");
1695: if ((pp->pr_roflags & PR_LOGGING) == 0)
1696: (*pr)("\tno log\n");
1697: else
1698: pr_printlog(pp, NULL, pr);
1.3 pk 1699:
1.25 thorpej 1700: skip_log:
1.44 thorpej 1701: if (print_cache == 0)
1702: goto skip_cache;
1703:
1.102 chs 1704: #define PR_GROUPLIST(pcg) \
1705: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1706: for (i = 0; i < PCG_NOBJECTS; i++) { \
1707: if (pcg->pcg_objects[i].pcgo_pa != \
1708: POOL_PADDR_INVALID) { \
1709: (*pr)("\t\t\t%p, 0x%llx\n", \
1710: pcg->pcg_objects[i].pcgo_va, \
1711: (unsigned long long) \
1712: pcg->pcg_objects[i].pcgo_pa); \
1713: } else { \
1714: (*pr)("\t\t\t%p\n", \
1715: pcg->pcg_objects[i].pcgo_va); \
1716: } \
1717: }
1718:
1719: LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.103 chs 1720: (*pr)("\tcache %p\n", pc);
1.48 thorpej 1721: (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1722: pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.102 chs 1723: (*pr)("\t full groups:\n");
1.103 chs 1724: LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) {
1.102 chs 1725: PR_GROUPLIST(pcg);
1.103 chs 1726: }
1.102 chs 1727: (*pr)("\t partial groups:\n");
1.103 chs 1728: LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) {
1.102 chs 1729: PR_GROUPLIST(pcg);
1.103 chs 1730: }
1.102 chs 1731: (*pr)("\t empty groups:\n");
1.103 chs 1732: LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) {
1.102 chs 1733: PR_GROUPLIST(pcg);
1.103 chs 1734: }
1.44 thorpej 1735: }
1.102 chs 1736: #undef PR_GROUPLIST
1.44 thorpej 1737:
1738: skip_cache:
1.88 chs 1739: pr_enter_check(pp, pr);
1740: }
1741:
1742: static int
1743: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1744: {
1745: struct pool_item *pi;
1746: caddr_t page;
1747: int n;
1748:
1749: page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1750: if (page != ph->ph_page &&
1751: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1752: if (label != NULL)
1753: printf("%s: ", label);
1754: printf("pool(%p:%s): page inconsistency: page %p;"
1755: " at page head addr %p (p %p)\n", pp,
1756: pp->pr_wchan, ph->ph_page,
1757: ph, page);
1758: return 1;
1759: }
1.3 pk 1760:
1.97 yamt 1761: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1762: return 0;
1763:
1.102 chs 1764: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1765: pi != NULL;
1.102 chs 1766: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1767:
1768: #ifdef DIAGNOSTIC
1769: if (pi->pi_magic != PI_MAGIC) {
1770: if (label != NULL)
1771: printf("%s: ", label);
1772: printf("pool(%s): free list modified: magic=%x;"
1773: " page %p; item ordinal %d;"
1774: " addr %p (p %p)\n",
1775: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1776: n, pi, page);
1777: panic("pool");
1778: }
1779: #endif
1780: page =
1781: (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1782: if (page == ph->ph_page)
1783: continue;
1784:
1785: if (label != NULL)
1786: printf("%s: ", label);
1787: printf("pool(%p:%s): page inconsistency: page %p;"
1788: " item ordinal %d; addr %p (p %p)\n", pp,
1789: pp->pr_wchan, ph->ph_page,
1790: n, pi, page);
1791: return 1;
1792: }
1793: return 0;
1.3 pk 1794: }
1795:
1.88 chs 1796:
1.3 pk 1797: int
1.42 thorpej 1798: pool_chk(struct pool *pp, const char *label)
1.3 pk 1799: {
1800: struct pool_item_header *ph;
1801: int r = 0;
1802:
1.21 thorpej 1803: simple_lock(&pp->pr_slock);
1.88 chs 1804: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1805: r = pool_chk_page(pp, label, ph);
1806: if (r) {
1807: goto out;
1808: }
1809: }
1810: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1811: r = pool_chk_page(pp, label, ph);
1812: if (r) {
1.3 pk 1813: goto out;
1814: }
1.88 chs 1815: }
1816: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1817: r = pool_chk_page(pp, label, ph);
1818: if (r) {
1.3 pk 1819: goto out;
1820: }
1821: }
1.88 chs 1822:
1.3 pk 1823: out:
1.21 thorpej 1824: simple_unlock(&pp->pr_slock);
1.3 pk 1825: return (r);
1.43 thorpej 1826: }
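
/*
 * Usage sketch: pool_chk() is a diagnostic entry point; it returns
 * non-zero if any of the pool's page lists is inconsistent, and the
 * label (if not NULL) is prepended to the diagnostics.  "foopool"
 * below is a hypothetical pool used only for illustration:
 *
 *	if (pool_chk(&foopool, __func__) != 0)
 *		panic("foopool inconsistent");
 */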
1827:
1828: /*
1829: * pool_cache_init:
1830: *
1831: * Initialize a pool cache.
1832: *
1833: * NOTE: If the pool must be protected from interrupts, we expect
1834: * to be called at the appropriate interrupt priority level.
1835: */
1836: void
1837: pool_cache_init(struct pool_cache *pc, struct pool *pp,
1838: int (*ctor)(void *, void *, int),
1839: void (*dtor)(void *, void *),
1840: void *arg)
1841: {
1842:
1.102 chs 1843: LIST_INIT(&pc->pc_emptygroups);
1844: LIST_INIT(&pc->pc_fullgroups);
1845: LIST_INIT(&pc->pc_partgroups);
1.43 thorpej 1846: simple_lock_init(&pc->pc_slock);
1847:
1848: pc->pc_pool = pp;
1849:
1850: pc->pc_ctor = ctor;
1851: pc->pc_dtor = dtor;
1852: pc->pc_arg = arg;
1853:
1.48 thorpej 1854: pc->pc_hits = 0;
1855: pc->pc_misses = 0;
1856:
1857: pc->pc_ngroups = 0;
1858:
1859: pc->pc_nitems = 0;
1860:
1.43 thorpej 1861: simple_lock(&pp->pr_slock);
1.102 chs 1862: LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
1.43 thorpej 1863: simple_unlock(&pp->pr_slock);
1864: }
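
/*
 * Usage sketch (hypothetical names: foopool, foo_cache, struct foo,
 * foo_ctor, foo_dtor): a subsystem whose objects are expensive to
 * construct would layer a cache over its pool like this.  The ctor
 * must return 0 on success; ctor/dtor may be NULL if unneeded.
 *
 *	static struct pool foopool;
 *	static struct pool_cache foo_cache;
 *
 *	pool_init(&foopool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &pool_allocator_nointr);
 *	pool_cache_init(&foo_cache, &foopool, foo_ctor, foo_dtor, NULL);
 */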
1865:
1866: /*
1867: * pool_cache_destroy:
1868: *
1869: * Destroy a pool cache.
1870: */
1871: void
1872: pool_cache_destroy(struct pool_cache *pc)
1873: {
1874: struct pool *pp = pc->pc_pool;
1875:
1876: /* First, invalidate the entire cache. */
1877: pool_cache_invalidate(pc);
1878:
1879: /* ...and remove it from the pool's cache list. */
1880: simple_lock(&pp->pr_slock);
1.102 chs 1881: LIST_REMOVE(pc, pc_poollist);
1.43 thorpej 1882: simple_unlock(&pp->pr_slock);
1883: }
1884:
1.110 perry 1885: static inline void *
1.87 thorpej 1886: pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1.43 thorpej 1887: {
1888: void *object;
1889: u_int idx;
1890:
1891: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 1892: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 1893: idx = --pcg->pcg_avail;
1894:
1.87 thorpej 1895: KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1896: object = pcg->pcg_objects[idx].pcgo_va;
1897: if (pap != NULL)
1898: *pap = pcg->pcg_objects[idx].pcgo_pa;
1899: pcg->pcg_objects[idx].pcgo_va = NULL;
1.43 thorpej 1900:
1901: return (object);
1902: }
1903:
1.110 perry 1904: static inline void
1.87 thorpej 1905: pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1.43 thorpej 1906: {
1907: u_int idx;
1908:
1909: KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1910: idx = pcg->pcg_avail++;
1911:
1.87 thorpej 1912: KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1913: pcg->pcg_objects[idx].pcgo_va = object;
1914: pcg->pcg_objects[idx].pcgo_pa = pa;
1.43 thorpej 1915: }
1916:
1.102 chs 1917: static void
1918: pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
1919: {
1920: struct pool_cache_group *pcg;
1921: int s;
1922:
1923: s = splvm();
1924: while ((pcg = LIST_FIRST(pcgl)) != NULL) {
1925: LIST_REMOVE(pcg, pcg_list);
1926: pool_put(&pcgpool, pcg);
1927: }
1928: splx(s);
1929: }
1930:
1.43 thorpej 1931: /*
1.87 thorpej 1932: * pool_cache_get{,_paddr}:
1.43 thorpej 1933: *
1.87 thorpej 1934: * Get an object from a pool cache (optionally returning
1935: * the physical address of the object).
1.43 thorpej 1936: */
1937: void *
1.87 thorpej 1938: pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1.43 thorpej 1939: {
1940: struct pool_cache_group *pcg;
1941: void *object;
1.58 thorpej 1942:
1943: #ifdef LOCKDEBUG
1944: if (flags & PR_WAITOK)
1945: simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1946: #endif
1.43 thorpej 1947:
1948: simple_lock(&pc->pc_slock);
1949:
1.102 chs 1950: pcg = LIST_FIRST(&pc->pc_partgroups);
1951: if (pcg == NULL) {
1952: pcg = LIST_FIRST(&pc->pc_fullgroups);
1953: if (pcg != NULL) {
1954: LIST_REMOVE(pcg, pcg_list);
1955: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 1956: }
1.102 chs 1957: }
1958: if (pcg == NULL) {
1.43 thorpej 1959:
1960: /*
1961: * No groups with any available objects. Allocate
1962: * a new object, construct it, and return it to
1963: * the caller. We will allocate a group, if necessary,
1964: * when the object is freed back to the cache.
1965: */
1.48 thorpej 1966: pc->pc_misses++;
1.43 thorpej 1967: simple_unlock(&pc->pc_slock);
1968: object = pool_get(pc->pc_pool, flags);
1969: if (object != NULL && pc->pc_ctor != NULL) {
1970: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1971: pool_put(pc->pc_pool, object);
1972: return (NULL);
1973: }
1974: }
1.87 thorpej 1975: if (object != NULL && pap != NULL) {
1976: #ifdef POOL_VTOPHYS
1977: *pap = POOL_VTOPHYS(object);
1978: #else
1979: *pap = POOL_PADDR_INVALID;
1980: #endif
1981: }
1.43 thorpej 1982: return (object);
1983: }
1984:
1.48 thorpej 1985: pc->pc_hits++;
1986: pc->pc_nitems--;
1.87 thorpej 1987: object = pcg_get(pcg, pap);
1.43 thorpej 1988:
1.102 chs 1989: if (pcg->pcg_avail == 0) {
1990: LIST_REMOVE(pcg, pcg_list);
1991: LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
1992: }
1.43 thorpej 1993: simple_unlock(&pc->pc_slock);
1994:
1995: return (object);
1996: }
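
/*
 * Usage sketch (hypothetical foo_cache from the pool_cache_init
 * example): pool_cache_get() is the common form, which discards the
 * physical address.  Note that a NULL return is possible even with
 * PR_WAITOK, since the constructor may fail.
 *
 *	struct foo *f;
 *
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	if (f == NULL)
 *		return (ENOMEM);
 */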
1997:
1998: /*
1.87 thorpej 1999: * pool_cache_put{,_paddr}:
1.43 thorpej 2000: *
1.87 thorpej 2001: * Put an object back to the pool cache (optionally caching the
2002: * physical address of the object).
1.43 thorpej 2003: */
2004: void
1.87 thorpej 2005: pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1.43 thorpej 2006: {
2007: struct pool_cache_group *pcg;
1.60 thorpej 2008: int s;
1.43 thorpej 2009:
1.109 christos 2010: if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
2011: goto destruct;
2012: }
2013:
1.43 thorpej 2014: simple_lock(&pc->pc_slock);
2015:
1.102 chs 2016: pcg = LIST_FIRST(&pc->pc_partgroups);
2017: if (pcg == NULL) {
2018: pcg = LIST_FIRST(&pc->pc_emptygroups);
2019: if (pcg != NULL) {
2020: LIST_REMOVE(pcg, pcg_list);
2021: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2022: }
1.102 chs 2023: }
2024: if (pcg == NULL) {
1.43 thorpej 2025:
2026: /*
                 2027: 		 * No group with room for the object.  Attempt to
1.47 thorpej 2028: * allocate one.
1.43 thorpej 2029: */
1.47 thorpej 2030: simple_unlock(&pc->pc_slock);
1.60 thorpej 2031: s = splvm();
1.43 thorpej 2032: pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60 thorpej 2033: splx(s);
1.102 chs 2034: if (pcg == NULL) {
1.109 christos 2035: destruct:
1.102 chs 2036:
2037: /*
2038: * Unable to allocate a cache group; destruct the object
2039: * and free it back to the pool.
2040: */
2041: pool_cache_destruct_object(pc, object);
2042: return;
1.43 thorpej 2043: }
1.102 chs 2044: memset(pcg, 0, sizeof(*pcg));
2045: simple_lock(&pc->pc_slock);
2046: pc->pc_ngroups++;
2047: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2048: }
2049:
1.48 thorpej 2050: pc->pc_nitems++;
1.87 thorpej 2051: pcg_put(pcg, object, pa);
1.43 thorpej 2052:
1.102 chs 2053: if (pcg->pcg_avail == PCG_NOBJECTS) {
2054: LIST_REMOVE(pcg, pcg_list);
2055: LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
2056: }
1.43 thorpej 2057: simple_unlock(&pc->pc_slock);
1.51 thorpej 2058: }
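
/*
 * Usage sketch (continuing the hypothetical foo_cache example):
 * pool_cache_put() is the common form, which passes POOL_PADDR_INVALID
 * as the physical address.
 *
 *	pool_cache_put(&foo_cache, f);
 */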
2059:
2060: /*
2061: * pool_cache_destruct_object:
2062: *
2063: * Force destruction of an object and its release back into
2064: * the pool.
2065: */
2066: void
2067: pool_cache_destruct_object(struct pool_cache *pc, void *object)
2068: {
2069:
2070: if (pc->pc_dtor != NULL)
2071: (*pc->pc_dtor)(pc->pc_arg, object);
2072: pool_put(pc->pc_pool, object);
1.43 thorpej 2073: }
2074:
1.102 chs 2075: static void
1.106 christos 2076: pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
1.105 christos 2077: struct pool_cache *pc, struct pool_pagelist *pq,
1.106 christos 2078: struct pool_cache_grouplist *pcgdl)
1.102 chs 2079: {
1.106 christos 2080: struct pool_cache_group *pcg, *npcg;
1.102 chs 2081: void *object;
2082:
1.106 christos 2083: for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
1.102 chs 2084: npcg = LIST_NEXT(pcg, pcg_list);
2085: while (pcg->pcg_avail != 0) {
2086: pc->pc_nitems--;
2087: object = pcg_get(pcg, NULL);
2088: if (pc->pc_dtor != NULL)
2089: (*pc->pc_dtor)(pc->pc_arg, object);
2090: pool_do_put(pc->pc_pool, object, pq);
2091: }
1.103 chs 2092: pc->pc_ngroups--;
1.102 chs 2093: LIST_REMOVE(pcg, pcg_list);
1.106 christos 2094: LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
1.102 chs 2095: }
1.105 christos 2096: }
2097:
2098: static void
2099: pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
2100: struct pool_cache_grouplist *pcgl)
2101: {
2102:
2103: LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
2104: LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));
2105:
1.106 christos 2106: pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
2107: pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);
1.103 chs 2108:
2109: KASSERT(LIST_EMPTY(&pc->pc_partgroups));
2110: KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
2111: KASSERT(pc->pc_nitems == 0);
1.102 chs 2112: }
2113:
1.43 thorpej 2114: /*
1.101 thorpej 2115: * pool_cache_invalidate:
1.43 thorpej 2116: *
1.101 thorpej 2117: * Invalidate a pool cache (destruct and release all of the
2118: * cached objects).
1.43 thorpej 2119: */
1.101 thorpej 2120: void
2121: pool_cache_invalidate(struct pool_cache *pc)
1.43 thorpej 2122: {
1.101 thorpej 2123: struct pool_pagelist pq;
1.102 chs 2124: struct pool_cache_grouplist pcgl;
1.101 thorpej 2125:
2126: LIST_INIT(&pq);
1.102 chs 2127: LIST_INIT(&pcgl);
1.101 thorpej 2128:
2129: simple_lock(&pc->pc_slock);
2130: simple_lock(&pc->pc_pool->pr_slock);
1.43 thorpej 2131:
1.102 chs 2132: pool_do_cache_invalidate(pc, &pq, &pcgl);
1.43 thorpej 2133:
1.101 thorpej 2134: simple_unlock(&pc->pc_pool->pr_slock);
2135: simple_unlock(&pc->pc_slock);
1.43 thorpej 2136:
1.102 chs 2137: pr_pagelist_free(pc->pc_pool, &pq);
2138: pcg_grouplist_free(&pcgl);
1.43 thorpej 2139: }
2140:
2141: /*
2142: * pool_cache_reclaim:
2143: *
2144: * Reclaim a pool cache for pool_reclaim().
2145: */
2146: static void
1.102 chs 2147: pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
2148: struct pool_cache_grouplist *pcgl)
1.43 thorpej 2149: {
1.101 thorpej 2150:
2151: /*
2152: * We're locking in the wrong order (normally pool_cache -> pool,
2153: * but the pool is already locked when we get here), so we have
2154: * to use trylock. If we can't lock the pool_cache, it's not really
2155: * a big deal here.
2156: */
2157: if (simple_lock_try(&pc->pc_slock) == 0)
2158: return;
2159:
1.102 chs 2160: pool_do_cache_invalidate(pc, pq, pcgl);
1.43 thorpej 2161:
2162: simple_unlock(&pc->pc_slock);
1.3 pk 2163: }
1.66 thorpej 2164:
2165: /*
2166: * Pool backend allocators.
2167: *
2168: * Each pool has a backend allocator that handles allocation, deallocation,
2169: * and any additional draining that might be needed.
2170: *
2171: * We provide two standard allocators:
2172: *
2173: * pool_allocator_kmem - the default when no allocator is specified
2174: *
2175: * pool_allocator_nointr - used for pools that will not be accessed
2176: * in interrupt context.
2177: */
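
/*
 * Sketch of a custom backend (foo_page_alloc/foo_page_free are
 * hypothetical hooks): the third initializer is pa_pagesz, where 0
 * selects the default page size, as in the standard allocators below.
 *
 *	static void *foo_page_alloc(struct pool *, int);
 *	static void  foo_page_free(struct pool *, void *);
 *
 *	static struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 * A pool is attached to it via the last argument of pool_init().
 */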
2178: void *pool_page_alloc(struct pool *, int);
2179: void pool_page_free(struct pool *, void *);
2180:
1.112 bjh21 2181: #ifdef POOL_SUBPAGE
2182: struct pool_allocator pool_allocator_kmem_fullpage = {
2183: pool_page_alloc, pool_page_free, 0,
2184: };
2185: #else
1.66 thorpej 2186: struct pool_allocator pool_allocator_kmem = {
2187: pool_page_alloc, pool_page_free, 0,
2188: };
1.112 bjh21 2189: #endif
1.66 thorpej 2190:
2191: void *pool_page_alloc_nointr(struct pool *, int);
2192: void pool_page_free_nointr(struct pool *, void *);
2193:
1.112 bjh21 2194: #ifdef POOL_SUBPAGE
2195: struct pool_allocator pool_allocator_nointr_fullpage = {
2196: pool_page_alloc_nointr, pool_page_free_nointr, 0,
2197: };
2198: #else
1.66 thorpej 2199: struct pool_allocator pool_allocator_nointr = {
2200: pool_page_alloc_nointr, pool_page_free_nointr, 0,
2201: };
1.112 bjh21 2202: #endif
1.66 thorpej 2203:
2204: #ifdef POOL_SUBPAGE
2205: void *pool_subpage_alloc(struct pool *, int);
2206: void pool_subpage_free(struct pool *, void *);
2207:
1.112 bjh21 2208: struct pool_allocator pool_allocator_kmem = {
2209: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
2210: };
2211:
2212: void *pool_subpage_alloc_nointr(struct pool *, int);
2213: void pool_subpage_free_nointr(struct pool *, void *);
2214:
2215: struct pool_allocator pool_allocator_nointr = {
2216: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.66 thorpej 2217: };
2218: #endif /* POOL_SUBPAGE */
2219:
2220: /*
2221: * We have at least three different resources for the same allocation and
2222: * each resource can be depleted. First, we have the ready elements in the
2223: * pool. Then we have the resource (typically a vm_map) for this allocator.
                 2224:  * Finally, we have physical memory.  A thread waiting for one of these
                 2225:  * could be satisfied as soon as any other is freed, but the kernel doesn't
                 2226:  * support sleeping on multiple wait channels, so we employ another strategy.
2227: *
2228: * The caller sleeps on the pool (so that it can be awakened when an item
2229: * is returned to the pool), but we set PA_WANT on the allocator. When a
2230: * page is returned to the allocator and PA_WANT is set, pool_allocator_free
2231: * will wake up all sleeping pools belonging to this allocator.
2232: *
2233: * XXX Thundering herd.
2234: */
2235: void *
2236: pool_allocator_alloc(struct pool *org, int flags)
2237: {
2238: struct pool_allocator *pa = org->pr_alloc;
2239: struct pool *pp, *start;
2240: int s, freed;
2241: void *res;
2242:
1.91 yamt 2243: LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
2244:
1.66 thorpej 2245: do {
2246: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2247: return (res);
1.68 thorpej 2248: if ((flags & PR_WAITOK) == 0) {
2249: /*
                 2250: 			 * We only run the drain hook here if PR_NOWAIT.
2251: * In other cases, the hook will be run in
2252: * pool_reclaim().
2253: */
2254: if (org->pr_drain_hook != NULL) {
2255: (*org->pr_drain_hook)(org->pr_drain_hook_arg,
2256: flags);
2257: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2258: return (res);
2259: }
1.66 thorpej 2260: break;
1.68 thorpej 2261: }
1.66 thorpej 2262:
2263: /*
1.109     christos 2264: 		 * Drain all pools that use this allocator.
2265: * We do this to reclaim VA space.
1.66 thorpej 2266: * pa_alloc is responsible for waiting for
2267: * physical memory.
2268: *
                 2269: 		 * XXX We risk looping forever if someone calls
                 2270: 		 * pool_destroy on "start".  But there is no
2271: * other way to have potentially sleeping pool_reclaim,
2272: * non-sleeping locks on pool_allocator, and some
2273: * stirring of drained pools in the allocator.
1.68 thorpej 2274: *
2275: * XXX Maybe we should use pool_head_slock for locking
2276: * the allocators?
1.66 thorpej 2277: */
2278: freed = 0;
2279:
2280: s = splvm();
2281: simple_lock(&pa->pa_slock);
2282: pp = start = TAILQ_FIRST(&pa->pa_list);
2283: do {
2284: TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
2285: TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
1.73 thorpej 2286: simple_unlock(&pa->pa_slock);
1.66 thorpej 2287: freed = pool_reclaim(pp);
1.73 thorpej 2288: simple_lock(&pa->pa_slock);
1.66 thorpej 2289: } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
2290: freed == 0);
2291:
2292: if (freed == 0) {
2293: /*
2294: * We set PA_WANT here, the caller will most likely
2295: * sleep waiting for pages (if not, this won't hurt
2296: * that much), and there is no way to set this in
2297: * the caller without violating locking order.
2298: */
2299: pa->pa_flags |= PA_WANT;
2300: }
2301: simple_unlock(&pa->pa_slock);
2302: splx(s);
2303: } while (freed);
2304: return (NULL);
2305: }
2306:
2307: void
2308: pool_allocator_free(struct pool *pp, void *v)
2309: {
2310: struct pool_allocator *pa = pp->pr_alloc;
2311: int s;
2312:
1.91 yamt 2313: LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
2314:
1.66 thorpej 2315: (*pa->pa_free)(pp, v);
2316:
2317: s = splvm();
2318: simple_lock(&pa->pa_slock);
2319: if ((pa->pa_flags & PA_WANT) == 0) {
2320: simple_unlock(&pa->pa_slock);
2321: splx(s);
2322: return;
2323: }
2324:
2325: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2326: simple_lock(&pp->pr_slock);
2327: if ((pp->pr_flags & PR_WANTED) != 0) {
2328: pp->pr_flags &= ~PR_WANTED;
2329: wakeup(pp);
2330: }
1.69 thorpej 2331: simple_unlock(&pp->pr_slock);
1.66 thorpej 2332: }
2333: pa->pa_flags &= ~PA_WANT;
2334: simple_unlock(&pa->pa_slock);
2335: splx(s);
2336: }
2337:
2338: void *
2339: pool_page_alloc(struct pool *pp, int flags)
2340: {
2341: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2342:
1.100 yamt 2343: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2344: }
2345:
2346: void
2347: pool_page_free(struct pool *pp, void *v)
2348: {
2349:
1.98 yamt 2350: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2351: }
2352:
2353: static void *
2354: pool_page_alloc_meta(struct pool *pp, int flags)
2355: {
2356: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2357:
1.100 yamt 2358: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2359: }
2360:
2361: static void
2362: pool_page_free_meta(struct pool *pp, void *v)
2363: {
2364:
1.100 yamt 2365: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2366: }
2367:
2368: #ifdef POOL_SUBPAGE
2369: /* Sub-page allocator, for machines with large hardware pages. */
2370: void *
2371: pool_subpage_alloc(struct pool *pp, int flags)
2372: {
1.93 dbj 2373: void *v;
2374: int s;
2375: s = splvm();
2376: v = pool_get(&psppool, flags);
2377: splx(s);
2378: return v;
1.66 thorpej 2379: }
2380:
2381: void
2382: pool_subpage_free(struct pool *pp, void *v)
2383: {
1.93 dbj 2384: int s;
2385: s = splvm();
1.66 thorpej 2386: pool_put(&psppool, v);
1.93 dbj 2387: splx(s);
1.66 thorpej 2388: }
2389:
2390: /* We don't provide a real nointr allocator. Maybe later. */
2391: void *
1.112 bjh21 2392: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2393: {
2394:
2395: return (pool_subpage_alloc(pp, flags));
2396: }
2397:
2398: void
1.112 bjh21 2399: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2400: {
2401:
2402: pool_subpage_free(pp, v);
2403: }
1.112 bjh21 2404: #endif /* POOL_SUBPAGE */
1.66 thorpej 2405: void *
2406: pool_page_alloc_nointr(struct pool *pp, int flags)
2407: {
2408: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2409:
1.100 yamt 2410: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2411: }
2412:
2413: void
2414: pool_page_free_nointr(struct pool *pp, void *v)
2415: {
2416:
1.98 yamt 2417: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2418: }