Annotation of src/sys/kern/subr_pool.c, Revision 1.91
1.91 ! yamt 1: /* $NetBSD: subr_pool.c,v 1.90 2004/01/09 19:00:16 thorpej Exp $ */
1.1 pk 2:
3: /*-
1.43 thorpej 4: * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.91 ! yamt 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.90 2004/01/09 19:00:16 thorpej Exp $");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.3 pk 56:
57: #include <uvm/uvm.h>
58:
1.1 pk 59: /*
60: * Pool resource management utility.
1.3 pk 61: *
1.88 chs 62: * Memory is allocated in pages which are split into pieces according to
63: * the pool item size. Each page is kept on one of three lists in the
64: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
65: * for empty, full and partially-full pages respectively. The individual
66: * pool items are on a linked list headed by `ph_itemlist' in each page
67: * header. The memory for building the page list is either taken from
68: * the allocated pages themselves (for small pool items) or taken from
69: * an internal pool of page headers (`phpool').
1.1 pk 70: */
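/*
 * A minimal usage sketch (hedged: the "foo" names are hypothetical;
 * the calls match the interfaces defined in this file):
 *
 *	static struct pool foo_pool;
 *	struct foo *f;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	f = pool_get(&foo_pool, PR_WAITOK);
 *	...use f...
 *	pool_put(&foo_pool, f);
 *	pool_destroy(&foo_pool);
 */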
71:
1.3 pk 72: /* List of all pools */
1.5 thorpej 73: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.3 pk 74:
75: /* Private pool for page header structures */
76: static struct pool phpool;
77:
1.62 bjh21 78: #ifdef POOL_SUBPAGE
79: /* Pool of subpages for use by normal pools. */
80: static struct pool psppool;
81: #endif
82:
1.3 pk 83: /* # of seconds to retain page after last use */
84: int pool_inactive_time = 10;
85:
86: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 87: static struct pool *drainpp;
88:
89: /* This spin lock protects both pool_head and drainpp. */
90: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 91:
92: struct pool_item_header {
93: /* Page headers */
1.88 chs 94: LIST_ENTRY(pool_item_header)
1.3 pk 95: ph_pagelist; /* pool page list */
96: TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
1.88 chs 97: SPLAY_ENTRY(pool_item_header)
98: ph_node; /* Off-page page headers */
1.79 thorpej 99: unsigned int ph_nmissing; /* # of chunks in use */
1.3 pk 100: caddr_t ph_page; /* this page's address */
101: struct timeval ph_time; /* last referenced */
102: };
103:
1.1 pk 104: struct pool_item {
1.3 pk 105: #ifdef DIAGNOSTIC
1.82 thorpej 106: u_int pi_magic;
1.33 chs 107: #endif
1.82 thorpej 108: #define PI_MAGIC 0xdeadbeefU
1.3 pk 109: /* Other entries use only this list entry */
110: TAILQ_ENTRY(pool_item) pi_list;
111: };
112:
1.53 thorpej 113: #define POOL_NEEDS_CATCHUP(pp) \
114: ((pp)->pr_nitems < (pp)->pr_minitems)
115:
1.43 thorpej 116: /*
117: * Pool cache management.
118: *
119: * Pool caches provide a way for constructed objects to be cached by the
120: * pool subsystem. This can lead to performance improvements by avoiding
 121: * needless object construction/destruction; both are deferred until
 122: * absolutely necessary.
123: *
124: * Caches are grouped into cache groups. Each cache group references
125: * up to 16 constructed objects. When a cache allocates an object
126: * from the pool, it calls the object's constructor and places it into
127: * a cache group. When a cache group frees an object back to the pool,
128: * it first calls the object's destructor. This allows the object to
129: * persist in constructed form while freed to the cache.
130: *
131: * Multiple caches may exist for each pool. This allows a single
132: * object type to have multiple constructed forms. The pool references
133: * each cache, so that when a pool is drained by the pagedaemon, it can
134: * drain each individual cache as well. Each time a cache is drained,
135: * the most idle cache group is freed to the pool in its entirety.
136: *
137: * Pool caches are layed on top of pools. By layering them, we can avoid
138: * the complexity of cache management for pools which would not benefit
139: * from it.
140: */
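/*
 * An illustrative cache setup (a sketch; the "foo" names are
 * hypothetical). The constructor runs only when a fresh object must
 * be taken from the backing pool; objects recycled through the cache
 * are handed out still constructed:
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *	obj = pool_cache_get_paddr(&foo_cache, PR_WAITOK, NULL);
 *	...use obj...
 *	pool_cache_put_paddr(&foo_cache, obj, POOL_PADDR_INVALID);
 */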
141:
142: /* The cache group pool. */
143: static struct pool pcgpool;
1.3 pk 144:
1.43 thorpej 145: static void pool_cache_reclaim(struct pool_cache *);
1.3 pk 146:
1.42 thorpej 147: static int pool_catchup(struct pool *);
1.55 thorpej 148: static void pool_prime_page(struct pool *, caddr_t,
149: struct pool_item_header *);
1.88 chs 150: static void pool_update_curpage(struct pool *);
1.66 thorpej 151:
152: void *pool_allocator_alloc(struct pool *, int);
153: void pool_allocator_free(struct pool *, void *);
1.3 pk 154:
1.88 chs 155: static void pool_print_pagelist(struct pool_pagelist *,
156: void (*)(const char *, ...));
1.42 thorpej 157: static void pool_print1(struct pool *, const char *,
158: void (*)(const char *, ...));
1.3 pk 159:
1.88 chs 160: static int pool_chk_page(struct pool *, const char *,
161: struct pool_item_header *);
162:
1.3 pk 163: /*
1.52 thorpej 164: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 165: */
166: struct pool_log {
167: const char *pl_file;
168: long pl_line;
169: int pl_action;
1.25 thorpej 170: #define PRLOG_GET 1
171: #define PRLOG_PUT 2
1.3 pk 172: void *pl_addr;
1.1 pk 173: };
174:
1.86 matt 175: #ifdef POOL_DIAGNOSTIC
1.3 pk 176: /* Number of entries in pool log buffers */
1.17 thorpej 177: #ifndef POOL_LOGSIZE
178: #define POOL_LOGSIZE 10
179: #endif
180:
181: int pool_logsize = POOL_LOGSIZE;
1.1 pk 182:
1.42 thorpej 183: static __inline void
184: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 185: {
186: int n = pp->pr_curlogentry;
187: struct pool_log *pl;
188:
1.20 thorpej 189: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 190: return;
191:
192: /*
193: * Fill in the current entry. Wrap around and overwrite
194: * the oldest entry if necessary.
195: */
196: pl = &pp->pr_log[n];
197: pl->pl_file = file;
198: pl->pl_line = line;
199: pl->pl_action = action;
200: pl->pl_addr = v;
201: if (++n >= pp->pr_logsize)
202: n = 0;
203: pp->pr_curlogentry = n;
204: }
205:
206: static void
1.42 thorpej 207: pr_printlog(struct pool *pp, struct pool_item *pi,
208: void (*pr)(const char *, ...))
1.3 pk 209: {
210: int i = pp->pr_logsize;
211: int n = pp->pr_curlogentry;
212:
1.20 thorpej 213: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 214: return;
215:
216: /*
217: * Print all entries in this pool's log.
218: */
219: while (i-- > 0) {
220: struct pool_log *pl = &pp->pr_log[n];
221: if (pl->pl_action != 0) {
1.25 thorpej 222: if (pi == NULL || pi == pl->pl_addr) {
223: (*pr)("\tlog entry %d:\n", i);
224: (*pr)("\t\taction = %s, addr = %p\n",
225: pl->pl_action == PRLOG_GET ? "get" : "put",
226: pl->pl_addr);
227: (*pr)("\t\tfile: %s at line %lu\n",
228: pl->pl_file, pl->pl_line);
229: }
1.3 pk 230: }
231: if (++n >= pp->pr_logsize)
232: n = 0;
233: }
234: }
1.25 thorpej 235:
1.42 thorpej 236: static __inline void
237: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 238: {
239:
1.34 thorpej 240: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 241: printf("pool %s: reentrancy at file %s line %ld\n",
242: pp->pr_wchan, file, line);
243: printf(" previous entry at file %s line %ld\n",
244: pp->pr_entered_file, pp->pr_entered_line);
245: panic("pr_enter");
246: }
247:
248: pp->pr_entered_file = file;
249: pp->pr_entered_line = line;
250: }
251:
1.42 thorpej 252: static __inline void
253: pr_leave(struct pool *pp)
1.25 thorpej 254: {
255:
1.34 thorpej 256: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 257: printf("pool %s not entered?\n", pp->pr_wchan);
258: panic("pr_leave");
259: }
260:
261: pp->pr_entered_file = NULL;
262: pp->pr_entered_line = 0;
263: }
264:
1.42 thorpej 265: static __inline void
266: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 267: {
268:
269: if (pp->pr_entered_file != NULL)
270: (*pr)("\n\tcurrently entered from file %s line %ld\n",
271: pp->pr_entered_file, pp->pr_entered_line);
272: }
1.3 pk 273: #else
1.25 thorpej 274: #define pr_log(pp, v, action, file, line)
275: #define pr_printlog(pp, pi, pr)
276: #define pr_enter(pp, file, line)
277: #define pr_leave(pp)
278: #define pr_enter_check(pp, pr)
1.59 thorpej 279: #endif /* POOL_DIAGNOSTIC */
1.3 pk 280:
1.88 chs 281: static __inline int
282: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
283: {
284: if (a->ph_page < b->ph_page)
285: return (-1);
286: else if (a->ph_page > b->ph_page)
287: return (1);
288: else
289: return (0);
290: }
291:
292: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
293: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
294:
1.3 pk 295: /*
296: * Return the pool page header based on page address.
297: */
1.42 thorpej 298: static __inline struct pool_item_header *
299: pr_find_pagehead(struct pool *pp, caddr_t page)
1.3 pk 300: {
1.88 chs 301: struct pool_item_header *ph, tmp;
1.3 pk 302:
1.20 thorpej 303: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 304: return ((struct pool_item_header *)(page + pp->pr_phoffset));
305:
1.88 chs 306: tmp.ph_page = page;
307: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
308: return ph;
1.3 pk 309: }
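/*
 * An illustrative call (this is how pool_do_put() below uses it):
 * the page base is derived by masking the item address, then the
 * header is looked up:
 *
 *	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
 *	ph = pr_find_pagehead(pp, page);
 */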
310:
311: /*
312: * Remove a page from the pool.
313: */
1.42 thorpej 314: static __inline void
1.61 chs 315: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
316: struct pool_pagelist *pq)
1.3 pk 317: {
1.61 chs 318: int s;
1.3 pk 319:
1.91 ! yamt 320: LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);
! 321:
1.3 pk 322: /*
1.7 thorpej 323: * If the page was idle, decrement the idle page count.
1.3 pk 324: */
1.6 thorpej 325: if (ph->ph_nmissing == 0) {
326: #ifdef DIAGNOSTIC
327: if (pp->pr_nidle == 0)
328: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 329: if (pp->pr_nitems < pp->pr_itemsperpage)
330: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 331: #endif
332: pp->pr_nidle--;
333: }
1.7 thorpej 334:
1.20 thorpej 335: pp->pr_nitems -= pp->pr_itemsperpage;
336:
1.7 thorpej 337: /*
1.61 chs 338: * Unlink a page from the pool and release it (or queue it for release).
1.7 thorpej 339: */
1.88 chs 340: LIST_REMOVE(ph, ph_pagelist);
1.91 ! yamt 341: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
! 342: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.61 chs 343: if (pq) {
1.88 chs 344: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
1.61 chs 345: } else {
1.66 thorpej 346: pool_allocator_free(pp, ph->ph_page);
1.61 chs 347: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
1.85 pk 348: s = splvm();
1.61 chs 349: pool_put(&phpool, ph);
350: splx(s);
351: }
352: }
1.7 thorpej 353: pp->pr_npages--;
354: pp->pr_npagefree++;
1.6 thorpej 355:
1.88 chs 356: pool_update_curpage(pp);
1.3 pk 357: }
358:
359: /*
360: * Initialize the given pool resource structure.
361: *
362: * We export this routine to allow other kernel parts to declare
363: * static pools that must be initialized before malloc() is available.
364: */
365: void
1.42 thorpej 366: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.66 thorpej 367: const char *wchan, struct pool_allocator *palloc)
1.3 pk 368: {
1.88 chs 369: int off, slack;
1.3 pk 370:
1.25 thorpej 371: #ifdef POOL_DIAGNOSTIC
372: /*
373: * Always log if POOL_DIAGNOSTIC is defined.
374: */
375: if (pool_logsize != 0)
376: flags |= PR_LOGGING;
377: #endif
378:
1.66 thorpej 379: #ifdef POOL_SUBPAGE
380: /*
381: * XXX We don't provide a real `nointr' back-end
382: * yet; all sub-pages come from a kmem back-end.
383: * maybe some day...
384: */
385: if (palloc == NULL) {
386: extern struct pool_allocator pool_allocator_kmem_subpage;
387: palloc = &pool_allocator_kmem_subpage;
388: }
1.3 pk 389: /*
1.66 thorpej 390: * We'll assume any user-specified back-end allocator
 391: * will deal with sub-pages, or simply doesn't care.
1.3 pk 392: */
1.66 thorpej 393: #else
394: if (palloc == NULL)
395: palloc = &pool_allocator_kmem;
396: #endif /* POOL_SUBPAGE */
397: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
398: if (palloc->pa_pagesz == 0) {
1.62 bjh21 399: #ifdef POOL_SUBPAGE
1.66 thorpej 400: if (palloc == &pool_allocator_kmem)
401: palloc->pa_pagesz = PAGE_SIZE;
402: else
403: palloc->pa_pagesz = POOL_SUBPAGE;
1.62 bjh21 404: #else
1.66 thorpej 405: palloc->pa_pagesz = PAGE_SIZE;
406: #endif /* POOL_SUBPAGE */
407: }
408:
409: TAILQ_INIT(&palloc->pa_list);
410:
411: simple_lock_init(&palloc->pa_slock);
412: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
413: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
414: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 415: }
1.3 pk 416:
417: if (align == 0)
418: align = ALIGN(1);
1.14 thorpej 419:
420: if (size < sizeof(struct pool_item))
421: size = sizeof(struct pool_item);
1.3 pk 422:
1.78 thorpej 423: size = roundup(size, align);
1.66 thorpej 424: #ifdef DIAGNOSTIC
425: if (size > palloc->pa_pagesz)
1.35 pk 426: panic("pool_init: pool item size (%lu) too large",
427: (u_long)size);
1.66 thorpej 428: #endif
1.35 pk 429:
1.3 pk 430: /*
431: * Initialize the pool structure.
432: */
1.88 chs 433: LIST_INIT(&pp->pr_emptypages);
434: LIST_INIT(&pp->pr_fullpages);
435: LIST_INIT(&pp->pr_partpages);
1.43 thorpej 436: TAILQ_INIT(&pp->pr_cachelist);
1.3 pk 437: pp->pr_curpage = NULL;
438: pp->pr_npages = 0;
439: pp->pr_minitems = 0;
440: pp->pr_minpages = 0;
441: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 442: pp->pr_roflags = flags;
443: pp->pr_flags = 0;
1.35 pk 444: pp->pr_size = size;
1.3 pk 445: pp->pr_align = align;
446: pp->pr_wchan = wchan;
1.66 thorpej 447: pp->pr_alloc = palloc;
1.20 thorpej 448: pp->pr_nitems = 0;
449: pp->pr_nout = 0;
450: pp->pr_hardlimit = UINT_MAX;
451: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 452: pp->pr_hardlimit_ratecap.tv_sec = 0;
453: pp->pr_hardlimit_ratecap.tv_usec = 0;
454: pp->pr_hardlimit_warning_last.tv_sec = 0;
455: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 456: pp->pr_drain_hook = NULL;
457: pp->pr_drain_hook_arg = NULL;
1.3 pk 458:
459: /*
460: * Decide whether to put the page header off page to avoid
461: * wasting too large a part of the page. Off-page page headers
 462: * go on a splay tree, so we can match a returned item
463: * with its header based on the page address.
464: * We use 1/16 of the page size as the threshold (XXX: tune)
465: */
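	/*
	 * For example, with a 4096-byte page the cut-off is 256 bytes:
	 * items of 256 bytes and larger get their header from phpool.
	 */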
1.66 thorpej 466: if (pp->pr_size < palloc->pa_pagesz/16) {
1.3 pk 467: /* Use the end of the page for the page header */
1.20 thorpej 468: pp->pr_roflags |= PR_PHINPAGE;
1.66 thorpej 469: pp->pr_phoffset = off = palloc->pa_pagesz -
470: ALIGN(sizeof(struct pool_item_header));
1.2 pk 471: } else {
1.3 pk 472: /* The page header will be taken from our page header pool */
473: pp->pr_phoffset = 0;
1.66 thorpej 474: off = palloc->pa_pagesz;
1.88 chs 475: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 476: }
1.1 pk 477:
1.3 pk 478: /*
479: * Alignment is to take place at `ioff' within the item. This means
480: * we must reserve up to `align - 1' bytes on the page to allow
481: * appropriate positioning of each item.
482: *
483: * Silently enforce `0 <= ioff < align'.
484: */
485: pp->pr_itemoffset = ioff = ioff % align;
486: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 487: KASSERT(pp->pr_itemsperpage != 0);
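	/*
	 * Worked example (illustrative numbers): off = 4096, size = 256,
	 * align = 64, ioff = 16. Then (align - ioff) % align = 48 bytes
	 * are set aside so that each (item + 16) is 64-byte aligned, and
	 * pr_itemsperpage = (4096 - 48) / 256 = 15.
	 */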
1.3 pk 488:
489: /*
490: * Use the slack between the chunks and the page header
491: * for "cache coloring".
492: */
493: slack = off - pp->pr_itemsperpage * pp->pr_size;
494: pp->pr_maxcolor = (slack / align) * align;
495: pp->pr_curcolor = 0;
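	/*
	 * The effect (see pool_prime_page()): successive pages start
	 * their run of items at offsets 0, align, 2*align, ... wrapping
	 * at pr_maxcolor, so equal-index items on different pages do
	 * not all collide on the same cache lines.
	 */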
496:
497: pp->pr_nget = 0;
498: pp->pr_nfail = 0;
499: pp->pr_nput = 0;
500: pp->pr_npagealloc = 0;
501: pp->pr_npagefree = 0;
1.1 pk 502: pp->pr_hiwat = 0;
1.8 thorpej 503: pp->pr_nidle = 0;
1.3 pk 504:
1.59 thorpej 505: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 506: if (flags & PR_LOGGING) {
507: if (kmem_map == NULL ||
508: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
509: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 510: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 511: pp->pr_curlogentry = 0;
512: pp->pr_logsize = pool_logsize;
513: }
1.59 thorpej 514: #endif
1.25 thorpej 515:
516: pp->pr_entered_file = NULL;
517: pp->pr_entered_line = 0;
1.3 pk 518:
1.21 thorpej 519: simple_lock_init(&pp->pr_slock);
1.1 pk 520:
1.3 pk 521: /*
1.43 thorpej 522: * Initialize private page header pool and cache magazine pool if we
523: * haven't done so yet.
1.23 thorpej 524: * XXX LOCKING.
1.3 pk 525: */
526: if (phpool.pr_size == 0) {
1.62 bjh21 527: #ifdef POOL_SUBPAGE
528: pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
1.66 thorpej 529: "phpool", &pool_allocator_kmem);
1.62 bjh21 530: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.66 thorpej 531: PR_RECURSIVE, "psppool", &pool_allocator_kmem);
1.62 bjh21 532: #else
1.3 pk 533: pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
1.66 thorpej 534: 0, "phpool", NULL);
1.62 bjh21 535: #endif
1.43 thorpej 536: pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.66 thorpej 537: 0, "pcgpool", NULL);
1.1 pk 538: }
539:
1.23 thorpej 540: /* Insert into the list of all pools. */
541: simple_lock(&pool_head_slock);
542: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
543: simple_unlock(&pool_head_slock);
1.66 thorpej 544:
545: /* Insert this into the list of pools using this allocator. */
546: simple_lock(&palloc->pa_slock);
547: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
548: simple_unlock(&palloc->pa_slock);
1.1 pk 549: }
550:
551: /*
 552: * De-commission a pool resource.
553: */
554: void
1.42 thorpej 555: pool_destroy(struct pool *pp)
1.1 pk 556: {
1.3 pk 557: struct pool_item_header *ph;
1.43 thorpej 558: struct pool_cache *pc;
559:
1.66 thorpej 560: /* Locking order: pool_allocator -> pool */
561: simple_lock(&pp->pr_alloc->pa_slock);
562: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
563: simple_unlock(&pp->pr_alloc->pa_slock);
564:
1.43 thorpej 565: /* Destroy all caches for this pool. */
566: while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
567: pool_cache_destroy(pc);
1.3 pk 568:
569: #ifdef DIAGNOSTIC
1.20 thorpej 570: if (pp->pr_nout != 0) {
1.25 thorpej 571: pr_printlog(pp, NULL, printf);
1.80 provos 572: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 573: pp->pr_nout);
1.3 pk 574: }
575: #endif
1.1 pk 576:
1.3 pk 577: /* Remove all pages */
1.88 chs 578: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.70 thorpej 579: pr_rmpage(pp, ph, NULL);
1.88 chs 580: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
581: KASSERT(LIST_EMPTY(&pp->pr_partpages));
1.3 pk 582:
583: /* Remove from global pool list */
1.23 thorpej 584: simple_lock(&pool_head_slock);
1.3 pk 585: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.61 chs 586: if (drainpp == pp) {
587: drainpp = NULL;
588: }
1.23 thorpej 589: simple_unlock(&pool_head_slock);
1.3 pk 590:
1.59 thorpej 591: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 592: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 593: free(pp->pr_log, M_TEMP);
1.59 thorpej 594: #endif
1.1 pk 595: }
596:
1.68 thorpej 597: void
598: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
599: {
600:
601: /* XXX no locking -- must be used just after pool_init() */
602: #ifdef DIAGNOSTIC
603: if (pp->pr_drain_hook != NULL)
604: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
605: #endif
606: pp->pr_drain_hook = fn;
607: pp->pr_drain_hook_arg = arg;
608: }
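/*
 * Example registration (a sketch; the "foo" names are hypothetical).
 * The hook is invoked with the pool unlocked, both from pool_reclaim()
 * and when pool_get() hits the hard limit, and receives the PR_* flags
 * of the triggering request:
 *
 *	static void foo_drain(void *arg, int flags);
 *	...
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */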
609:
1.88 chs 610: static struct pool_item_header *
1.55 thorpej 611: pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
612: {
613: struct pool_item_header *ph;
614: int s;
615:
616: LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
617:
618: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
619: ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
620: else {
1.85 pk 621: s = splvm();
1.55 thorpej 622: ph = pool_get(&phpool, flags);
623: splx(s);
624: }
625:
626: return (ph);
627: }
1.1 pk 628:
629: /*
1.3 pk 630: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 631: */
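/*
 * For pools also used from interrupt context, callers bracket the
 * call with an spl, as this file itself does for phpool (a sketch):
 *
 *	s = splvm();
 *	v = pool_get(&foo_pool, PR_NOWAIT);
 *	splx(s);
 */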
1.3 pk 632: void *
1.59 thorpej 633: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 634: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 635: #else
636: pool_get(struct pool *pp, int flags)
637: #endif
1.1 pk 638: {
639: struct pool_item *pi;
1.3 pk 640: struct pool_item_header *ph;
1.55 thorpej 641: void *v;
1.1 pk 642:
1.2 pk 643: #ifdef DIAGNOSTIC
1.84 thorpej 644: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 645: (flags & PR_WAITOK) != 0))
1.77 matt 646: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 647:
648: #ifdef LOCKDEBUG
649: if (flags & PR_WAITOK)
650: simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 651: #endif
1.58 thorpej 652: #endif /* DIAGNOSTIC */
1.1 pk 653:
1.21 thorpej 654: simple_lock(&pp->pr_slock);
1.25 thorpej 655: pr_enter(pp, file, line);
1.20 thorpej 656:
657: startover:
658: /*
659: * Check to see if we've reached the hard limit. If we have,
660: * and we can wait, then wait until an item has been returned to
661: * the pool.
662: */
663: #ifdef DIAGNOSTIC
1.34 thorpej 664: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 665: pr_leave(pp);
1.21 thorpej 666: simple_unlock(&pp->pr_slock);
1.20 thorpej 667: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
668: }
669: #endif
1.34 thorpej 670: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 671: if (pp->pr_drain_hook != NULL) {
672: /*
673: * Since the drain hook is going to free things
674: * back to the pool, unlock, call the hook, re-lock,
675: * and check the hardlimit condition again.
676: */
677: pr_leave(pp);
678: simple_unlock(&pp->pr_slock);
679: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
680: simple_lock(&pp->pr_slock);
681: pr_enter(pp, file, line);
682: if (pp->pr_nout < pp->pr_hardlimit)
683: goto startover;
684: }
685:
1.29 sommerfe 686: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 687: /*
688: * XXX: A warning isn't logged in this case. Should
689: * it be?
690: */
691: pp->pr_flags |= PR_WANTED;
1.25 thorpej 692: pr_leave(pp);
1.40 sommerfe 693: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 694: pr_enter(pp, file, line);
1.20 thorpej 695: goto startover;
696: }
1.31 thorpej 697:
698: /*
699: * Log a message that the hard limit has been hit.
700: */
701: if (pp->pr_hardlimit_warning != NULL &&
702: ratecheck(&pp->pr_hardlimit_warning_last,
703: &pp->pr_hardlimit_ratecap))
704: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 705:
706: pp->pr_nfail++;
707:
1.25 thorpej 708: pr_leave(pp);
1.21 thorpej 709: simple_unlock(&pp->pr_slock);
1.20 thorpej 710: return (NULL);
711: }
712:
1.3 pk 713: /*
714: * The convention we use is that if `curpage' is not NULL, then
715: * it points at a non-empty bucket. In particular, `curpage'
716: * never points at a page header which has PR_PHINPAGE set and
717: * has no items in its bucket.
718: */
1.20 thorpej 719: if ((ph = pp->pr_curpage) == NULL) {
720: #ifdef DIAGNOSTIC
721: if (pp->pr_nitems != 0) {
1.21 thorpej 722: simple_unlock(&pp->pr_slock);
1.20 thorpej 723: printf("pool_get: %s: curpage NULL, nitems %u\n",
724: pp->pr_wchan, pp->pr_nitems);
1.80 provos 725: panic("pool_get: nitems inconsistent");
1.20 thorpej 726: }
727: #endif
728:
1.21 thorpej 729: /*
730: * Call the back-end page allocator for more memory.
731: * Release the pool lock, as the back-end page allocator
732: * may block.
733: */
1.25 thorpej 734: pr_leave(pp);
1.21 thorpej 735: simple_unlock(&pp->pr_slock);
1.66 thorpej 736: v = pool_allocator_alloc(pp, flags);
1.55 thorpej 737: if (__predict_true(v != NULL))
738: ph = pool_alloc_item_header(pp, v, flags);
1.15 pk 739:
1.55 thorpej 740: if (__predict_false(v == NULL || ph == NULL)) {
741: if (v != NULL)
1.66 thorpej 742: pool_allocator_free(pp, v);
1.55 thorpej 743:
1.91 ! yamt 744: simple_lock(&pp->pr_slock);
! 745: pr_enter(pp, file, line);
! 746:
1.21 thorpej 747: /*
1.55 thorpej 748: * We were unable to allocate a page or item
749: * header, but we released the lock during
750: * allocation, so perhaps items were freed
751: * back to the pool. Check for this case.
1.21 thorpej 752: */
753: if (pp->pr_curpage != NULL)
754: goto startover;
1.15 pk 755:
1.3 pk 756: if ((flags & PR_WAITOK) == 0) {
757: pp->pr_nfail++;
1.25 thorpej 758: pr_leave(pp);
1.21 thorpej 759: simple_unlock(&pp->pr_slock);
1.1 pk 760: return (NULL);
1.3 pk 761: }
762:
1.15 pk 763: /*
764: * Wait for items to be returned to this pool.
1.21 thorpej 765: *
1.20 thorpej 766: * XXX: maybe we should wake up once a second and
767: * try again?
1.15 pk 768: */
1.1 pk 769: pp->pr_flags |= PR_WANTED;
1.66 thorpej 770: /* PA_WANTED is already set on the allocator. */
1.25 thorpej 771: pr_leave(pp);
1.40 sommerfe 772: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 773: pr_enter(pp, file, line);
1.20 thorpej 774: goto startover;
1.1 pk 775: }
1.3 pk 776:
1.15 pk 777: /* We have more memory; add it to the pool */
1.91 ! yamt 778: simple_lock(&pp->pr_slock);
! 779: pr_enter(pp, file, line);
1.55 thorpej 780: pool_prime_page(pp, v, ph);
1.15 pk 781: pp->pr_npagealloc++;
782:
1.20 thorpej 783: /* Start the allocation process over. */
784: goto startover;
1.3 pk 785: }
1.34 thorpej 786: if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
1.25 thorpej 787: pr_leave(pp);
1.21 thorpej 788: simple_unlock(&pp->pr_slock);
1.3 pk 789: panic("pool_get: %s: page empty", pp->pr_wchan);
1.21 thorpej 790: }
1.20 thorpej 791: #ifdef DIAGNOSTIC
1.34 thorpej 792: if (__predict_false(pp->pr_nitems == 0)) {
1.25 thorpej 793: pr_leave(pp);
1.21 thorpej 794: simple_unlock(&pp->pr_slock);
1.20 thorpej 795: printf("pool_get: %s: items on itemlist, nitems %u\n",
796: pp->pr_wchan, pp->pr_nitems);
1.80 provos 797: panic("pool_get: nitems inconsistent");
1.20 thorpej 798: }
1.65 enami 799: #endif
1.56 sommerfe 800:
1.65 enami 801: #ifdef POOL_DIAGNOSTIC
1.3 pk 802: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 803: #endif
1.3 pk 804:
1.65 enami 805: #ifdef DIAGNOSTIC
1.34 thorpej 806: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1.25 thorpej 807: pr_printlog(pp, pi, printf);
1.3 pk 808: panic("pool_get(%s): free list modified: magic=%x; page %p;"
809: " item addr %p\n",
810: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
811: }
812: #endif
813:
814: /*
815: * Remove from item list.
816: */
817: TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
1.20 thorpej 818: pp->pr_nitems--;
819: pp->pr_nout++;
1.6 thorpej 820: if (ph->ph_nmissing == 0) {
821: #ifdef DIAGNOSTIC
1.34 thorpej 822: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 823: panic("pool_get: nidle inconsistent");
824: #endif
825: pp->pr_nidle--;
1.88 chs 826:
827: /*
828: * This page was previously empty. Move it to the list of
829: * partially-full pages. This page is already curpage.
830: */
831: LIST_REMOVE(ph, ph_pagelist);
832: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 833: }
1.3 pk 834: ph->ph_nmissing++;
1.88 chs 835: if (TAILQ_EMPTY(&ph->ph_itemlist)) {
1.21 thorpej 836: #ifdef DIAGNOSTIC
1.34 thorpej 837: if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
1.25 thorpej 838: pr_leave(pp);
1.21 thorpej 839: simple_unlock(&pp->pr_slock);
840: panic("pool_get: %s: nmissing inconsistent",
841: pp->pr_wchan);
842: }
843: #endif
1.3 pk 844: /*
1.88 chs 845: * This page is now full. Move it to the full list
846: * and select a new current page.
1.3 pk 847: */
1.88 chs 848: LIST_REMOVE(ph, ph_pagelist);
849: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
850: pool_update_curpage(pp);
1.1 pk 851: }
1.3 pk 852:
853: pp->pr_nget++;
1.20 thorpej 854:
855: /*
856: * If we have a low water mark and we are now below that low
857: * water mark, add more items to the pool.
858: */
1.53 thorpej 859: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 860: /*
861: * XXX: Should we log a warning? Should we set up a timeout
862: * to try again in a second or so? The latter could break
863: * a caller's assumptions about interrupt protection, etc.
864: */
865: }
866:
1.25 thorpej 867: pr_leave(pp);
1.21 thorpej 868: simple_unlock(&pp->pr_slock);
1.1 pk 869: return (v);
870: }
871:
872: /*
1.43 thorpej 873: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 874: */
1.43 thorpej 875: static void
1.56 sommerfe 876: pool_do_put(struct pool *pp, void *v)
1.1 pk 877: {
878: struct pool_item *pi = v;
1.3 pk 879: struct pool_item_header *ph;
880: caddr_t page;
1.21 thorpej 881: int s;
1.3 pk 882:
1.61 chs 883: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
884:
1.66 thorpej 885: page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1.1 pk 886:
1.30 thorpej 887: #ifdef DIAGNOSTIC
1.34 thorpej 888: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 889: printf("pool %s: putting with none out\n",
890: pp->pr_wchan);
891: panic("pool_put");
892: }
893: #endif
1.3 pk 894:
1.34 thorpej 895: if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25 thorpej 896: pr_printlog(pp, NULL, printf);
1.3 pk 897: panic("pool_put: %s: page header missing", pp->pr_wchan);
898: }
1.28 thorpej 899:
900: #ifdef LOCKDEBUG
901: /*
902: * Check if we're freeing a locked simple lock.
903: */
904: simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
905: #endif
1.3 pk 906:
907: /*
908: * Return to item list.
909: */
1.2 pk 910: #ifdef DIAGNOSTIC
1.3 pk 911: pi->pi_magic = PI_MAGIC;
912: #endif
1.32 chs 913: #ifdef DEBUG
914: {
915: int i, *ip = v;
916:
917: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
918: *ip++ = PI_MAGIC;
919: }
920: }
921: #endif
922:
1.3 pk 923: TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.79 thorpej 924: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 925: ph->ph_nmissing--;
926: pp->pr_nput++;
1.20 thorpej 927: pp->pr_nitems++;
928: pp->pr_nout--;
1.3 pk 929:
930: /* Cancel "pool empty" condition if it exists */
931: if (pp->pr_curpage == NULL)
932: pp->pr_curpage = ph;
933:
934: if (pp->pr_flags & PR_WANTED) {
935: pp->pr_flags &= ~PR_WANTED;
1.15 pk 936: if (ph->ph_nmissing == 0)
937: pp->pr_nidle++;
1.3 pk 938: wakeup((caddr_t)pp);
939: return;
940: }
941:
942: /*
1.88 chs 943: * If this page is now empty, do one of two things:
1.21 thorpej 944: *
1.88 chs 945: * (1) If we have more pages than the page high water mark,
1.90 thorpej 946: * or if we are flagged as immediately freeing back idle
947: * pages, free the page back to the system. ONLY CONSIDER
948: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
949: * CLAIM.
1.21 thorpej 950: *
1.88 chs 951: * (2) Otherwise, move the page to the empty page list.
952: *
953: * Either way, select a new current page (so we use a partially-full
954: * page if one is available).
1.3 pk 955: */
956: if (ph->ph_nmissing == 0) {
1.6 thorpej 957: pp->pr_nidle++;
1.90 thorpej 958: if (pp->pr_npages > pp->pr_minpages &&
959: (pp->pr_npages > pp->pr_maxpages ||
960: (pp->pr_roflags & PR_IMMEDRELEASE) != 0 ||
961: (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
1.91 ! yamt 962: simple_unlock(&pp->pr_slock);
1.61 chs 963: pr_rmpage(pp, ph, NULL);
1.91 ! yamt 964: simple_lock(&pp->pr_slock);
1.3 pk 965: } else {
1.88 chs 966: LIST_REMOVE(ph, ph_pagelist);
967: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 968:
1.21 thorpej 969: /*
970: * Update the timestamp on the page. A page must
971: * be idle for some period of time before it can
972: * be reclaimed by the pagedaemon. This minimizes
973: * ping-pong'ing for memory.
974: */
975: s = splclock();
976: ph->ph_time = mono_time;
977: splx(s);
1.1 pk 978: }
1.88 chs 979: pool_update_curpage(pp);
1.1 pk 980: }
1.88 chs 981:
1.21 thorpej 982: /*
1.88 chs 983: * If the page was previously completely full, move it to the
984: * partially-full list and make it the current page. The next
985: * allocation will get the item from this page, instead of
986: * further fragmenting the pool.
1.21 thorpej 987: */
988: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 989: LIST_REMOVE(ph, ph_pagelist);
990: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 991: pp->pr_curpage = ph;
992: }
1.43 thorpej 993: }
994:
995: /*
996: * Return resource to the pool; must be called at appropriate spl level
997: */
1.59 thorpej 998: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 999: void
1000: _pool_put(struct pool *pp, void *v, const char *file, long line)
1001: {
1002:
1003: simple_lock(&pp->pr_slock);
1004: pr_enter(pp, file, line);
1005:
1.56 sommerfe 1006: pr_log(pp, v, PRLOG_PUT, file, line);
1007:
1008: pool_do_put(pp, v);
1.21 thorpej 1009:
1.25 thorpej 1010: pr_leave(pp);
1.21 thorpej 1011: simple_unlock(&pp->pr_slock);
1.1 pk 1012: }
1.57 sommerfe 1013: #undef pool_put
1.59 thorpej 1014: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1015:
1.56 sommerfe 1016: void
1017: pool_put(struct pool *pp, void *v)
1018: {
1019:
1020: simple_lock(&pp->pr_slock);
1021:
1022: pool_do_put(pp, v);
1023:
1024: simple_unlock(&pp->pr_slock);
1025: }
1.57 sommerfe 1026:
1.59 thorpej 1027: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1028: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1029: #endif
1.74 thorpej 1030:
1031: /*
 1032: * Add N items to the pool (rounded up to whole pages).
1033: */
1034: int
1035: pool_prime(struct pool *pp, int n)
1036: {
1.83 scw 1037: struct pool_item_header *ph = NULL;
1.74 thorpej 1038: caddr_t cp;
1.75 simonb 1039: int newpages;
1.74 thorpej 1040:
1041: simple_lock(&pp->pr_slock);
1042:
1043: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1044:
1045: while (newpages-- > 0) {
1046: simple_unlock(&pp->pr_slock);
1047: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1048: if (__predict_true(cp != NULL))
1049: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1050:
1051: if (__predict_false(cp == NULL || ph == NULL)) {
1052: if (cp != NULL)
1053: pool_allocator_free(pp, cp);
1.91 ! yamt 1054: simple_lock(&pp->pr_slock);
1.74 thorpej 1055: break;
1056: }
1057:
1.91 ! yamt 1058: simple_lock(&pp->pr_slock);
1.74 thorpej 1059: pool_prime_page(pp, cp, ph);
1060: pp->pr_npagealloc++;
1061: pp->pr_minpages++;
1062: }
1063:
1064: if (pp->pr_minpages >= pp->pr_maxpages)
1065: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1066:
1067: simple_unlock(&pp->pr_slock);
1068: return (0);
1069: }
1.55 thorpej 1070:
1071: /*
1.3 pk 1072: * Add a page worth of items to the pool.
1.21 thorpej 1073: *
1074: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1075: */
1.55 thorpej 1076: static void
1077: pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1.3 pk 1078: {
1079: struct pool_item *pi;
1080: caddr_t cp = storage;
1081: unsigned int align = pp->pr_align;
1082: unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1083: int n;
1.89 yamt 1084: int s;
1.36 pk 1085:
1.91 ! yamt 1086: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
! 1087:
1.66 thorpej 1088: #ifdef DIAGNOSTIC
1089: if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1090: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1091: #endif
1.3 pk 1092:
1093: /*
1094: * Insert page header.
1095: */
1.88 chs 1096: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1097: TAILQ_INIT(&ph->ph_itemlist);
1098: ph->ph_page = storage;
1099: ph->ph_nmissing = 0;
1.89 yamt 1100: s = splclock();
1101: ph->ph_time = mono_time;
1102: splx(s);
1.88 chs 1103: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1104: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1105:
1.6 thorpej 1106: pp->pr_nidle++;
1107:
1.3 pk 1108: /*
1109: * Color this page.
1110: */
1111: cp = (caddr_t)(cp + pp->pr_curcolor);
1112: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1113: pp->pr_curcolor = 0;
1114:
1115: /*
 1116: * Adjust storage to apply alignment at `pr_itemoffset' in each item.
1117: */
1118: if (ioff != 0)
1119: cp = (caddr_t)(cp + (align - ioff));
1120:
1121: /*
1122: * Insert remaining chunks on the bucket list.
1123: */
1124: n = pp->pr_itemsperpage;
1.20 thorpej 1125: pp->pr_nitems += n;
1.3 pk 1126:
1127: while (n--) {
1128: pi = (struct pool_item *)cp;
1.78 thorpej 1129:
1130: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1131:
1132: /* Insert on page list */
1133: TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1134: #ifdef DIAGNOSTIC
1135: pi->pi_magic = PI_MAGIC;
1136: #endif
1137: cp = (caddr_t)(cp + pp->pr_size);
1138: }
1139:
1140: /*
1141: * If the pool was depleted, point at the new page.
1142: */
1143: if (pp->pr_curpage == NULL)
1144: pp->pr_curpage = ph;
1145:
1146: if (++pp->pr_npages > pp->pr_hiwat)
1147: pp->pr_hiwat = pp->pr_npages;
1148: }
1149:
1.20 thorpej 1150: /*
1.52 thorpej 1151: * Used by pool_get() when nitems drops below the low water mark. This
1.88 chs 1152: * allocates pages until pr_nitems catches up with the low water mark.
1.20 thorpej 1153: *
1.21 thorpej 1154: * Note 1, we never wait for memory here; we let the caller decide what to do.
1.20 thorpej 1155: *
1.73 thorpej 1156: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1157: * with it locked.
1158: */
1159: static int
1.42 thorpej 1160: pool_catchup(struct pool *pp)
1.20 thorpej 1161: {
1.83 scw 1162: struct pool_item_header *ph = NULL;
1.20 thorpej 1163: caddr_t cp;
1164: int error = 0;
1165:
1.54 thorpej 1166: while (POOL_NEEDS_CATCHUP(pp)) {
1.20 thorpej 1167: /*
1.21 thorpej 1168: * Call the page back-end allocator for more memory.
1169: *
1170: * XXX: We never wait, so should we bother unlocking
1171: * the pool descriptor?
1.20 thorpej 1172: */
1.21 thorpej 1173: simple_unlock(&pp->pr_slock);
1.66 thorpej 1174: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1.55 thorpej 1175: if (__predict_true(cp != NULL))
1176: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1177: if (__predict_false(cp == NULL || ph == NULL)) {
1178: if (cp != NULL)
1.66 thorpej 1179: pool_allocator_free(pp, cp);
1.20 thorpej 1180: error = ENOMEM;
1.91 ! yamt 1181: simple_lock(&pp->pr_slock);
1.20 thorpej 1182: break;
1183: }
1.91 ! yamt 1184: simple_lock(&pp->pr_slock);
1.55 thorpej 1185: pool_prime_page(pp, cp, ph);
1.26 thorpej 1186: pp->pr_npagealloc++;
1.20 thorpej 1187: }
1188:
1189: return (error);
1190: }
1191:
1.88 chs 1192: static void
1193: pool_update_curpage(struct pool *pp)
1194: {
1195:
1196: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1197: if (pp->pr_curpage == NULL) {
1198: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1199: }
1200: }
1201:
1.3 pk 1202: void
1.42 thorpej 1203: pool_setlowat(struct pool *pp, int n)
1.3 pk 1204: {
1.15 pk 1205:
1.21 thorpej 1206: simple_lock(&pp->pr_slock);
1207:
1.3 pk 1208: pp->pr_minitems = n;
1.15 pk 1209: pp->pr_minpages = (n == 0)
1210: ? 0
1.18 thorpej 1211: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1212:
1213: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1214: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1215: /*
1216: * XXX: Should we log a warning? Should we set up a timeout
1217: * to try again in a second or so? The latter could break
1218: * a caller's assumptions about interrupt protection, etc.
1219: */
1220: }
1.21 thorpej 1221:
1222: simple_unlock(&pp->pr_slock);
1.3 pk 1223: }
1224:
1225: void
1.42 thorpej 1226: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1227: {
1.15 pk 1228:
1.21 thorpej 1229: simple_lock(&pp->pr_slock);
1230:
1.15 pk 1231: pp->pr_maxpages = (n == 0)
1232: ? 0
1.18 thorpej 1233: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1234:
1235: simple_unlock(&pp->pr_slock);
1.3 pk 1236: }
1237:
1.20 thorpej 1238: void
1.42 thorpej 1239: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1240: {
1241:
1.21 thorpej 1242: simple_lock(&pp->pr_slock);
1.20 thorpej 1243:
1244: pp->pr_hardlimit = n;
1245: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1246: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1247: pp->pr_hardlimit_warning_last.tv_sec = 0;
1248: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1249:
1250: /*
1.21 thorpej 1251: * In-line version of pool_sethiwat(), because we don't want to
1252: * release the lock.
1.20 thorpej 1253: */
1254: pp->pr_maxpages = (n == 0)
1255: ? 0
1256: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1257:
1258: simple_unlock(&pp->pr_slock);
1.20 thorpej 1259: }
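/*
 * Illustrative tuning (hypothetical numbers): keep enough pages for
 * 16 items primed, release idle pages beyond 64 items' worth, and
 * fail allocations beyond 1024 outstanding items, warning at most
 * once a minute:
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 64);
 *	pool_sethardlimit(&foo_pool, 1024, "foo_pool limit reached", 60);
 */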
1.3 pk 1260:
1261: /*
1262: * Release all complete pages that have not been used recently.
1263: */
1.66 thorpej 1264: int
1.59 thorpej 1265: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1266: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1267: #else
1268: pool_reclaim(struct pool *pp)
1269: #endif
1.3 pk 1270: {
1271: struct pool_item_header *ph, *phnext;
1.43 thorpej 1272: struct pool_cache *pc;
1.21 thorpej 1273: struct timeval curtime;
1.61 chs 1274: struct pool_pagelist pq;
1.88 chs 1275: struct timeval diff;
1.21 thorpej 1276: int s;
1.3 pk 1277:
1.68 thorpej 1278: if (pp->pr_drain_hook != NULL) {
1279: /*
1280: * The drain hook must be called with the pool unlocked.
1281: */
1282: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1283: }
1284:
1.21 thorpej 1285: if (simple_lock_try(&pp->pr_slock) == 0)
1.66 thorpej 1286: return (0);
1.25 thorpej 1287: pr_enter(pp, file, line);
1.68 thorpej 1288:
1.88 chs 1289: LIST_INIT(&pq);
1.3 pk 1290:
1.43 thorpej 1291: /*
1292: * Reclaim items from the pool's caches.
1293: */
1.61 chs 1294: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1.43 thorpej 1295: pool_cache_reclaim(pc);
1296:
1.21 thorpej 1297: s = splclock();
1298: curtime = mono_time;
1299: splx(s);
1300:
1.88 chs 1301: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1302: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1303:
1304: /* Check our minimum page claim */
1305: if (pp->pr_npages <= pp->pr_minpages)
1306: break;
1307:
1.88 chs 1308: KASSERT(ph->ph_nmissing == 0);
1309: timersub(&curtime, &ph->ph_time, &diff);
1310: if (diff.tv_sec < pool_inactive_time)
1311: continue;
1.21 thorpej 1312:
1.88 chs 1313: /*
1314: * If freeing this page would put us below
1315: * the low water mark, stop now.
1316: */
1317: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1318: pp->pr_minitems)
1319: break;
1.21 thorpej 1320:
1.88 chs 1321: pr_rmpage(pp, ph, &pq);
1.3 pk 1322: }
1323:
1.25 thorpej 1324: pr_leave(pp);
1.21 thorpej 1325: simple_unlock(&pp->pr_slock);
1.88 chs 1326: if (LIST_EMPTY(&pq))
1.66 thorpej 1327: return (0);
1328:
1.88 chs 1329: while ((ph = LIST_FIRST(&pq)) != NULL) {
1330: LIST_REMOVE(ph, ph_pagelist);
1.66 thorpej 1331: pool_allocator_free(pp, ph->ph_page);
1.61 chs 1332: if (pp->pr_roflags & PR_PHINPAGE) {
1333: continue;
1334: }
1.85 pk 1335: s = splvm();
1.61 chs 1336: pool_put(&phpool, ph);
1337: splx(s);
1338: }
1.66 thorpej 1339:
1340: return (1);
1.3 pk 1341: }
1342:
1343: /*
1344: * Drain pools, one at a time.
1.21 thorpej 1345: *
1346: * Note, we must never be called from an interrupt context.
1.3 pk 1347: */
1348: void
1.42 thorpej 1349: pool_drain(void *arg)
1.3 pk 1350: {
1351: struct pool *pp;
1.23 thorpej 1352: int s;
1.3 pk 1353:
1.61 chs 1354: pp = NULL;
1.49 thorpej 1355: s = splvm();
1.23 thorpej 1356: simple_lock(&pool_head_slock);
1.61 chs 1357: if (drainpp == NULL) {
1358: drainpp = TAILQ_FIRST(&pool_head);
1359: }
1360: if (drainpp) {
1361: pp = drainpp;
1362: drainpp = TAILQ_NEXT(pp, pr_poollist);
1363: }
1364: simple_unlock(&pool_head_slock);
1.63 chs 1365: 	if (pp != NULL) pool_reclaim(pp);
1.61 chs 1366: splx(s);
1.3 pk 1367: }
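/*
 * Each call reclaims from one pool and advances drainpp, so a caller
 * such as the pagedaemon cycles through every pool over repeated
 * calls; a sketch of the expected use is simply:
 *
 *	pool_drain(NULL);
 */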
1368:
1369: /*
1370: * Diagnostic helpers.
1371: */
1372: void
1.42 thorpej 1373: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1374: {
1375: int s;
1376:
1.49 thorpej 1377: s = splvm();
1.25 thorpej 1378: if (simple_lock_try(&pp->pr_slock) == 0) {
1379: printf("pool %s is locked; try again later\n",
1380: pp->pr_wchan);
1381: splx(s);
1382: return;
1383: }
1384: pool_print1(pp, modif, printf);
1.21 thorpej 1385: simple_unlock(&pp->pr_slock);
1386: splx(s);
1387: }
1388:
1.25 thorpej 1389: void
1.42 thorpej 1390: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1391: {
1392: int didlock = 0;
1393:
1394: if (pp == NULL) {
1395: (*pr)("Must specify a pool to print.\n");
1396: return;
1397: }
1398:
1399: /*
1400: * Called from DDB; interrupts should be blocked, and all
1401: * other processors should be paused. We can skip locking
1402: * the pool in this case.
1403: *
1404: * We do a simple_lock_try() just to print the lock
1405: * status, however.
1406: */
1407:
1408: if (simple_lock_try(&pp->pr_slock) == 0)
1409: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1410: else
1411: didlock = 1;
1412:
1413: pool_print1(pp, modif, pr);
1414:
1415: if (didlock)
1416: simple_unlock(&pp->pr_slock);
1417: }
1418:
1.21 thorpej 1419: static void
1.88 chs 1420: pool_print_pagelist(struct pool_pagelist *pl, void (*pr)(const char *, ...))
1421: {
1422: struct pool_item_header *ph;
1423: #ifdef DIAGNOSTIC
1424: struct pool_item *pi;
1425: #endif
1426:
1427: LIST_FOREACH(ph, pl, ph_pagelist) {
1428: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1429: ph->ph_page, ph->ph_nmissing,
1430: (u_long)ph->ph_time.tv_sec,
1431: (u_long)ph->ph_time.tv_usec);
1432: #ifdef DIAGNOSTIC
1433: TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1434: if (pi->pi_magic != PI_MAGIC) {
1435: (*pr)("\t\t\titem %p, magic 0x%x\n",
1436: pi, pi->pi_magic);
1437: }
1438: }
1439: #endif
1440: }
1441: }
1442:
1443: static void
1.42 thorpej 1444: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1445: {
1.25 thorpej 1446: struct pool_item_header *ph;
1.44 thorpej 1447: struct pool_cache *pc;
1448: struct pool_cache_group *pcg;
1449: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1450: char c;
1451:
1452: while ((c = *modif++) != '\0') {
1453: if (c == 'l')
1454: print_log = 1;
1455: if (c == 'p')
1456: print_pagelist = 1;
1.44 thorpej 1457: if (c == 'c')
1458: print_cache = 1;
1.25 thorpej 1459: }
1460:
1461: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1462: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1463: pp->pr_roflags);
1.66 thorpej 1464: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1465: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1466: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1467: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1468: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1469:
1470: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1471: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1472: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1473: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1474:
1475: if (print_pagelist == 0)
1476: goto skip_pagelist;
1477:
1.88 chs 1478: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1479: (*pr)("\n\tempty page list:\n");
1480: pool_print_pagelist(&pp->pr_emptypages, pr);
1481: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1482: (*pr)("\n\tfull page list:\n");
1483: pool_print_pagelist(&pp->pr_fullpages, pr);
1484: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1485: (*pr)("\n\tpartial-page list:\n");
1486: pool_print_pagelist(&pp->pr_partpages, pr);
1487:
1.25 thorpej 1488: if (pp->pr_curpage == NULL)
1489: (*pr)("\tno current page\n");
1490: else
1491: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1492:
1493: skip_pagelist:
1494: if (print_log == 0)
1495: goto skip_log;
1496:
1497: (*pr)("\n");
1498: if ((pp->pr_roflags & PR_LOGGING) == 0)
1499: (*pr)("\tno log\n");
1500: else
1501: pr_printlog(pp, NULL, pr);
1.3 pk 1502:
1.25 thorpej 1503: skip_log:
1.44 thorpej 1504: if (print_cache == 0)
1505: goto skip_cache;
1506:
1.61 chs 1507: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.44 thorpej 1508: (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1509: pc->pc_allocfrom, pc->pc_freeto);
1.48 thorpej 1510: (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1511: pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.61 chs 1512: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.44 thorpej 1513: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1.87 thorpej 1514: for (i = 0; i < PCG_NOBJECTS; i++) {
1515: if (pcg->pcg_objects[i].pcgo_pa !=
1516: POOL_PADDR_INVALID) {
1517: (*pr)("\t\t\t%p, 0x%llx\n",
1518: pcg->pcg_objects[i].pcgo_va,
1519: (unsigned long long)
1520: pcg->pcg_objects[i].pcgo_pa);
1521: } else {
1522: (*pr)("\t\t\t%p\n",
1523: pcg->pcg_objects[i].pcgo_va);
1524: }
1525: }
1.44 thorpej 1526: }
1527: }
1528:
1529: skip_cache:
1.88 chs 1530: pr_enter_check(pp, pr);
1531: }
1532:
1533: static int
1534: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1535: {
1536: struct pool_item *pi;
1537: caddr_t page;
1538: int n;
1539:
1540: page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1541: if (page != ph->ph_page &&
1542: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1543: if (label != NULL)
1544: printf("%s: ", label);
1545: printf("pool(%p:%s): page inconsistency: page %p;"
1546: " at page head addr %p (p %p)\n", pp,
1547: pp->pr_wchan, ph->ph_page,
1548: ph, page);
1549: return 1;
1550: }
1.3 pk 1551:
1.88 chs 1552: for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1553: pi != NULL;
1554: pi = TAILQ_NEXT(pi,pi_list), n++) {
1555:
1556: #ifdef DIAGNOSTIC
1557: if (pi->pi_magic != PI_MAGIC) {
1558: if (label != NULL)
1559: printf("%s: ", label);
1560: printf("pool(%s): free list modified: magic=%x;"
1561: " page %p; item ordinal %d;"
1562: " addr %p (p %p)\n",
1563: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1564: n, pi, page);
1565: panic("pool");
1566: }
1567: #endif
1568: page =
1569: (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1570: if (page == ph->ph_page)
1571: continue;
1572:
1573: if (label != NULL)
1574: printf("%s: ", label);
1575: printf("pool(%p:%s): page inconsistency: page %p;"
1576: " item ordinal %d; addr %p (p %p)\n", pp,
1577: pp->pr_wchan, ph->ph_page,
1578: n, pi, page);
1579: return 1;
1580: }
1581: return 0;
1.3 pk 1582: }
1583:
1.88 chs 1584:
1.3 pk 1585: int
1.42 thorpej 1586: pool_chk(struct pool *pp, const char *label)
1.3 pk 1587: {
1588: struct pool_item_header *ph;
1589: int r = 0;
1590:
1.21 thorpej 1591: simple_lock(&pp->pr_slock);
1.88 chs 1592: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1593: r = pool_chk_page(pp, label, ph);
1594: if (r) {
1595: goto out;
1596: }
1597: }
1598: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1599: r = pool_chk_page(pp, label, ph);
1600: if (r) {
1.3 pk 1601: goto out;
1602: }
1.88 chs 1603: }
1604: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1605: r = pool_chk_page(pp, label, ph);
1606: if (r) {
1.3 pk 1607: goto out;
1608: }
1609: }
1.88 chs 1610:
1.3 pk 1611: out:
1.21 thorpej 1612: simple_unlock(&pp->pr_slock);
1.3 pk 1613: return (r);
1.43 thorpej 1614: }
1615:
1616: /*
1617: * pool_cache_init:
1618: *
1619: * Initialize a pool cache.
1620: *
1621: * NOTE: If the pool must be protected from interrupts, we expect
1622: * to be called at the appropriate interrupt priority level.
1623: */
1624: void
1625: pool_cache_init(struct pool_cache *pc, struct pool *pp,
1626: int (*ctor)(void *, void *, int),
1627: void (*dtor)(void *, void *),
1628: void *arg)
1629: {
1630:
1631: TAILQ_INIT(&pc->pc_grouplist);
1632: simple_lock_init(&pc->pc_slock);
1633:
1634: pc->pc_allocfrom = NULL;
1635: pc->pc_freeto = NULL;
1636: pc->pc_pool = pp;
1637:
1638: pc->pc_ctor = ctor;
1639: pc->pc_dtor = dtor;
1640: pc->pc_arg = arg;
1641:
1.48 thorpej 1642: pc->pc_hits = 0;
1643: pc->pc_misses = 0;
1644:
1645: pc->pc_ngroups = 0;
1646:
1647: pc->pc_nitems = 0;
1648:
1.43 thorpej 1649: simple_lock(&pp->pr_slock);
1650: TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1651: simple_unlock(&pp->pr_slock);
1652: }
1653:
1654: /*
1655: * pool_cache_destroy:
1656: *
1657: * Destroy a pool cache.
1658: */
1659: void
1660: pool_cache_destroy(struct pool_cache *pc)
1661: {
1662: struct pool *pp = pc->pc_pool;
1663:
1664: /* First, invalidate the entire cache. */
1665: pool_cache_invalidate(pc);
1666:
1667: /* ...and remove it from the pool's cache list. */
1668: simple_lock(&pp->pr_slock);
1669: TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1670: simple_unlock(&pp->pr_slock);
1671: }
1672:
1673: static __inline void *
1.87 thorpej 1674: pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1.43 thorpej 1675: {
1676: void *object;
1677: u_int idx;
1678:
1679: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 1680: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 1681: idx = --pcg->pcg_avail;
1682:
1.87 thorpej 1683: KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1684: object = pcg->pcg_objects[idx].pcgo_va;
1685: if (pap != NULL)
1686: *pap = pcg->pcg_objects[idx].pcgo_pa;
1687: pcg->pcg_objects[idx].pcgo_va = NULL;
1.43 thorpej 1688:
1689: return (object);
1690: }
1691:
1692: static __inline void
1.87 thorpej 1693: pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1.43 thorpej 1694: {
1695: u_int idx;
1696:
1697: KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1698: idx = pcg->pcg_avail++;
1699:
1.87 thorpej 1700: KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1701: pcg->pcg_objects[idx].pcgo_va = object;
1702: pcg->pcg_objects[idx].pcgo_pa = pa;
1.43 thorpej 1703: }
1704:
1705: /*
1.87 thorpej 1706: * pool_cache_get{,_paddr}:
1.43 thorpej 1707: *
1.87 thorpej 1708: * Get an object from a pool cache (optionally returning
1709: * the physical address of the object).
1.43 thorpej 1710: */
1711: void *
1.87 thorpej 1712: pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1.43 thorpej 1713: {
1714: struct pool_cache_group *pcg;
1715: void *object;
1.58 thorpej 1716:
1717: #ifdef LOCKDEBUG
1718: if (flags & PR_WAITOK)
1719: simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1720: #endif
1.43 thorpej 1721:
1722: simple_lock(&pc->pc_slock);
1723:
1724: if ((pcg = pc->pc_allocfrom) == NULL) {
1.61 chs 1725: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1726: if (pcg->pcg_avail != 0) {
1727: pc->pc_allocfrom = pcg;
1728: goto have_group;
1729: }
1730: }
1731:
1732: /*
1733: * No groups with any available objects. Allocate
1734: * a new object, construct it, and return it to
1735: * the caller. We will allocate a group, if necessary,
1736: * when the object is freed back to the cache.
1737: */
1.48 thorpej 1738: pc->pc_misses++;
1.43 thorpej 1739: simple_unlock(&pc->pc_slock);
1740: object = pool_get(pc->pc_pool, flags);
1741: if (object != NULL && pc->pc_ctor != NULL) {
1742: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1743: pool_put(pc->pc_pool, object);
1744: return (NULL);
1745: }
1746: }
1.87 thorpej 1747: if (object != NULL && pap != NULL) {
1748: #ifdef POOL_VTOPHYS
1749: *pap = POOL_VTOPHYS(object);
1750: #else
1751: *pap = POOL_PADDR_INVALID;
1752: #endif
1753: }
1.43 thorpej 1754: return (object);
1755: }
1756:
1757: have_group:
1.48 thorpej 1758: pc->pc_hits++;
1759: pc->pc_nitems--;
1.87 thorpej 1760: object = pcg_get(pcg, pap);
1.43 thorpej 1761:
1762: if (pcg->pcg_avail == 0)
1763: pc->pc_allocfrom = NULL;
1.45 thorpej 1764:
1.43 thorpej 1765: simple_unlock(&pc->pc_slock);
1766:
1767: return (object);
1768: }
1769:
1770: /*
1.87 thorpej 1771: * pool_cache_put{,_paddr}:
1.43 thorpej 1772: *
1.87 thorpej 1773: * Put an object back to the pool cache (optionally caching the
1774: * physical address of the object).
1.43 thorpej 1775: */
1776: void
1.87 thorpej 1777: pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1.43 thorpej 1778: {
1779: struct pool_cache_group *pcg;
1.60 thorpej 1780: int s;
1.43 thorpej 1781:
1782: simple_lock(&pc->pc_slock);
1783:
1784: if ((pcg = pc->pc_freeto) == NULL) {
1.61 chs 1785: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1786: if (pcg->pcg_avail != PCG_NOBJECTS) {
1787: pc->pc_freeto = pcg;
1788: goto have_group;
1789: }
1790: }
1791:
1792: /*
1793: 		 * No group with room to free the object to.  Attempt
1.47 thorpej 1794: 		 * to allocate a new group.
1.43 thorpej 1795: */
1.47 thorpej 1796: simple_unlock(&pc->pc_slock);
1.60 thorpej 1797: s = splvm();
1.43 thorpej 1798: pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60 thorpej 1799: splx(s);
1.43 thorpej 1800: if (pcg != NULL) {
1801: memset(pcg, 0, sizeof(*pcg));
1.47 thorpej 1802: simple_lock(&pc->pc_slock);
1.48 thorpej 1803: pc->pc_ngroups++;
1.43 thorpej 1804: TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1.47 thorpej 1805: if (pc->pc_freeto == NULL)
1806: pc->pc_freeto = pcg;
1.43 thorpej 1807: goto have_group;
1808: }
1809:
1810: /*
1811: * Unable to allocate a cache group; destruct the object
1812: * and free it back to the pool.
1813: */
1.51 thorpej 1814: pool_cache_destruct_object(pc, object);
1.43 thorpej 1815: return;
1816: }
1817:
1818: have_group:
1.48 thorpej 1819: pc->pc_nitems++;
1.87 thorpej 1820: pcg_put(pcg, object, pa);
1.43 thorpej 1821:
1822: if (pcg->pcg_avail == PCG_NOBJECTS)
1823: pc->pc_freeto = NULL;
1824:
1825: simple_unlock(&pc->pc_slock);
1.51 thorpej 1826: }
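
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * obtaining an object together with its physical address and later
 * returning both to the cache.
 *
 *	paddr_t pa;
 *	void *obj;
 *
 *	obj = pool_cache_get_paddr(&foo_cache, PR_WAITOK, &pa);
 *	if (obj != NULL) {
 *		... use obj; pa holds its physical address, or
 *		POOL_PADDR_INVALID if none could be determined ...
 *		pool_cache_put_paddr(&foo_cache, obj, pa);
 *	}
 *
 * The plain pool_cache_get()/pool_cache_put() forms named in the
 * comments above are the same operations with no physical address
 * exchanged.
 */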
1827:
1828: /*
1829: * pool_cache_destruct_object:
1830: *
1831: * Force destruction of an object and its release back into
1832: * the pool.
1833: */
1834: void
1835: pool_cache_destruct_object(struct pool_cache *pc, void *object)
1836: {
1837:
1838: if (pc->pc_dtor != NULL)
1839: (*pc->pc_dtor)(pc->pc_arg, object);
1840: pool_put(pc->pc_pool, object);
1.43 thorpej 1841: }
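
/*
 * Callers can use pool_cache_destruct_object() instead of
 * pool_cache_put() when a particular object should not be recycled
 * through the cache, e.g. because resources it references are being
 * torn down.
 */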
1842:
1843: /*
1844: * pool_cache_do_invalidate:
1845: *
1846: * This internal function implements pool_cache_invalidate() and
1847: * pool_cache_reclaim().
1848: */
1849: static void
1850: pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1.56 sommerfe 1851: void (*putit)(struct pool *, void *))
1.43 thorpej 1852: {
1853: struct pool_cache_group *pcg, *npcg;
1854: void *object;
1.60 thorpej 1855: int s;
1.43 thorpej 1856:
1857: for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1858: pcg = npcg) {
1859: npcg = TAILQ_NEXT(pcg, pcg_list);
1860: while (pcg->pcg_avail != 0) {
1.48 thorpej 1861: pc->pc_nitems--;
1.87 thorpej 1862: object = pcg_get(pcg, NULL);
1.45 thorpej 1863: if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1864: pc->pc_allocfrom = NULL;
1.43 thorpej 1865: if (pc->pc_dtor != NULL)
1866: (*pc->pc_dtor)(pc->pc_arg, object);
1.56 sommerfe 1867: (*putit)(pc->pc_pool, object);
1.43 thorpej 1868: }
1869: if (free_groups) {
1.48 thorpej 1870: pc->pc_ngroups--;
1.43 thorpej 1871: TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1.46 thorpej 1872: if (pc->pc_freeto == pcg)
1873: pc->pc_freeto = NULL;
1.60 thorpej 1874: s = splvm();
1.43 thorpej 1875: pool_put(&pcgpool, pcg);
1.60 thorpej 1876: splx(s);
1.43 thorpej 1877: }
1878: }
1879: }
1880:
1881: /*
1882: * pool_cache_invalidate:
1883: *
1884: * Invalidate a pool cache (destruct and release all of the
1885:  *	cached objects).  The cache groups themselves are retained.
1886: */
1887: void
1888: pool_cache_invalidate(struct pool_cache *pc)
1889: {
1890:
1891: simple_lock(&pc->pc_slock);
1.56 sommerfe 1892: pool_cache_do_invalidate(pc, 0, pool_put);
1.43 thorpej 1893: simple_unlock(&pc->pc_slock);
1894: }
1895:
1896: /*
1897: * pool_cache_reclaim:
1898: *
1899:  *	Reclaim a pool cache for pool_reclaim(), also freeing the emptied cache groups.
1900: */
1901: static void
1902: pool_cache_reclaim(struct pool_cache *pc)
1903: {
1904:
1.47 thorpej 1905: simple_lock(&pc->pc_slock);
1.43 thorpej 1906: pool_cache_do_invalidate(pc, 1, pool_do_put);
1907: simple_unlock(&pc->pc_slock);
1.3 pk 1908: }
1.66 thorpej 1909:
1910: /*
1911: * Pool backend allocators.
1912: *
1913: * Each pool has a backend allocator that handles allocation, deallocation,
1914: * and any additional draining that might be needed.
1915: *
1916: * We provide two standard allocators:
1917: *
1918: * pool_allocator_kmem - the default when no allocator is specified
1919: *
1920: * pool_allocator_nointr - used for pools that will not be accessed
1921: * in interrupt context.
1922: */
1923: void *pool_page_alloc(struct pool *, int);
1924: void pool_page_free(struct pool *, void *);
1925:
1926: struct pool_allocator pool_allocator_kmem = {
1927: pool_page_alloc, pool_page_free, 0,
1928: };
1929:
1930: void *pool_page_alloc_nointr(struct pool *, int);
1931: void pool_page_free_nointr(struct pool *, void *);
1932:
1933: struct pool_allocator pool_allocator_nointr = {
1934: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1935: };
1936:
1937: #ifdef POOL_SUBPAGE
1938: void *pool_subpage_alloc(struct pool *, int);
1939: void pool_subpage_free(struct pool *, void *);
1940:
1941: struct pool_allocator pool_allocator_kmem_subpage = {
1942: pool_subpage_alloc, pool_subpage_free, 0,
1943: };
1944: #endif /* POOL_SUBPAGE */
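
/*
 * Illustrative sketch (hypothetical, not part of this file): a pool
 * may also supply its own backend allocator by filling in the hooks;
 * a third field (pa_pagesz) of 0 selects the default page size.
 *
 *	static void *my_page_alloc(struct pool *, int);
 *	static void my_page_free(struct pool *, void *);
 *
 *	struct pool_allocator my_allocator = {
 *		my_page_alloc, my_page_free, 0,
 *	};
 *
 *	pool_init(&my_pool, sizeof(struct my_item), 0, 0, 0, "mypl",
 *	    &my_allocator);
 */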
1945:
1946: /*
1947: * We have at least three different resources for the same allocation and
1948: * each resource can be depleted. First, we have the ready elements in the
1949: * pool. Then we have the resource (typically a vm_map) for this allocator.
1950:  * Finally, we have physical memory.  A wait on any one of these can
1951:  * become unnecessary as soon as another is freed, but the kernel can't
1952:  * sleep on multiple wait channels at once, so we employ another strategy.
1953: *
1954: * The caller sleeps on the pool (so that it can be awakened when an item
1955: * is returned to the pool), but we set PA_WANT on the allocator. When a
1956: * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1957: * will wake up all sleeping pools belonging to this allocator.
1958: *
1959: * XXX Thundering herd.
1960: */
1961: void *
1962: pool_allocator_alloc(struct pool *org, int flags)
1963: {
1964: struct pool_allocator *pa = org->pr_alloc;
1965: struct pool *pp, *start;
1966: int s, freed;
1967: void *res;
1968:
1.91 ! yamt 1969: LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
! 1970:
1.66 thorpej 1971: do {
1972: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1973: return (res);
1.68 thorpej 1974: if ((flags & PR_WAITOK) == 0) {
1975: /*
1976: 			 * We only run the drain hook here if PR_NOWAIT
1977: 			 * is set.  In other cases, the hook will be run in
1978: * pool_reclaim().
1979: */
1980: if (org->pr_drain_hook != NULL) {
1981: (*org->pr_drain_hook)(org->pr_drain_hook_arg,
1982: flags);
1983: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1984: return (res);
1985: }
1.66 thorpej 1986: break;
1.68 thorpej 1987: }
1.66 thorpej 1988:
1989: /*
1990: * Drain all pools, except "org", that use this
1991: * allocator. We do this to reclaim VA space.
1992: * pa_alloc is responsible for waiting for
1993: * physical memory.
1994: *
1995: 		 * XXX We risk looping forever if someone calls
1996: 		 * pool_destroy() on "start".  But there is no other
1997: 		 * way to have a potentially sleeping pool_reclaim(),
1998: 		 * non-sleeping locks on the pool_allocator, and some
1999: 		 * stirring of drained pools in the allocator.
1.68 thorpej 2000: *
2001: * XXX Maybe we should use pool_head_slock for locking
2002: * the allocators?
1.66 thorpej 2003: */
2004: freed = 0;
2005:
2006: s = splvm();
2007: simple_lock(&pa->pa_slock);
2008: pp = start = TAILQ_FIRST(&pa->pa_list);
2009: do {
2010: TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
2011: TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
2012: if (pp == org)
2013: continue;
1.73 thorpej 2014: simple_unlock(&pa->pa_slock);
1.66 thorpej 2015: freed = pool_reclaim(pp);
1.73 thorpej 2016: simple_lock(&pa->pa_slock);
1.66 thorpej 2017: } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
2018: freed == 0);
2019:
2020: if (freed == 0) {
2021: /*
2022: 			 * We set PA_WANT here: the caller will most likely
2023: 			 * sleep waiting for pages (and if not, this won't
2024: 			 * hurt much), and there is no way to set the flag
2025: 			 * in the caller without violating the locking order.
2026: */
2027: pa->pa_flags |= PA_WANT;
2028: }
2029: simple_unlock(&pa->pa_slock);
2030: splx(s);
2031: } while (freed);
2032: return (NULL);
2033: }
2034:
2035: void
2036: pool_allocator_free(struct pool *pp, void *v)
2037: {
2038: struct pool_allocator *pa = pp->pr_alloc;
2039: int s;
2040:
1.91 ! yamt 2041: LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
! 2042:
1.66 thorpej 2043: (*pa->pa_free)(pp, v);
2044:
2045: s = splvm();
2046: simple_lock(&pa->pa_slock);
2047: if ((pa->pa_flags & PA_WANT) == 0) {
2048: simple_unlock(&pa->pa_slock);
2049: splx(s);
2050: return;
2051: }
2052:
2053: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2054: simple_lock(&pp->pr_slock);
2055: if ((pp->pr_flags & PR_WANTED) != 0) {
2056: pp->pr_flags &= ~PR_WANTED;
2057: wakeup(pp);
2058: }
1.69 thorpej 2059: simple_unlock(&pp->pr_slock);
1.66 thorpej 2060: }
2061: pa->pa_flags &= ~PA_WANT;
2062: simple_unlock(&pa->pa_slock);
2063: splx(s);
2064: }
2065:
2066: void *
2067: pool_page_alloc(struct pool *pp, int flags)
2068: {
2069: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2070:
2071: return ((void *) uvm_km_alloc_poolpage(waitok));
2072: }
2073:
2074: void
2075: pool_page_free(struct pool *pp, void *v)
2076: {
2077:
2078: uvm_km_free_poolpage((vaddr_t) v);
2079: }
2080:
2081: #ifdef POOL_SUBPAGE
2082: /* Sub-page allocator, for machines with large hardware pages. */
2083: void *
2084: pool_subpage_alloc(struct pool *pp, int flags)
2085: {
2086:
2087: return (pool_get(&psppool, flags));
2088: }
2089:
2090: void
2091: pool_subpage_free(struct pool *pp, void *v)
2092: {
2093:
2094: pool_put(&psppool, v);
2095: }
2096:
2097: /* We don't provide a real nointr allocator. Maybe later. */
2098: void *
2099: pool_page_alloc_nointr(struct pool *pp, int flags)
2100: {
2101:
2102: return (pool_subpage_alloc(pp, flags));
2103: }
2104:
2105: void
2106: pool_page_free_nointr(struct pool *pp, void *v)
2107: {
2108:
2109: pool_subpage_free(pp, v);
2110: }
2111: #else
2112: void *
2113: pool_page_alloc_nointr(struct pool *pp, int flags)
2114: {
2115: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2116:
2117: return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2118: uvm.kernel_object, waitok));
2119: }
2120:
2121: void
2122: pool_page_free_nointr(struct pool *pp, void *v)
2123: {
2124:
2125: uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2126: }
2127: #endif /* POOL_SUBPAGE */