Annotation of src/sys/kern/subr_pool.c, Revision 1.92
1.92 ! enami 1: /* $NetBSD: subr_pool.c,v 1.91 2004/01/16 12:47:37 yamt Exp $ */
1.1 pk 2:
3: /*-
1.43 thorpej 4: * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.92 ! enami 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.91 2004/01/16 12:47:37 yamt Exp $");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.3 pk 56:
57: #include <uvm/uvm.h>
58:
1.1 pk 59: /*
60: * Pool resource management utility.
1.3 pk 61: *
1.88 chs 62: * Memory is allocated in pages which are split into pieces according to
63: * the pool item size. Each page is kept on one of three lists in the
64: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
65: * for empty, full and partially-full pages respectively. The individual
66: * pool items are on a linked list headed by `ph_itemlist' in each page
67: * header. The memory for building the page list is either taken from
68: * the allocated pages themselves (for small pool items) or taken from
69: * an internal pool of page headers (`phpool').
1.1 pk 70: */
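/*
 * Example usage (a sketch; the `foo' names are hypothetical): a typical
 * client initializes a static pool once, then gets and puts fixed-size
 * items from it.
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *
 *	item = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, item);
 */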
71:
1.3 pk 72: /* List of all pools */
1.5 thorpej 73: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.3 pk 74:
75: /* Private pool for page header structures */
76: static struct pool phpool;
77:
1.62 bjh21 78: #ifdef POOL_SUBPAGE
79: /* Pool of subpages for use by normal pools. */
80: static struct pool psppool;
81: #endif
82:
1.3 pk 83: /* # of seconds to retain page after last use */
84: int pool_inactive_time = 10;
85:
86: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 87: static struct pool *drainpp;
88:
89: /* This spin lock protects both pool_head and drainpp. */
90: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 91:
92: struct pool_item_header {
93: /* Page headers */
1.88 chs 94: LIST_ENTRY(pool_item_header)
1.3 pk 95: ph_pagelist; /* pool page list */
96: TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
1.88 chs 97: SPLAY_ENTRY(pool_item_header)
98: ph_node; /* Off-page page headers */
1.79 thorpej 99: unsigned int ph_nmissing; /* # of chunks in use */
1.3 pk 100: caddr_t ph_page; /* this page's address */
101: struct timeval ph_time; /* last referenced */
102: };
103:
1.1 pk 104: struct pool_item {
1.3 pk 105: #ifdef DIAGNOSTIC
1.82 thorpej 106: u_int pi_magic;
1.33 chs 107: #endif
1.82 thorpej 108: #define PI_MAGIC 0xdeadbeefU
1.3 pk 109: /* Other entries use only this list entry */
110: TAILQ_ENTRY(pool_item) pi_list;
111: };
112:
1.53 thorpej 113: #define POOL_NEEDS_CATCHUP(pp) \
114: ((pp)->pr_nitems < (pp)->pr_minitems)
115:
1.43 thorpej 116: /*
117: * Pool cache management.
118: *
119: * Pool caches provide a way for constructed objects to be cached by the
 120: * pool subsystem. This can lead to performance improvements by avoiding
 121: * needless object construction/destruction; destruction is deferred
 122: * until absolutely necessary.
123: *
124: * Caches are grouped into cache groups. Each cache group references
125: * up to 16 constructed objects. When a cache allocates an object
126: * from the pool, it calls the object's constructor and places it into
127: * a cache group. When a cache group frees an object back to the pool,
128: * it first calls the object's destructor. This allows the object to
129: * persist in constructed form while freed to the cache.
130: *
131: * Multiple caches may exist for each pool. This allows a single
132: * object type to have multiple constructed forms. The pool references
133: * each cache, so that when a pool is drained by the pagedaemon, it can
134: * drain each individual cache as well. Each time a cache is drained,
135: * the most idle cache group is freed to the pool in its entirety.
136: *
137: * Pool caches are layed on top of pools. By layering them, we can avoid
138: * the complexity of cache management for pools which would not benefit
139: * from it.
140: */
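/*
 * Example usage (a sketch; the `foo' names are hypothetical): a cache
 * layered on the pool above, with constructor/destructor callbacks
 * matching the signatures taken by pool_cache_init().
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	obj = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, obj);
 */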
141:
142: /* The cache group pool. */
143: static struct pool pcgpool;
1.3 pk 144:
1.43 thorpej 145: static void pool_cache_reclaim(struct pool_cache *);
1.3 pk 146:
1.42 thorpej 147: static int pool_catchup(struct pool *);
1.55 thorpej 148: static void pool_prime_page(struct pool *, caddr_t,
149: struct pool_item_header *);
1.88 chs 150: static void pool_update_curpage(struct pool *);
1.66 thorpej 151:
152: void *pool_allocator_alloc(struct pool *, int);
153: void pool_allocator_free(struct pool *, void *);
1.3 pk 154:
1.88 chs 155: static void pool_print_pagelist(struct pool_pagelist *,
156: void (*)(const char *, ...));
1.42 thorpej 157: static void pool_print1(struct pool *, const char *,
158: void (*)(const char *, ...));
1.3 pk 159:
1.88 chs 160: static int pool_chk_page(struct pool *, const char *,
161: struct pool_item_header *);
162:
1.3 pk 163: /*
1.52 thorpej 164: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 165: */
166: struct pool_log {
167: const char *pl_file;
168: long pl_line;
169: int pl_action;
1.25 thorpej 170: #define PRLOG_GET 1
171: #define PRLOG_PUT 2
1.3 pk 172: void *pl_addr;
1.1 pk 173: };
174:
1.86 matt 175: #ifdef POOL_DIAGNOSTIC
1.3 pk 176: /* Number of entries in pool log buffers */
1.17 thorpej 177: #ifndef POOL_LOGSIZE
178: #define POOL_LOGSIZE 10
179: #endif
180:
181: int pool_logsize = POOL_LOGSIZE;
1.1 pk 182:
1.42 thorpej 183: static __inline void
184: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 185: {
186: int n = pp->pr_curlogentry;
187: struct pool_log *pl;
188:
1.20 thorpej 189: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 190: return;
191:
192: /*
193: * Fill in the current entry. Wrap around and overwrite
194: * the oldest entry if necessary.
195: */
196: pl = &pp->pr_log[n];
197: pl->pl_file = file;
198: pl->pl_line = line;
199: pl->pl_action = action;
200: pl->pl_addr = v;
201: if (++n >= pp->pr_logsize)
202: n = 0;
203: pp->pr_curlogentry = n;
204: }
205:
206: static void
1.42 thorpej 207: pr_printlog(struct pool *pp, struct pool_item *pi,
208: void (*pr)(const char *, ...))
1.3 pk 209: {
210: int i = pp->pr_logsize;
211: int n = pp->pr_curlogentry;
212:
1.20 thorpej 213: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 214: return;
215:
216: /*
217: * Print all entries in this pool's log.
218: */
219: while (i-- > 0) {
220: struct pool_log *pl = &pp->pr_log[n];
221: if (pl->pl_action != 0) {
1.25 thorpej 222: if (pi == NULL || pi == pl->pl_addr) {
223: (*pr)("\tlog entry %d:\n", i);
224: (*pr)("\t\taction = %s, addr = %p\n",
225: pl->pl_action == PRLOG_GET ? "get" : "put",
226: pl->pl_addr);
227: (*pr)("\t\tfile: %s at line %lu\n",
228: pl->pl_file, pl->pl_line);
229: }
1.3 pk 230: }
231: if (++n >= pp->pr_logsize)
232: n = 0;
233: }
234: }
1.25 thorpej 235:
1.42 thorpej 236: static __inline void
237: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 238: {
239:
1.34 thorpej 240: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 241: printf("pool %s: reentrancy at file %s line %ld\n",
242: pp->pr_wchan, file, line);
243: printf(" previous entry at file %s line %ld\n",
244: pp->pr_entered_file, pp->pr_entered_line);
245: panic("pr_enter");
246: }
247:
248: pp->pr_entered_file = file;
249: pp->pr_entered_line = line;
250: }
251:
1.42 thorpej 252: static __inline void
253: pr_leave(struct pool *pp)
1.25 thorpej 254: {
255:
1.34 thorpej 256: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 257: printf("pool %s not entered?\n", pp->pr_wchan);
258: panic("pr_leave");
259: }
260:
261: pp->pr_entered_file = NULL;
262: pp->pr_entered_line = 0;
263: }
264:
1.42 thorpej 265: static __inline void
266: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 267: {
268:
269: if (pp->pr_entered_file != NULL)
270: (*pr)("\n\tcurrently entered from file %s line %ld\n",
271: pp->pr_entered_file, pp->pr_entered_line);
272: }
1.3 pk 273: #else
1.25 thorpej 274: #define pr_log(pp, v, action, file, line)
275: #define pr_printlog(pp, pi, pr)
276: #define pr_enter(pp, file, line)
277: #define pr_leave(pp)
278: #define pr_enter_check(pp, pr)
1.59 thorpej 279: #endif /* POOL_DIAGNOSTIC */
1.3 pk 280:
1.88 chs 281: static __inline int
282: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
283: {
284: if (a->ph_page < b->ph_page)
285: return (-1);
286: else if (a->ph_page > b->ph_page)
287: return (1);
288: else
289: return (0);
290: }
291:
292: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
293: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
294:
1.3 pk 295: /*
296: * Return the pool page header based on page address.
297: */
1.42 thorpej 298: static __inline struct pool_item_header *
299: pr_find_pagehead(struct pool *pp, caddr_t page)
1.3 pk 300: {
1.88 chs 301: struct pool_item_header *ph, tmp;
1.3 pk 302:
1.20 thorpej 303: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 304: return ((struct pool_item_header *)(page + pp->pr_phoffset));
305:
1.88 chs 306: tmp.ph_page = page;
307: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
308: return ph;
1.3 pk 309: }
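/*
 * For illustration, a caller (cf. pool_do_put() below) derives `page'
 * by masking the item address with the allocator's page mask:
 *
 *	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
 *	ph = pr_find_pagehead(pp, page);
 *
 * With PR_PHINPAGE the header sits at a fixed offset within that page;
 * otherwise it is found by the splay tree lookup above.
 */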
310:
311: /*
312: * Remove a page from the pool.
313: */
1.42 thorpej 314: static __inline void
1.61 chs 315: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
316: struct pool_pagelist *pq)
1.3 pk 317: {
1.61 chs 318: int s;
1.3 pk 319:
1.91 yamt 320: LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);
321:
1.3 pk 322: /*
1.7 thorpej 323: * If the page was idle, decrement the idle page count.
1.3 pk 324: */
1.6 thorpej 325: if (ph->ph_nmissing == 0) {
326: #ifdef DIAGNOSTIC
327: if (pp->pr_nidle == 0)
328: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 329: if (pp->pr_nitems < pp->pr_itemsperpage)
330: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 331: #endif
332: pp->pr_nidle--;
333: }
1.7 thorpej 334:
1.20 thorpej 335: pp->pr_nitems -= pp->pr_itemsperpage;
336:
1.7 thorpej 337: /*
1.61 chs 338: * Unlink a page from the pool and release it (or queue it for release).
1.7 thorpej 339: */
1.88 chs 340: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 341: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
342: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.61 chs 343: if (pq) {
1.88 chs 344: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
1.61 chs 345: } else {
1.66 thorpej 346: pool_allocator_free(pp, ph->ph_page);
1.61 chs 347: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
1.85 pk 348: s = splvm();
1.61 chs 349: pool_put(&phpool, ph);
350: splx(s);
351: }
352: }
1.7 thorpej 353: pp->pr_npages--;
354: pp->pr_npagefree++;
1.6 thorpej 355:
1.88 chs 356: pool_update_curpage(pp);
1.3 pk 357: }
358:
359: /*
360: * Initialize the given pool resource structure.
361: *
362: * We export this routine to allow other kernel parts to declare
363: * static pools that must be initialized before malloc() is available.
364: */
365: void
1.42 thorpej 366: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.66 thorpej 367: const char *wchan, struct pool_allocator *palloc)
1.3 pk 368: {
1.88 chs 369: int off, slack;
1.92 ! enami 370: size_t trysize, phsize;
1.3 pk 371:
1.25 thorpej 372: #ifdef POOL_DIAGNOSTIC
373: /*
374: * Always log if POOL_DIAGNOSTIC is defined.
375: */
376: if (pool_logsize != 0)
377: flags |= PR_LOGGING;
378: #endif
379:
1.66 thorpej 380: #ifdef POOL_SUBPAGE
381: /*
382: * XXX We don't provide a real `nointr' back-end
383: * yet; all sub-pages come from a kmem back-end.
 384: * Maybe some day...
385: */
386: if (palloc == NULL) {
387: extern struct pool_allocator pool_allocator_kmem_subpage;
388: palloc = &pool_allocator_kmem_subpage;
389: }
1.3 pk 390: /*
1.66 thorpej 391: * We'll assume any user-specified back-end allocator
 392: * will deal with sub-pages, or simply not care.
1.3 pk 393: */
1.66 thorpej 394: #else
395: if (palloc == NULL)
396: palloc = &pool_allocator_kmem;
397: #endif /* POOL_SUBPAGE */
398: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
399: if (palloc->pa_pagesz == 0) {
1.62 bjh21 400: #ifdef POOL_SUBPAGE
1.66 thorpej 401: if (palloc == &pool_allocator_kmem)
402: palloc->pa_pagesz = PAGE_SIZE;
403: else
404: palloc->pa_pagesz = POOL_SUBPAGE;
1.62 bjh21 405: #else
1.66 thorpej 406: palloc->pa_pagesz = PAGE_SIZE;
407: #endif /* POOL_SUBPAGE */
408: }
409:
410: TAILQ_INIT(&palloc->pa_list);
411:
412: simple_lock_init(&palloc->pa_slock);
413: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
414: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
415: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 416: }
1.3 pk 417:
418: if (align == 0)
419: align = ALIGN(1);
1.14 thorpej 420:
421: if (size < sizeof(struct pool_item))
422: size = sizeof(struct pool_item);
1.3 pk 423:
1.78 thorpej 424: size = roundup(size, align);
1.66 thorpej 425: #ifdef DIAGNOSTIC
426: if (size > palloc->pa_pagesz)
1.35 pk 427: panic("pool_init: pool item size (%lu) too large",
428: (u_long)size);
1.66 thorpej 429: #endif
1.35 pk 430:
1.3 pk 431: /*
432: * Initialize the pool structure.
433: */
1.88 chs 434: LIST_INIT(&pp->pr_emptypages);
435: LIST_INIT(&pp->pr_fullpages);
436: LIST_INIT(&pp->pr_partpages);
1.43 thorpej 437: TAILQ_INIT(&pp->pr_cachelist);
1.3 pk 438: pp->pr_curpage = NULL;
439: pp->pr_npages = 0;
440: pp->pr_minitems = 0;
441: pp->pr_minpages = 0;
442: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 443: pp->pr_roflags = flags;
444: pp->pr_flags = 0;
1.35 pk 445: pp->pr_size = size;
1.3 pk 446: pp->pr_align = align;
447: pp->pr_wchan = wchan;
1.66 thorpej 448: pp->pr_alloc = palloc;
1.20 thorpej 449: pp->pr_nitems = 0;
450: pp->pr_nout = 0;
451: pp->pr_hardlimit = UINT_MAX;
452: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 453: pp->pr_hardlimit_ratecap.tv_sec = 0;
454: pp->pr_hardlimit_ratecap.tv_usec = 0;
455: pp->pr_hardlimit_warning_last.tv_sec = 0;
456: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 457: pp->pr_drain_hook = NULL;
458: pp->pr_drain_hook_arg = NULL;
1.3 pk 459:
460: /*
461: * Decide whether to put the page header off page to avoid
1.92 ! enami 462: * wasting too large a part of the page or a too-big item.
 ! 463: * Off-page page headers go into a splay tree, so we can match
 ! 464: * a returned item with its header based on the page address.
 ! 465: * We use 1/16 of the page size and eight times the page header
 ! 466: * size as the threshold (XXX: tune).
! 467: *
! 468: * However, we'll put the header into the page if we can put
! 469: * it without wasting any items.
! 470: *
! 471: * Silently enforce `0 <= ioff < align'.
1.3 pk 472: */
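	/*
	 * Worked example, assuming a 4096-byte page and a page header
	 * whose aligned size is 40 bytes: the threshold is
	 * MIN(4096 / 16, 40 << 3) = MIN(256, 320) = 256.  A 64-byte
	 * item falls below it, so its header stays in the page.  A
	 * 512-byte item does not, and since 4096 / 512 = 8 items but
	 * (4096 - 40) / 512 = 7, an in-page header would cost an
	 * item, so the header goes off page.
	 */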
1.92 ! enami 473: pp->pr_itemoffset = ioff %= align;
! 474: /* See the comment below about reserved bytes. */
! 475: trysize = palloc->pa_pagesz - ((align - ioff) % align);
! 476: phsize = ALIGN(sizeof(struct pool_item_header));
! 477: if (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
! 478: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size) {
1.3 pk 479: /* Use the end of the page for the page header */
1.20 thorpej 480: pp->pr_roflags |= PR_PHINPAGE;
1.92 ! enami 481: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 482: } else {
1.3 pk 483: /* The page header will be taken from our page header pool */
484: pp->pr_phoffset = 0;
1.66 thorpej 485: off = palloc->pa_pagesz;
1.88 chs 486: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 487: }
1.1 pk 488:
1.3 pk 489: /*
490: * Alignment is to take place at `ioff' within the item. This means
491: * we must reserve up to `align - 1' bytes on the page to allow
492: * appropriate positioning of each item.
493: */
494: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 495: KASSERT(pp->pr_itemsperpage != 0);
1.3 pk 496:
497: /*
498: * Use the slack between the chunks and the page header
499: * for "cache coloring".
500: */
501: slack = off - pp->pr_itemsperpage * pp->pr_size;
502: pp->pr_maxcolor = (slack / align) * align;
503: pp->pr_curcolor = 0;
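	/*
	 * For instance (hypothetical numbers), with align = 32 and
	 * slack = 100, pr_maxcolor = 96, so successive pages start
	 * their items at offsets 0, 32, 64, 96, 0, ... (see
	 * pool_prime_page()).
	 */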
504:
505: pp->pr_nget = 0;
506: pp->pr_nfail = 0;
507: pp->pr_nput = 0;
508: pp->pr_npagealloc = 0;
509: pp->pr_npagefree = 0;
1.1 pk 510: pp->pr_hiwat = 0;
1.8 thorpej 511: pp->pr_nidle = 0;
1.3 pk 512:
1.59 thorpej 513: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 514: if (flags & PR_LOGGING) {
515: if (kmem_map == NULL ||
516: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
517: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 518: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 519: pp->pr_curlogentry = 0;
520: pp->pr_logsize = pool_logsize;
521: }
1.59 thorpej 522: #endif
1.25 thorpej 523:
524: pp->pr_entered_file = NULL;
525: pp->pr_entered_line = 0;
1.3 pk 526:
1.21 thorpej 527: simple_lock_init(&pp->pr_slock);
1.1 pk 528:
1.3 pk 529: /*
1.43 thorpej 530: * Initialize private page header pool and cache magazine pool if we
531: * haven't done so yet.
1.23 thorpej 532: * XXX LOCKING.
1.3 pk 533: */
534: if (phpool.pr_size == 0) {
1.62 bjh21 535: #ifdef POOL_SUBPAGE
536: pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
1.66 thorpej 537: "phpool", &pool_allocator_kmem);
1.62 bjh21 538: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.66 thorpej 539: PR_RECURSIVE, "psppool", &pool_allocator_kmem);
1.62 bjh21 540: #else
1.3 pk 541: pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
1.66 thorpej 542: 0, "phpool", NULL);
1.62 bjh21 543: #endif
1.43 thorpej 544: pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.66 thorpej 545: 0, "pcgpool", NULL);
1.1 pk 546: }
547:
1.23 thorpej 548: /* Insert into the list of all pools. */
549: simple_lock(&pool_head_slock);
550: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
551: simple_unlock(&pool_head_slock);
1.66 thorpej 552:
553: /* Insert this into the list of pools using this allocator. */
554: simple_lock(&palloc->pa_slock);
555: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
556: simple_unlock(&palloc->pa_slock);
1.1 pk 557: }
558:
559: /*
 560: * De-commission a pool resource.
561: */
562: void
1.42 thorpej 563: pool_destroy(struct pool *pp)
1.1 pk 564: {
1.3 pk 565: struct pool_item_header *ph;
1.43 thorpej 566: struct pool_cache *pc;
567:
1.66 thorpej 568: /* Locking order: pool_allocator -> pool */
569: simple_lock(&pp->pr_alloc->pa_slock);
570: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
571: simple_unlock(&pp->pr_alloc->pa_slock);
572:
1.43 thorpej 573: /* Destroy all caches for this pool. */
574: while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
575: pool_cache_destroy(pc);
1.3 pk 576:
577: #ifdef DIAGNOSTIC
1.20 thorpej 578: if (pp->pr_nout != 0) {
1.25 thorpej 579: pr_printlog(pp, NULL, printf);
1.80 provos 580: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 581: pp->pr_nout);
1.3 pk 582: }
583: #endif
1.1 pk 584:
1.3 pk 585: /* Remove all pages */
1.88 chs 586: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.70 thorpej 587: pr_rmpage(pp, ph, NULL);
1.88 chs 588: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
589: KASSERT(LIST_EMPTY(&pp->pr_partpages));
1.3 pk 590:
591: /* Remove from global pool list */
1.23 thorpej 592: simple_lock(&pool_head_slock);
1.3 pk 593: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.61 chs 594: if (drainpp == pp) {
595: drainpp = NULL;
596: }
1.23 thorpej 597: simple_unlock(&pool_head_slock);
1.3 pk 598:
1.59 thorpej 599: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 600: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 601: free(pp->pr_log, M_TEMP);
1.59 thorpej 602: #endif
1.1 pk 603: }
604:
1.68 thorpej 605: void
606: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
607: {
608:
609: /* XXX no locking -- must be used just after pool_init() */
610: #ifdef DIAGNOSTIC
611: if (pp->pr_drain_hook != NULL)
612: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
613: #endif
614: pp->pr_drain_hook = fn;
615: pp->pr_drain_hook_arg = arg;
616: }
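/*
 * Example (a sketch; `frag_drain' and `frag_pool' are hypothetical):
 *
 *	void
 *	frag_drain(void *arg, int flags)
 *	{
 *		... free cached state back to frag_pool ...
 *	}
 *
 *	pool_set_drain_hook(&frag_pool, frag_drain, NULL);
 *
 * The hook is invoked with the pool unlocked (see pool_reclaim() and
 * the hard-limit path in pool_get()), with the allocation flags
 * (e.g. PR_NOWAIT) as its second argument.
 */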
617:
1.88 chs 618: static struct pool_item_header *
1.55 thorpej 619: pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
620: {
621: struct pool_item_header *ph;
622: int s;
623:
624: LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
625:
626: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
627: ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
628: else {
1.85 pk 629: s = splvm();
1.55 thorpej 630: ph = pool_get(&phpool, flags);
631: splx(s);
632: }
633:
634: return (ph);
635: }
1.1 pk 636:
637: /*
1.3 pk 638: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 639: */
1.3 pk 640: void *
1.59 thorpej 641: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 642: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 643: #else
644: pool_get(struct pool *pp, int flags)
645: #endif
1.1 pk 646: {
647: struct pool_item *pi;
1.3 pk 648: struct pool_item_header *ph;
1.55 thorpej 649: void *v;
1.1 pk 650:
1.2 pk 651: #ifdef DIAGNOSTIC
1.84 thorpej 652: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 653: (flags & PR_WAITOK) != 0))
1.77 matt 654: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 655:
656: #ifdef LOCKDEBUG
657: if (flags & PR_WAITOK)
658: simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 659: #endif
1.58 thorpej 660: #endif /* DIAGNOSTIC */
1.1 pk 661:
1.21 thorpej 662: simple_lock(&pp->pr_slock);
1.25 thorpej 663: pr_enter(pp, file, line);
1.20 thorpej 664:
665: startover:
666: /*
667: * Check to see if we've reached the hard limit. If we have,
668: * and we can wait, then wait until an item has been returned to
669: * the pool.
670: */
671: #ifdef DIAGNOSTIC
1.34 thorpej 672: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 673: pr_leave(pp);
1.21 thorpej 674: simple_unlock(&pp->pr_slock);
1.20 thorpej 675: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
676: }
677: #endif
1.34 thorpej 678: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 679: if (pp->pr_drain_hook != NULL) {
680: /*
681: * Since the drain hook is going to free things
682: * back to the pool, unlock, call the hook, re-lock,
683: * and check the hardlimit condition again.
684: */
685: pr_leave(pp);
686: simple_unlock(&pp->pr_slock);
687: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
688: simple_lock(&pp->pr_slock);
689: pr_enter(pp, file, line);
690: if (pp->pr_nout < pp->pr_hardlimit)
691: goto startover;
692: }
693:
1.29 sommerfe 694: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 695: /*
696: * XXX: A warning isn't logged in this case. Should
697: * it be?
698: */
699: pp->pr_flags |= PR_WANTED;
1.25 thorpej 700: pr_leave(pp);
1.40 sommerfe 701: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 702: pr_enter(pp, file, line);
1.20 thorpej 703: goto startover;
704: }
1.31 thorpej 705:
706: /*
707: * Log a message that the hard limit has been hit.
708: */
709: if (pp->pr_hardlimit_warning != NULL &&
710: ratecheck(&pp->pr_hardlimit_warning_last,
711: &pp->pr_hardlimit_ratecap))
712: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 713:
714: pp->pr_nfail++;
715:
1.25 thorpej 716: pr_leave(pp);
1.21 thorpej 717: simple_unlock(&pp->pr_slock);
1.20 thorpej 718: return (NULL);
719: }
720:
1.3 pk 721: /*
722: * The convention we use is that if `curpage' is not NULL, then
723: * it points at a non-empty bucket. In particular, `curpage'
724: * never points at a page header which has PR_PHINPAGE set and
725: * has no items in its bucket.
726: */
1.20 thorpej 727: if ((ph = pp->pr_curpage) == NULL) {
728: #ifdef DIAGNOSTIC
729: if (pp->pr_nitems != 0) {
1.21 thorpej 730: simple_unlock(&pp->pr_slock);
1.20 thorpej 731: printf("pool_get: %s: curpage NULL, nitems %u\n",
732: pp->pr_wchan, pp->pr_nitems);
1.80 provos 733: panic("pool_get: nitems inconsistent");
1.20 thorpej 734: }
735: #endif
736:
1.21 thorpej 737: /*
738: * Call the back-end page allocator for more memory.
739: * Release the pool lock, as the back-end page allocator
740: * may block.
741: */
1.25 thorpej 742: pr_leave(pp);
1.21 thorpej 743: simple_unlock(&pp->pr_slock);
1.66 thorpej 744: v = pool_allocator_alloc(pp, flags);
1.55 thorpej 745: if (__predict_true(v != NULL))
746: ph = pool_alloc_item_header(pp, v, flags);
1.15 pk 747:
1.55 thorpej 748: if (__predict_false(v == NULL || ph == NULL)) {
749: if (v != NULL)
1.66 thorpej 750: pool_allocator_free(pp, v);
1.55 thorpej 751:
1.91 yamt 752: simple_lock(&pp->pr_slock);
753: pr_enter(pp, file, line);
754:
1.21 thorpej 755: /*
1.55 thorpej 756: * We were unable to allocate a page or item
757: * header, but we released the lock during
758: * allocation, so perhaps items were freed
759: * back to the pool. Check for this case.
1.21 thorpej 760: */
761: if (pp->pr_curpage != NULL)
762: goto startover;
1.15 pk 763:
1.3 pk 764: if ((flags & PR_WAITOK) == 0) {
765: pp->pr_nfail++;
1.25 thorpej 766: pr_leave(pp);
1.21 thorpej 767: simple_unlock(&pp->pr_slock);
1.1 pk 768: return (NULL);
1.3 pk 769: }
770:
1.15 pk 771: /*
772: * Wait for items to be returned to this pool.
1.21 thorpej 773: *
1.20 thorpej 774: * XXX: maybe we should wake up once a second and
775: * try again?
1.15 pk 776: */
1.1 pk 777: pp->pr_flags |= PR_WANTED;
1.66 thorpej 778: /* PA_WANTED is already set on the allocator. */
1.25 thorpej 779: pr_leave(pp);
1.40 sommerfe 780: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 781: pr_enter(pp, file, line);
1.20 thorpej 782: goto startover;
1.1 pk 783: }
1.3 pk 784:
1.15 pk 785: /* We have more memory; add it to the pool */
1.91 yamt 786: simple_lock(&pp->pr_slock);
787: pr_enter(pp, file, line);
1.55 thorpej 788: pool_prime_page(pp, v, ph);
1.15 pk 789: pp->pr_npagealloc++;
790:
1.20 thorpej 791: /* Start the allocation process over. */
792: goto startover;
1.3 pk 793: }
1.34 thorpej 794: if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
1.25 thorpej 795: pr_leave(pp);
1.21 thorpej 796: simple_unlock(&pp->pr_slock);
1.3 pk 797: panic("pool_get: %s: page empty", pp->pr_wchan);
1.21 thorpej 798: }
1.20 thorpej 799: #ifdef DIAGNOSTIC
1.34 thorpej 800: if (__predict_false(pp->pr_nitems == 0)) {
1.25 thorpej 801: pr_leave(pp);
1.21 thorpej 802: simple_unlock(&pp->pr_slock);
1.20 thorpej 803: printf("pool_get: %s: items on itemlist, nitems %u\n",
804: pp->pr_wchan, pp->pr_nitems);
1.80 provos 805: panic("pool_get: nitems inconsistent");
1.20 thorpej 806: }
1.65 enami 807: #endif
1.56 sommerfe 808:
1.65 enami 809: #ifdef POOL_DIAGNOSTIC
1.3 pk 810: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 811: #endif
1.3 pk 812:
1.65 enami 813: #ifdef DIAGNOSTIC
1.34 thorpej 814: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1.25 thorpej 815: pr_printlog(pp, pi, printf);
1.3 pk 816: panic("pool_get(%s): free list modified: magic=%x; page %p;"
817: " item addr %p\n",
818: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
819: }
820: #endif
821:
822: /*
823: * Remove from item list.
824: */
825: TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
1.20 thorpej 826: pp->pr_nitems--;
827: pp->pr_nout++;
1.6 thorpej 828: if (ph->ph_nmissing == 0) {
829: #ifdef DIAGNOSTIC
1.34 thorpej 830: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 831: panic("pool_get: nidle inconsistent");
832: #endif
833: pp->pr_nidle--;
1.88 chs 834:
835: /*
836: * This page was previously empty. Move it to the list of
837: * partially-full pages. This page is already curpage.
838: */
839: LIST_REMOVE(ph, ph_pagelist);
840: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 841: }
1.3 pk 842: ph->ph_nmissing++;
1.88 chs 843: if (TAILQ_EMPTY(&ph->ph_itemlist)) {
1.21 thorpej 844: #ifdef DIAGNOSTIC
1.34 thorpej 845: if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
1.25 thorpej 846: pr_leave(pp);
1.21 thorpej 847: simple_unlock(&pp->pr_slock);
848: panic("pool_get: %s: nmissing inconsistent",
849: pp->pr_wchan);
850: }
851: #endif
1.3 pk 852: /*
1.88 chs 853: * This page is now full. Move it to the full list
854: * and select a new current page.
1.3 pk 855: */
1.88 chs 856: LIST_REMOVE(ph, ph_pagelist);
857: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
858: pool_update_curpage(pp);
1.1 pk 859: }
1.3 pk 860:
861: pp->pr_nget++;
1.20 thorpej 862:
863: /*
864: * If we have a low water mark and we are now below that low
865: * water mark, add more items to the pool.
866: */
1.53 thorpej 867: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 868: /*
869: * XXX: Should we log a warning? Should we set up a timeout
870: * to try again in a second or so? The latter could break
871: * a caller's assumptions about interrupt protection, etc.
872: */
873: }
874:
1.25 thorpej 875: pr_leave(pp);
1.21 thorpej 876: simple_unlock(&pp->pr_slock);
1.1 pk 877: return (v);
878: }
879:
880: /*
1.43 thorpej 881: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 882: */
1.43 thorpej 883: static void
1.56 sommerfe 884: pool_do_put(struct pool *pp, void *v)
1.1 pk 885: {
886: struct pool_item *pi = v;
1.3 pk 887: struct pool_item_header *ph;
888: caddr_t page;
1.21 thorpej 889: int s;
1.3 pk 890:
1.61 chs 891: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
892:
1.66 thorpej 893: page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1.1 pk 894:
1.30 thorpej 895: #ifdef DIAGNOSTIC
1.34 thorpej 896: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 897: printf("pool %s: putting with none out\n",
898: pp->pr_wchan);
899: panic("pool_put");
900: }
901: #endif
1.3 pk 902:
1.34 thorpej 903: if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25 thorpej 904: pr_printlog(pp, NULL, printf);
1.3 pk 905: panic("pool_put: %s: page header missing", pp->pr_wchan);
906: }
1.28 thorpej 907:
908: #ifdef LOCKDEBUG
909: /*
910: * Check if we're freeing a locked simple lock.
911: */
912: simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
913: #endif
1.3 pk 914:
915: /*
916: * Return to item list.
917: */
1.2 pk 918: #ifdef DIAGNOSTIC
1.3 pk 919: pi->pi_magic = PI_MAGIC;
920: #endif
1.32 chs 921: #ifdef DEBUG
922: {
923: int i, *ip = v;
924:
925: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
926: *ip++ = PI_MAGIC;
927: }
928: }
929: #endif
930:
1.3 pk 931: TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.79 thorpej 932: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 933: ph->ph_nmissing--;
934: pp->pr_nput++;
1.20 thorpej 935: pp->pr_nitems++;
936: pp->pr_nout--;
1.3 pk 937:
938: /* Cancel "pool empty" condition if it exists */
939: if (pp->pr_curpage == NULL)
940: pp->pr_curpage = ph;
941:
942: if (pp->pr_flags & PR_WANTED) {
943: pp->pr_flags &= ~PR_WANTED;
1.15 pk 944: if (ph->ph_nmissing == 0)
945: pp->pr_nidle++;
1.3 pk 946: wakeup((caddr_t)pp);
947: return;
948: }
949:
950: /*
1.88 chs 951: * If this page is now empty, do one of two things:
1.21 thorpej 952: *
1.88 chs 953: * (1) If we have more pages than the page high water mark,
1.90 thorpej 954: * or if we are flagged as immediately freeing back idle
955: * pages, free the page back to the system. ONLY CONSIDER
956: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
957: * CLAIM.
1.21 thorpej 958: *
1.88 chs 959: * (2) Otherwise, move the page to the empty page list.
960: *
961: * Either way, select a new current page (so we use a partially-full
962: * page if one is available).
1.3 pk 963: */
964: if (ph->ph_nmissing == 0) {
1.6 thorpej 965: pp->pr_nidle++;
1.90 thorpej 966: if (pp->pr_npages > pp->pr_minpages &&
967: (pp->pr_npages > pp->pr_maxpages ||
968: (pp->pr_roflags & PR_IMMEDRELEASE) != 0 ||
969: (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
1.91 yamt 970: simple_unlock(&pp->pr_slock);
1.61 chs 971: pr_rmpage(pp, ph, NULL);
1.91 yamt 972: simple_lock(&pp->pr_slock);
1.3 pk 973: } else {
1.88 chs 974: LIST_REMOVE(ph, ph_pagelist);
975: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 976:
1.21 thorpej 977: /*
978: * Update the timestamp on the page. A page must
979: * be idle for some period of time before it can
980: * be reclaimed by the pagedaemon. This minimizes
981: * ping-pong'ing for memory.
982: */
983: s = splclock();
984: ph->ph_time = mono_time;
985: splx(s);
1.1 pk 986: }
1.88 chs 987: pool_update_curpage(pp);
1.1 pk 988: }
1.88 chs 989:
1.21 thorpej 990: /*
1.88 chs 991: * If the page was previously completely full, move it to the
992: * partially-full list and make it the current page. The next
993: * allocation will get the item from this page, instead of
994: * further fragmenting the pool.
1.21 thorpej 995: */
996: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 997: LIST_REMOVE(ph, ph_pagelist);
998: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 999: pp->pr_curpage = ph;
1000: }
1.43 thorpej 1001: }
1002:
1003: /*
1004: * Return resource to the pool; must be called at appropriate spl level
1005: */
1.59 thorpej 1006: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1007: void
1008: _pool_put(struct pool *pp, void *v, const char *file, long line)
1009: {
1010:
1011: simple_lock(&pp->pr_slock);
1012: pr_enter(pp, file, line);
1013:
1.56 sommerfe 1014: pr_log(pp, v, PRLOG_PUT, file, line);
1015:
1016: pool_do_put(pp, v);
1.21 thorpej 1017:
1.25 thorpej 1018: pr_leave(pp);
1.21 thorpej 1019: simple_unlock(&pp->pr_slock);
1.1 pk 1020: }
1.57 sommerfe 1021: #undef pool_put
1.59 thorpej 1022: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1023:
1.56 sommerfe 1024: void
1025: pool_put(struct pool *pp, void *v)
1026: {
1027:
1028: simple_lock(&pp->pr_slock);
1029:
1030: pool_do_put(pp, v);
1031:
1032: simple_unlock(&pp->pr_slock);
1033: }
1.57 sommerfe 1034:
1.59 thorpej 1035: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1036: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1037: #endif
1.74 thorpej 1038:
1039: /*
1040: * Add N items to the pool.
1041: */
1042: int
1043: pool_prime(struct pool *pp, int n)
1044: {
1.83 scw 1045: struct pool_item_header *ph = NULL;
1.74 thorpej 1046: caddr_t cp;
1.75 simonb 1047: int newpages;
1.74 thorpej 1048:
1049: simple_lock(&pp->pr_slock);
1050:
1051: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1052:
1053: while (newpages-- > 0) {
1054: simple_unlock(&pp->pr_slock);
1055: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1056: if (__predict_true(cp != NULL))
1057: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1058:
1059: if (__predict_false(cp == NULL || ph == NULL)) {
1060: if (cp != NULL)
1061: pool_allocator_free(pp, cp);
1.91 yamt 1062: simple_lock(&pp->pr_slock);
1.74 thorpej 1063: break;
1064: }
1065:
1.91 yamt 1066: simple_lock(&pp->pr_slock);
1.74 thorpej 1067: pool_prime_page(pp, cp, ph);
1068: pp->pr_npagealloc++;
1069: pp->pr_minpages++;
1070: }
1071:
1072: if (pp->pr_minpages >= pp->pr_maxpages)
1073: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1074:
1075: simple_unlock(&pp->pr_slock);
1076: return (0);
1077: }
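/*
 * For example, pool_prime(&foo_pool, 64) (using the hypothetical pool
 * from above) pre-allocates enough pages to hold 64 items; since it
 * also raises pr_minpages, pool_reclaim() will not release them.
 */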
1.55 thorpej 1078:
1079: /*
1.3 pk 1080: * Add a page's worth of items to the pool.
1.21 thorpej 1081: *
1082: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1083: */
1.55 thorpej 1084: static void
1085: pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1.3 pk 1086: {
1087: struct pool_item *pi;
1088: caddr_t cp = storage;
1089: unsigned int align = pp->pr_align;
1090: unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1091: int n;
1.89 yamt 1092: int s;
1.36 pk 1093:
1.91 yamt 1094: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1095:
1.66 thorpej 1096: #ifdef DIAGNOSTIC
1097: if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1098: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1099: #endif
1.3 pk 1100:
1101: /*
1102: * Insert page header.
1103: */
1.88 chs 1104: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1105: TAILQ_INIT(&ph->ph_itemlist);
1106: ph->ph_page = storage;
1107: ph->ph_nmissing = 0;
1.89 yamt 1108: s = splclock();
1109: ph->ph_time = mono_time;
1110: splx(s);
1.88 chs 1111: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1112: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1113:
1.6 thorpej 1114: pp->pr_nidle++;
1115:
1.3 pk 1116: /*
1117: * Color this page.
1118: */
1119: cp = (caddr_t)(cp + pp->pr_curcolor);
1120: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1121: pp->pr_curcolor = 0;
1122:
1123: /*
 1124: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1125: */
1126: if (ioff != 0)
1127: cp = (caddr_t)(cp + (align - ioff));
1128:
1129: /*
1130: * Insert remaining chunks on the bucket list.
1131: */
1132: n = pp->pr_itemsperpage;
1.20 thorpej 1133: pp->pr_nitems += n;
1.3 pk 1134:
1135: while (n--) {
1136: pi = (struct pool_item *)cp;
1.78 thorpej 1137:
1138: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1139:
1140: /* Insert on page list */
1141: TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1142: #ifdef DIAGNOSTIC
1143: pi->pi_magic = PI_MAGIC;
1144: #endif
1145: cp = (caddr_t)(cp + pp->pr_size);
1146: }
1147:
1148: /*
1149: * If the pool was depleted, point at the new page.
1150: */
1151: if (pp->pr_curpage == NULL)
1152: pp->pr_curpage = ph;
1153:
1154: if (++pp->pr_npages > pp->pr_hiwat)
1155: pp->pr_hiwat = pp->pr_npages;
1156: }
1157:
1.20 thorpej 1158: /*
1.52 thorpej 1159: * Used by pool_get() when nitems drops below the low water mark; it
1.88 chs 1160: * allocates pages until pr_nitems catches up with the low water mark.
1.20 thorpej 1161: *
1.21 thorpej 1162: * Note 1: we never wait for memory here; we let the caller decide what to do.
1.20 thorpej 1163: *
1.73 thorpej 1164: * Note 2: we must be called with the pool already locked, and we return
1.20 thorpej 1165: * with it locked.
1166: */
1167: static int
1.42 thorpej 1168: pool_catchup(struct pool *pp)
1.20 thorpej 1169: {
1.83 scw 1170: struct pool_item_header *ph = NULL;
1.20 thorpej 1171: caddr_t cp;
1172: int error = 0;
1173:
1.54 thorpej 1174: while (POOL_NEEDS_CATCHUP(pp)) {
1.20 thorpej 1175: /*
1.21 thorpej 1176: * Call the page back-end allocator for more memory.
1177: *
1178: * XXX: We never wait, so should we bother unlocking
1179: * the pool descriptor?
1.20 thorpej 1180: */
1.21 thorpej 1181: simple_unlock(&pp->pr_slock);
1.66 thorpej 1182: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1.55 thorpej 1183: if (__predict_true(cp != NULL))
1184: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1185: if (__predict_false(cp == NULL || ph == NULL)) {
1186: if (cp != NULL)
1.66 thorpej 1187: pool_allocator_free(pp, cp);
1.20 thorpej 1188: error = ENOMEM;
1.91 yamt 1189: simple_lock(&pp->pr_slock);
1.20 thorpej 1190: break;
1191: }
1.91 yamt 1192: simple_lock(&pp->pr_slock);
1.55 thorpej 1193: pool_prime_page(pp, cp, ph);
1.26 thorpej 1194: pp->pr_npagealloc++;
1.20 thorpej 1195: }
1196:
1197: return (error);
1198: }
1199:
1.88 chs 1200: static void
1201: pool_update_curpage(struct pool *pp)
1202: {
1203:
1204: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1205: if (pp->pr_curpage == NULL) {
1206: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1207: }
1208: }
1209:
1.3 pk 1210: void
1.42 thorpej 1211: pool_setlowat(struct pool *pp, int n)
1.3 pk 1212: {
1.15 pk 1213:
1.21 thorpej 1214: simple_lock(&pp->pr_slock);
1215:
1.3 pk 1216: pp->pr_minitems = n;
1.15 pk 1217: pp->pr_minpages = (n == 0)
1218: ? 0
1.18 thorpej 1219: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1220:
1221: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1222: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1223: /*
1224: * XXX: Should we log a warning? Should we set up a timeout
1225: * to try again in a second or so? The latter could break
1226: * a caller's assumptions about interrupt protection, etc.
1227: */
1228: }
1.21 thorpej 1229:
1230: simple_unlock(&pp->pr_slock);
1.3 pk 1231: }
1232:
1233: void
1.42 thorpej 1234: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1235: {
1.15 pk 1236:
1.21 thorpej 1237: simple_lock(&pp->pr_slock);
1238:
1.15 pk 1239: pp->pr_maxpages = (n == 0)
1240: ? 0
1.18 thorpej 1241: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1242:
1243: simple_unlock(&pp->pr_slock);
1.3 pk 1244: }
1245:
1.20 thorpej 1246: void
1.42 thorpej 1247: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1248: {
1249:
1.21 thorpej 1250: simple_lock(&pp->pr_slock);
1.20 thorpej 1251:
1252: pp->pr_hardlimit = n;
1253: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1254: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1255: pp->pr_hardlimit_warning_last.tv_sec = 0;
1256: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1257:
1258: /*
1.21 thorpej 1259: * In-line version of pool_sethiwat(), because we don't want to
1260: * release the lock.
1.20 thorpej 1261: */
1262: pp->pr_maxpages = (n == 0)
1263: ? 0
1264: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1265:
1266: simple_unlock(&pp->pr_slock);
1.20 thorpej 1267: }
1.3 pk 1268:
1269: /*
 1270: * Release all completely idle pages that have not been used recently.
1271: */
1.66 thorpej 1272: int
1.59 thorpej 1273: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1274: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1275: #else
1276: pool_reclaim(struct pool *pp)
1277: #endif
1.3 pk 1278: {
1279: struct pool_item_header *ph, *phnext;
1.43 thorpej 1280: struct pool_cache *pc;
1.21 thorpej 1281: struct timeval curtime;
1.61 chs 1282: struct pool_pagelist pq;
1.88 chs 1283: struct timeval diff;
1.21 thorpej 1284: int s;
1.3 pk 1285:
1.68 thorpej 1286: if (pp->pr_drain_hook != NULL) {
1287: /*
1288: * The drain hook must be called with the pool unlocked.
1289: */
1290: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1291: }
1292:
1.21 thorpej 1293: if (simple_lock_try(&pp->pr_slock) == 0)
1.66 thorpej 1294: return (0);
1.25 thorpej 1295: pr_enter(pp, file, line);
1.68 thorpej 1296:
1.88 chs 1297: LIST_INIT(&pq);
1.3 pk 1298:
1.43 thorpej 1299: /*
1300: * Reclaim items from the pool's caches.
1301: */
1.61 chs 1302: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1.43 thorpej 1303: pool_cache_reclaim(pc);
1304:
1.21 thorpej 1305: s = splclock();
1306: curtime = mono_time;
1307: splx(s);
1308:
1.88 chs 1309: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1310: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1311:
1312: /* Check our minimum page claim */
1313: if (pp->pr_npages <= pp->pr_minpages)
1314: break;
1315:
1.88 chs 1316: KASSERT(ph->ph_nmissing == 0);
1317: timersub(&curtime, &ph->ph_time, &diff);
1318: if (diff.tv_sec < pool_inactive_time)
1319: continue;
1.21 thorpej 1320:
1.88 chs 1321: /*
1322: * If freeing this page would put us below
1323: * the low water mark, stop now.
1324: */
1325: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1326: pp->pr_minitems)
1327: break;
1.21 thorpej 1328:
1.88 chs 1329: pr_rmpage(pp, ph, &pq);
1.3 pk 1330: }
1331:
1.25 thorpej 1332: pr_leave(pp);
1.21 thorpej 1333: simple_unlock(&pp->pr_slock);
1.88 chs 1334: if (LIST_EMPTY(&pq))
1.66 thorpej 1335: return (0);
1336:
1.88 chs 1337: while ((ph = LIST_FIRST(&pq)) != NULL) {
1338: LIST_REMOVE(ph, ph_pagelist);
1.66 thorpej 1339: pool_allocator_free(pp, ph->ph_page);
1.61 chs 1340: if (pp->pr_roflags & PR_PHINPAGE) {
1341: continue;
1342: }
1.85 pk 1343: s = splvm();
1.61 chs 1344: pool_put(&phpool, ph);
1345: splx(s);
1346: }
1.66 thorpej 1347:
1348: return (1);
1.3 pk 1349: }
1350:
1351: /*
1352: * Drain pools, one at a time.
1.21 thorpej 1353: *
1354: * Note, we must never be called from an interrupt context.
1.3 pk 1355: */
1356: void
1.42 thorpej 1357: pool_drain(void *arg)
1.3 pk 1358: {
1359: struct pool *pp;
1.23 thorpej 1360: int s;
1.3 pk 1361:
1.61 chs 1362: pp = NULL;
1.49 thorpej 1363: s = splvm();
1.23 thorpej 1364: simple_lock(&pool_head_slock);
1.61 chs 1365: if (drainpp == NULL) {
1366: drainpp = TAILQ_FIRST(&pool_head);
1367: }
1368: if (drainpp) {
1369: pp = drainpp;
1370: drainpp = TAILQ_NEXT(pp, pr_poollist);
1371: }
1372: simple_unlock(&pool_head_slock);
1.63 chs 1373: pool_reclaim(pp);
1.61 chs 1374: splx(s);
1.3 pk 1375: }
1376:
1377: /*
1378: * Diagnostic helpers.
1379: */
1380: void
1.42 thorpej 1381: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1382: {
1383: int s;
1384:
1.49 thorpej 1385: s = splvm();
1.25 thorpej 1386: if (simple_lock_try(&pp->pr_slock) == 0) {
1387: printf("pool %s is locked; try again later\n",
1388: pp->pr_wchan);
1389: splx(s);
1390: return;
1391: }
1392: pool_print1(pp, modif, printf);
1.21 thorpej 1393: simple_unlock(&pp->pr_slock);
1394: splx(s);
1395: }
1396:
1.25 thorpej 1397: void
1.42 thorpej 1398: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1399: {
1400: int didlock = 0;
1401:
1402: if (pp == NULL) {
1403: (*pr)("Must specify a pool to print.\n");
1404: return;
1405: }
1406:
1407: /*
1408: * Called from DDB; interrupts should be blocked, and all
1409: * other processors should be paused. We can skip locking
1410: * the pool in this case.
1411: *
1412: * We do a simple_lock_try() just to print the lock
1413: * status, however.
1414: */
1415:
1416: if (simple_lock_try(&pp->pr_slock) == 0)
1417: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1418: else
1419: didlock = 1;
1420:
1421: pool_print1(pp, modif, pr);
1422:
1423: if (didlock)
1424: simple_unlock(&pp->pr_slock);
1425: }
1426:
1.21 thorpej 1427: static void
1.88 chs 1428: pool_print_pagelist(struct pool_pagelist *pl, void (*pr)(const char *, ...))
1429: {
1430: struct pool_item_header *ph;
1431: #ifdef DIAGNOSTIC
1432: struct pool_item *pi;
1433: #endif
1434:
1435: LIST_FOREACH(ph, pl, ph_pagelist) {
1436: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1437: ph->ph_page, ph->ph_nmissing,
1438: (u_long)ph->ph_time.tv_sec,
1439: (u_long)ph->ph_time.tv_usec);
1440: #ifdef DIAGNOSTIC
1441: TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1442: if (pi->pi_magic != PI_MAGIC) {
1443: (*pr)("\t\t\titem %p, magic 0x%x\n",
1444: pi, pi->pi_magic);
1445: }
1446: }
1447: #endif
1448: }
1449: }
1450:
1451: static void
1.42 thorpej 1452: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1453: {
1.25 thorpej 1454: struct pool_item_header *ph;
1.44 thorpej 1455: struct pool_cache *pc;
1456: struct pool_cache_group *pcg;
1457: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1458: char c;
1459:
1460: while ((c = *modif++) != '\0') {
1461: if (c == 'l')
1462: print_log = 1;
1463: if (c == 'p')
1464: print_pagelist = 1;
1.44 thorpej 1465: if (c == 'c')
1466: print_cache = 1;
1.25 thorpej 1467: }
1468:
1469: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1470: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1471: pp->pr_roflags);
1.66 thorpej 1472: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1473: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1474: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1475: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1476: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1477:
1478: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1479: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1480: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1481: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1482:
1483: if (print_pagelist == 0)
1484: goto skip_pagelist;
1485:
1.88 chs 1486: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1487: (*pr)("\n\tempty page list:\n");
1488: pool_print_pagelist(&pp->pr_emptypages, pr);
1489: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1490: (*pr)("\n\tfull page list:\n");
1491: pool_print_pagelist(&pp->pr_fullpages, pr);
1492: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1493: (*pr)("\n\tpartial-page list:\n");
1494: pool_print_pagelist(&pp->pr_partpages, pr);
1495:
1.25 thorpej 1496: if (pp->pr_curpage == NULL)
1497: (*pr)("\tno current page\n");
1498: else
1499: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1500:
1501: skip_pagelist:
1502: if (print_log == 0)
1503: goto skip_log;
1504:
1505: (*pr)("\n");
1506: if ((pp->pr_roflags & PR_LOGGING) == 0)
1507: (*pr)("\tno log\n");
1508: else
1509: pr_printlog(pp, NULL, pr);
1.3 pk 1510:
1.25 thorpej 1511: skip_log:
1.44 thorpej 1512: if (print_cache == 0)
1513: goto skip_cache;
1514:
1.61 chs 1515: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.44 thorpej 1516: (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1517: pc->pc_allocfrom, pc->pc_freeto);
1.48 thorpej 1518: (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1519: pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.61 chs 1520: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.44 thorpej 1521: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1.87 thorpej 1522: for (i = 0; i < PCG_NOBJECTS; i++) {
1523: if (pcg->pcg_objects[i].pcgo_pa !=
1524: POOL_PADDR_INVALID) {
1525: (*pr)("\t\t\t%p, 0x%llx\n",
1526: pcg->pcg_objects[i].pcgo_va,
1527: (unsigned long long)
1528: pcg->pcg_objects[i].pcgo_pa);
1529: } else {
1530: (*pr)("\t\t\t%p\n",
1531: pcg->pcg_objects[i].pcgo_va);
1532: }
1533: }
1.44 thorpej 1534: }
1535: }
1536:
1537: skip_cache:
1.88 chs 1538: pr_enter_check(pp, pr);
1539: }
1540:
1541: static int
1542: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1543: {
1544: struct pool_item *pi;
1545: caddr_t page;
1546: int n;
1547:
1548: page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1549: if (page != ph->ph_page &&
1550: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1551: if (label != NULL)
1552: printf("%s: ", label);
1553: printf("pool(%p:%s): page inconsistency: page %p;"
1554: " at page head addr %p (p %p)\n", pp,
1555: pp->pr_wchan, ph->ph_page,
1556: ph, page);
1557: return 1;
1558: }
1.3 pk 1559:
1.88 chs 1560: for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1561: pi != NULL;
1562: pi = TAILQ_NEXT(pi,pi_list), n++) {
1563:
1564: #ifdef DIAGNOSTIC
1565: if (pi->pi_magic != PI_MAGIC) {
1566: if (label != NULL)
1567: printf("%s: ", label);
1568: printf("pool(%s): free list modified: magic=%x;"
1569: " page %p; item ordinal %d;"
1570: " addr %p (p %p)\n",
1571: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1572: n, pi, page);
1573: panic("pool");
1574: }
1575: #endif
1576: page =
1577: (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1578: if (page == ph->ph_page)
1579: continue;
1580:
1581: if (label != NULL)
1582: printf("%s: ", label);
1583: printf("pool(%p:%s): page inconsistency: page %p;"
1584: " item ordinal %d; addr %p (p %p)\n", pp,
1585: pp->pr_wchan, ph->ph_page,
1586: n, pi, page);
1587: return 1;
1588: }
1589: return 0;
1.3 pk 1590: }
1591:
1.88 chs 1592:
1.3 pk 1593: int
1.42 thorpej 1594: pool_chk(struct pool *pp, const char *label)
1.3 pk 1595: {
1596: struct pool_item_header *ph;
1597: int r = 0;
1598:
1.21 thorpej 1599: simple_lock(&pp->pr_slock);
1.88 chs 1600: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1601: r = pool_chk_page(pp, label, ph);
1602: if (r) {
1603: goto out;
1604: }
1605: }
1606: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1607: r = pool_chk_page(pp, label, ph);
1608: if (r) {
1.3 pk 1609: goto out;
1610: }
1.88 chs 1611: }
1612: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1613: r = pool_chk_page(pp, label, ph);
1614: if (r) {
1.3 pk 1615: goto out;
1616: }
1617: }
1.88 chs 1618:
1.3 pk 1619: out:
1.21 thorpej 1620: simple_unlock(&pp->pr_slock);
1.3 pk 1621: return (r);
1.43 thorpej 1622: }
1623:
1624: /*
1625: * pool_cache_init:
1626: *
1627: * Initialize a pool cache.
1628: *
1629: * NOTE: If the pool must be protected from interrupts, we expect
1630: * to be called at the appropriate interrupt priority level.
1631: */
1632: void
1633: pool_cache_init(struct pool_cache *pc, struct pool *pp,
1634: int (*ctor)(void *, void *, int),
1635: void (*dtor)(void *, void *),
1636: void *arg)
1637: {
1638:
1639: TAILQ_INIT(&pc->pc_grouplist);
1640: simple_lock_init(&pc->pc_slock);
1641:
1642: pc->pc_allocfrom = NULL;
1643: pc->pc_freeto = NULL;
1644: pc->pc_pool = pp;
1645:
1646: pc->pc_ctor = ctor;
1647: pc->pc_dtor = dtor;
1648: pc->pc_arg = arg;
1649:
1.48 thorpej 1650: pc->pc_hits = 0;
1651: pc->pc_misses = 0;
1652:
1653: pc->pc_ngroups = 0;
1654:
1655: pc->pc_nitems = 0;
1656:
1.43 thorpej 1657: simple_lock(&pp->pr_slock);
1658: TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1659: simple_unlock(&pp->pr_slock);
1660: }
1661:
1662: /*
1663: * pool_cache_destroy:
1664: *
1665: * Destroy a pool cache.
1666: */
1667: void
1668: pool_cache_destroy(struct pool_cache *pc)
1669: {
1670: struct pool *pp = pc->pc_pool;
1671:
1672: /* First, invalidate the entire cache. */
1673: pool_cache_invalidate(pc);
1674:
1675: /* ...and remove it from the pool's cache list. */
1676: simple_lock(&pp->pr_slock);
1677: TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1678: simple_unlock(&pp->pr_slock);
1679: }
1680:
1681: static __inline void *
1.87 thorpej 1682: pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1.43 thorpej 1683: {
1684: void *object;
1685: u_int idx;
1686:
1687: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 1688: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 1689: idx = --pcg->pcg_avail;
1690:
1.87 thorpej 1691: KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1692: object = pcg->pcg_objects[idx].pcgo_va;
1693: if (pap != NULL)
1694: *pap = pcg->pcg_objects[idx].pcgo_pa;
1695: pcg->pcg_objects[idx].pcgo_va = NULL;
1.43 thorpej 1696:
1697: return (object);
1698: }
1699:
1700: static __inline void
1.87 thorpej 1701: pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1.43 thorpej 1702: {
1703: u_int idx;
1704:
1705: KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1706: idx = pcg->pcg_avail++;
1707:
1.87 thorpej 1708: KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1709: pcg->pcg_objects[idx].pcgo_va = object;
1710: pcg->pcg_objects[idx].pcgo_pa = pa;
1.43 thorpej 1711: }
1712:
1713: /*
1.87 thorpej 1714: * pool_cache_get{,_paddr}:
1.43 thorpej 1715: *
1.87 thorpej 1716: * Get an object from a pool cache (optionally returning
1717: * the physical address of the object).
1.43 thorpej 1718: */
1719: void *
1.87 thorpej 1720: pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1.43 thorpej 1721: {
1722: struct pool_cache_group *pcg;
1723: void *object;
1.58 thorpej 1724:
1725: #ifdef LOCKDEBUG
1726: if (flags & PR_WAITOK)
1727: simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1728: #endif
1.43 thorpej 1729:
1730: simple_lock(&pc->pc_slock);
1731:
1732: if ((pcg = pc->pc_allocfrom) == NULL) {
1.61 chs 1733: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1734: if (pcg->pcg_avail != 0) {
1735: pc->pc_allocfrom = pcg;
1736: goto have_group;
1737: }
1738: }
1739:
1740: /*
1741: * No groups with any available objects. Allocate
1742: * a new object, construct it, and return it to
1743: * the caller. We will allocate a group, if necessary,
1744: * when the object is freed back to the cache.
1745: */
1.48 thorpej 1746: pc->pc_misses++;
1.43 thorpej 1747: simple_unlock(&pc->pc_slock);
1748: object = pool_get(pc->pc_pool, flags);
1749: if (object != NULL && pc->pc_ctor != NULL) {
1750: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1751: pool_put(pc->pc_pool, object);
1752: return (NULL);
1753: }
1754: }
1.87 thorpej 1755: if (object != NULL && pap != NULL) {
1756: #ifdef POOL_VTOPHYS
1757: *pap = POOL_VTOPHYS(object);
1758: #else
1759: *pap = POOL_PADDR_INVALID;
1760: #endif
1761: }
1.43 thorpej 1762: return (object);
1763: }
1764:
1765: have_group:
1.48 thorpej 1766: pc->pc_hits++;
1767: pc->pc_nitems--;
1.87 thorpej 1768: object = pcg_get(pcg, pap);
1.43 thorpej 1769:
1770: if (pcg->pcg_avail == 0)
1771: pc->pc_allocfrom = NULL;
1.45 thorpej 1772:
1.43 thorpej 1773: simple_unlock(&pc->pc_slock);
1774:
1775: return (object);
1776: }
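
/*
 * Example (illustrative, continuing the hypothetical "foo" cache):
 * most callers need no physical address and pass NULL for "pap", as
 * the plain pool_cache_get() form does; a caller that does need it,
 * e.g. to fill in a DMA descriptor, gets both in one call:
 *
 *	paddr_t pa;
 *	void *obj;
 *
 *	obj = pool_cache_get_paddr(&foo_cache, PR_NOWAIT, &pa);
 *	if (obj == NULL)
 *		return (ENOBUFS);
 */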
1777:
1778: /*
1.87 thorpej 1779: * pool_cache_put{,_paddr}:
1.43 thorpej 1780: *
1.87 thorpej 1781: * Put an object back to the pool cache (optionally caching the
1782: * physical address of the object).
1.43 thorpej 1783: */
1784: void
1.87 thorpej 1785: pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1.43 thorpej 1786: {
1787: struct pool_cache_group *pcg;
1.60 thorpej 1788: int s;
1.43 thorpej 1789:
1790: simple_lock(&pc->pc_slock);
1791:
1792: if ((pcg = pc->pc_freeto) == NULL) {
1.61 chs 1793: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1794: if (pcg->pcg_avail != PCG_NOBJECTS) {
1795: pc->pc_freeto = pcg;
1796: goto have_group;
1797: }
1798: }
1799:
1800: /*
1801: 		 * No group has room for the object.  Attempt to
1.47  thorpej  1802: 		 * allocate a new group.
1.43 thorpej 1803: */
1.47 thorpej 1804: simple_unlock(&pc->pc_slock);
1.60 thorpej 1805: s = splvm();
1.43 thorpej 1806: pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60 thorpej 1807: splx(s);
1.43 thorpej 1808: if (pcg != NULL) {
1809: memset(pcg, 0, sizeof(*pcg));
1.47 thorpej 1810: simple_lock(&pc->pc_slock);
1.48 thorpej 1811: pc->pc_ngroups++;
1.43 thorpej 1812: TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1.47 thorpej 1813: if (pc->pc_freeto == NULL)
1814: pc->pc_freeto = pcg;
1.43 thorpej 1815: goto have_group;
1816: }
1817:
1818: /*
1819: * Unable to allocate a cache group; destruct the object
1820: * and free it back to the pool.
1821: */
1.51 thorpej 1822: pool_cache_destruct_object(pc, object);
1.43 thorpej 1823: return;
1824: }
1825:
1826: have_group:
1.48 thorpej 1827: pc->pc_nitems++;
1.87 thorpej 1828: pcg_put(pcg, object, pa);
1.43 thorpej 1829:
1830: if (pcg->pcg_avail == PCG_NOBJECTS)
1831: pc->pc_freeto = NULL;
1832:
1833: simple_unlock(&pc->pc_slock);
1.51 thorpej 1834: }
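
/*
 * Example (illustrative): the object goes back still constructed,
 * along with the physical address remembered from get time, so a
 * later pool_cache_get_paddr() can hand out both again:
 *
 *	pool_cache_put_paddr(&foo_cache, obj, pa);
 *
 * A caller that never tracked the physical address uses the plain
 * pool_cache_put() form, which amounts to storing POOL_PADDR_INVALID.
 */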
1835:
1836: /*
1837: * pool_cache_destruct_object:
1838: *
1839: * Force destruction of an object and its release back into
1840: * the pool.
1841: */
1842: void
1843: pool_cache_destruct_object(struct pool_cache *pc, void *object)
1844: {
1845:
1846: if (pc->pc_dtor != NULL)
1847: (*pc->pc_dtor)(pc->pc_arg, object);
1848: pool_put(pc->pc_pool, object);
1.43 thorpej 1849: }
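
/*
 * Example (illustrative): a caller that knows an object's constructed
 * state is no longer worth keeping destroys it outright,
 *
 *	pool_cache_destruct_object(&foo_cache, obj);
 *
 * rather than returning it with pool_cache_put_paddr(), which would
 * keep it constructed for reuse.
 */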
1850:
1851: /*
1852: * pool_cache_do_invalidate:
1853: *
1854: * This internal function implements pool_cache_invalidate() and
1855: * pool_cache_reclaim().
1856: */
1857: static void
1858: pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1.56 sommerfe 1859: void (*putit)(struct pool *, void *))
1.43 thorpej 1860: {
1861: struct pool_cache_group *pcg, *npcg;
1862: void *object;
1.60 thorpej 1863: int s;
1.43 thorpej 1864:
1865: for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1866: pcg = npcg) {
1867: npcg = TAILQ_NEXT(pcg, pcg_list);
1868: while (pcg->pcg_avail != 0) {
1.48 thorpej 1869: pc->pc_nitems--;
1.87 thorpej 1870: object = pcg_get(pcg, NULL);
1.45 thorpej 1871: if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1872: pc->pc_allocfrom = NULL;
1.43 thorpej 1873: if (pc->pc_dtor != NULL)
1874: (*pc->pc_dtor)(pc->pc_arg, object);
1.56 sommerfe 1875: (*putit)(pc->pc_pool, object);
1.43 thorpej 1876: }
1877: if (free_groups) {
1.48 thorpej 1878: pc->pc_ngroups--;
1.43 thorpej 1879: TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1.46 thorpej 1880: if (pc->pc_freeto == pcg)
1881: pc->pc_freeto = NULL;
1.60 thorpej 1882: s = splvm();
1.43 thorpej 1883: pool_put(&pcgpool, pcg);
1.60 thorpej 1884: splx(s);
1.43 thorpej 1885: }
1886: }
1887: }
1888:
1889: /*
1890: * pool_cache_invalidate:
1891: *
1892: * Invalidate a pool cache (destruct and release all of the
1893: * cached objects).
1894: */
1895: void
1896: pool_cache_invalidate(struct pool_cache *pc)
1897: {
1898:
1899: simple_lock(&pc->pc_slock);
1.56 sommerfe 1900: pool_cache_do_invalidate(pc, 0, pool_put);
1.43 thorpej 1901: simple_unlock(&pc->pc_slock);
1902: }
1903:
1904: /*
1905: * pool_cache_reclaim:
1906: *
1907: * Reclaim a pool cache for pool_reclaim().
1908: */
1909: static void
1910: pool_cache_reclaim(struct pool_cache *pc)
1911: {
1912:
1.47 thorpej 1913: simple_lock(&pc->pc_slock);
1.43 thorpej 1914: pool_cache_do_invalidate(pc, 1, pool_do_put);
1915: simple_unlock(&pc->pc_slock);
1.3 pk 1916: }
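
/*
 * The two entry points above differ in two ways: pool_cache_reclaim()
 * also frees the emptied groups back to pcgpool, and it is reached
 * from pool_reclaim() with the pool's pr_slock already held, so it
 * must release objects with pool_do_put() rather than pool_put(),
 * which takes pr_slock itself.
 */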
1.66 thorpej 1917:
1918: /*
1919: * Pool backend allocators.
1920: *
1921: * Each pool has a backend allocator that handles allocation, deallocation,
1922: * and any additional draining that might be needed.
1923: *
1924: * We provide two standard allocators:
1925: *
1926: * pool_allocator_kmem - the default when no allocator is specified
1927: *
1928: * pool_allocator_nointr - used for pools that will not be accessed
1929: * in interrupt context.
1930: */
1931: void *pool_page_alloc(struct pool *, int);
1932: void pool_page_free(struct pool *, void *);
1933:
1934: struct pool_allocator pool_allocator_kmem = {
1935: pool_page_alloc, pool_page_free, 0,
1936: };
1937:
1938: void *pool_page_alloc_nointr(struct pool *, int);
1939: void pool_page_free_nointr(struct pool *, void *);
1940:
1941: struct pool_allocator pool_allocator_nointr = {
1942: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1943: };
1944:
1945: #ifdef POOL_SUBPAGE
1946: void *pool_subpage_alloc(struct pool *, int);
1947: void pool_subpage_free(struct pool *, void *);
1948:
1949: struct pool_allocator pool_allocator_kmem_subpage = {
1950: pool_subpage_alloc, pool_subpage_free, 0,
1951: };
1952: #endif /* POOL_SUBPAGE */
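
/*
 * Example (an illustrative sketch; the "mcl" names are hypothetical):
 * a subsystem with special backing requirements can supply its own
 * allocator instead of one of the standard ones.
 *
 *	void *mcl_page_alloc(struct pool *, int);
 *	void mcl_page_free(struct pool *, void *);
 *
 *	struct pool_allocator mcl_allocator = {
 *		mcl_page_alloc, mcl_page_free, 0,
 *	};
 *
 *	pool_init(&mcl_pool, MCLBYTES, 0, 0, 0, "mclpl", &mcl_allocator);
 *
 * The zero third field (the allocator's page size) selects the
 * default page size.
 */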
1953:
1954: /*
1955: * We have at least three different resources for the same allocation and
1956: * each resource can be depleted. First, we have the ready elements in the
1957: * pool. Then we have the resource (typically a vm_map) for this allocator.
1958:  * Finally, we have physical memory.  A wait on any one of these can become
1959:  * unnecessary when any of the others is freed, but the kernel doesn't
1960:  * support sleeping on multiple wait channels, so we employ another strategy.
1961: *
1962: * The caller sleeps on the pool (so that it can be awakened when an item
1963: * is returned to the pool), but we set PA_WANT on the allocator. When a
1964: * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1965: * will wake up all sleeping pools belonging to this allocator.
1966: *
1967: * XXX Thundering herd.
1968: */
1969: void *
1970: pool_allocator_alloc(struct pool *org, int flags)
1971: {
1972: struct pool_allocator *pa = org->pr_alloc;
1973: struct pool *pp, *start;
1974: int s, freed;
1975: void *res;
1976:
1.91 yamt 1977: LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
1978:
1.66 thorpej 1979: do {
1980: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1981: return (res);
1.68 thorpej 1982: if ((flags & PR_WAITOK) == 0) {
1983: /*
1984: 			 * We only run the drain hook here in the PR_NOWAIT case.
1985: * In other cases, the hook will be run in
1986: * pool_reclaim().
1987: */
1988: if (org->pr_drain_hook != NULL) {
1989: (*org->pr_drain_hook)(org->pr_drain_hook_arg,
1990: flags);
1991: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1992: return (res);
1993: }
1.66 thorpej 1994: break;
1.68 thorpej 1995: }
1.66 thorpej 1996:
1997: /*
1998: * Drain all pools, except "org", that use this
1999: * allocator. We do this to reclaim VA space.
2000: * pa_alloc is responsible for waiting for
2001: * physical memory.
2002: *
2003: 		 * XXX We risk looping forever if someone calls
2004: 		 * pool_destroy() on "start".  But there is no
2005: 		 * other way to have a potentially sleeping pool_reclaim(),
2006: 		 * non-sleeping locks on the pool_allocator, and some
2007: 		 * stirring of drained pools in the allocator.
1.68 thorpej 2008: *
2009: * XXX Maybe we should use pool_head_slock for locking
2010: * the allocators?
1.66 thorpej 2011: */
2012: freed = 0;
2013:
2014: s = splvm();
2015: simple_lock(&pa->pa_slock);
2016: pp = start = TAILQ_FIRST(&pa->pa_list);
2017: do {
2018: TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
2019: TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
2020: if (pp == org)
2021: continue;
1.73 thorpej 2022: simple_unlock(&pa->pa_slock);
1.66 thorpej 2023: freed = pool_reclaim(pp);
1.73 thorpej 2024: simple_lock(&pa->pa_slock);
1.66 thorpej 2025: } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
2026: freed == 0);
2027:
2028: if (freed == 0) {
2029: /*
2030: 			 * We set PA_WANT here: the caller will most likely
2031: 			 * sleep waiting for pages (if not, this won't hurt
2032: 			 * much), and there is no way to set the flag in the
2033: 			 * caller without violating the locking order.
2034: */
2035: pa->pa_flags |= PA_WANT;
2036: }
2037: simple_unlock(&pa->pa_slock);
2038: splx(s);
2039: } while (freed);
2040: return (NULL);
2041: }
2042:
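/*
 * pool_allocator_free:
 *
 *	Release a page back to the allocator and, if PA_WANT is set,
 *	wake up every pool sleeping on this allocator.
 */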
2043: void
2044: pool_allocator_free(struct pool *pp, void *v)
2045: {
2046: struct pool_allocator *pa = pp->pr_alloc;
2047: int s;
2048:
1.91 yamt 2049: LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
2050:
1.66 thorpej 2051: (*pa->pa_free)(pp, v);
2052:
2053: s = splvm();
2054: simple_lock(&pa->pa_slock);
2055: if ((pa->pa_flags & PA_WANT) == 0) {
2056: simple_unlock(&pa->pa_slock);
2057: splx(s);
2058: return;
2059: }
2060:
2061: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2062: simple_lock(&pp->pr_slock);
2063: if ((pp->pr_flags & PR_WANTED) != 0) {
2064: pp->pr_flags &= ~PR_WANTED;
2065: wakeup(pp);
2066: }
1.69 thorpej 2067: simple_unlock(&pp->pr_slock);
1.66 thorpej 2068: }
2069: pa->pa_flags &= ~PA_WANT;
2070: simple_unlock(&pa->pa_slock);
2071: splx(s);
2072: }
2073:
2074: void *
2075: pool_page_alloc(struct pool *pp, int flags)
2076: {
2077: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2078:
2079: return ((void *) uvm_km_alloc_poolpage(waitok));
2080: }
2081:
2082: void
2083: pool_page_free(struct pool *pp, void *v)
2084: {
2085:
2086: uvm_km_free_poolpage((vaddr_t) v);
2087: }
2088:
2089: #ifdef POOL_SUBPAGE
2090: /* Sub-page allocator, for machines with large hardware pages. */
2091: void *
2092: pool_subpage_alloc(struct pool *pp, int flags)
2093: {
2094:
2095: return (pool_get(&psppool, flags));
2096: }
2097:
2098: void
2099: pool_subpage_free(struct pool *pp, void *v)
2100: {
2101:
2102: pool_put(&psppool, v);
2103: }
2104:
2105: /* We don't provide a real nointr allocator. Maybe later. */
2106: void *
2107: pool_page_alloc_nointr(struct pool *pp, int flags)
2108: {
2109:
2110: return (pool_subpage_alloc(pp, flags));
2111: }
2112:
2113: void
2114: pool_page_free_nointr(struct pool *pp, void *v)
2115: {
2116:
2117: pool_subpage_free(pp, v);
2118: }
2119: #else
2120: void *
2121: pool_page_alloc_nointr(struct pool *pp, int flags)
2122: {
2123: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2124:
2125: return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2126: uvm.kernel_object, waitok));
2127: }
2128:
2129: void
2130: pool_page_free_nointr(struct pool *pp, void *v)
2131: {
2132:
2133: uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2134: }
2135: #endif /* POOL_SUBPAGE */