Annotation of src/sys/kern/subr_pool.c, Revision 1.97
1.97 ! yamt 1: /* $NetBSD: subr_pool.c,v 1.96 2004/06/20 18:19:27 thorpej Exp $ */
1.1 pk 2:
3: /*-
1.43 thorpej 4: * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.97 ! yamt 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.96 2004/06/20 18:19:27 thorpej Exp $");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.3 pk 56:
57: #include <uvm/uvm.h>
58:
1.1 pk 59: /*
60: * Pool resource management utility.
1.3 pk 61: *
1.88 chs 62: * Memory is allocated in pages which are split into pieces according to
63: * the pool item size. Each page is kept on one of three lists in the
64: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
65: * for empty, full and partially-full pages respectively. The individual
66: * pool items are on a linked list headed by `ph_itemlist' in each page
67: * header. The memory for building the page list is either taken from
68: * the allocated pages themselves (for small pool items) or taken from
69: * an internal pool of page headers (`phpool').
1.1 pk 70: */
71:
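/*
 * Illustrative usage sketch (editorial example, not part of this file):
 * how a subsystem typically drives this API.  `struct foo', `foo_pool'
 * and the wait channel name "foopl" are hypothetical.
 */
#include <sys/pool.h>

struct foo {
	int	f_state;
};

static struct pool foo_pool;

void
foo_subr_init(void)
{

	/* A NULL `palloc' selects the default back-end page allocator. */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
}

struct foo *
foo_alloc(void)
{

	/* PR_WAITOK may sleep; interrupt context must use PR_NOWAIT. */
	return (pool_get(&foo_pool, PR_WAITOK));
}

void
foo_free(struct foo *f)
{

	pool_put(&foo_pool, f);
}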
1.3 pk 72: /* List of all pools */
1.5 thorpej 73: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.3 pk 74:
75: /* Private pool for page header structures */
1.97 ! yamt 76: #define PHPOOL_MAX 8
! 77: static struct pool phpool[PHPOOL_MAX];
! 78: #define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx)))
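/*
 * phpool[0] serves pools that thread their free list through the items
 * themselves (no PR_NOTOUCH), so its headers carry no index array.
 * For PR_NOTOUCH pools, phpool[idx] (idx > 0) supplies headers that are
 * followed by a uint16_t free-list array sized for up to 1 << idx items
 * per page (see PR_FREELIST and the pool_init() sizing loop below).
 */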
1.3 pk 79:
1.62 bjh21 80: #ifdef POOL_SUBPAGE
81: /* Pool of subpages for use by normal pools. */
82: static struct pool psppool;
83: #endif
84:
1.3 pk 85: /* # of seconds to retain page after last use */
86: int pool_inactive_time = 10;
87:
88: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 89: static struct pool *drainpp;
90:
91: /* This spin lock protects both pool_head and drainpp. */
92: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 93:
94: struct pool_item_header {
95: /* Page headers */
1.88 chs 96: LIST_ENTRY(pool_item_header)
1.3 pk 97: ph_pagelist; /* pool page list */
1.88 chs 98: SPLAY_ENTRY(pool_item_header)
99: ph_node; /* Off-page page headers */
1.3 pk 100: caddr_t ph_page; /* this page's address */
101: struct timeval ph_time; /* last referenced */
1.97 ! yamt 102: union {
! 103: /* !PR_NOTOUCH */
! 104: struct {
! 105: TAILQ_HEAD(, pool_item)
! 106: phu_itemlist; /* chunk list for this page */
! 107: } phu_normal;
! 108: /* PR_NOTOUCH */
! 109: struct {
! 110: uint16_t
! 111: phu_off; /* start offset in page */
! 112: uint16_t
! 113: phu_firstfree; /* first free item */
! 114: } phu_notouch;
! 115: } ph_u;
! 116: uint16_t ph_nmissing; /* # of chunks in use */
1.3 pk 117: };
1.97 ! yamt 118: #define ph_itemlist ph_u.phu_normal.phu_itemlist
! 119: #define ph_off ph_u.phu_notouch.phu_off
! 120: #define ph_firstfree ph_u.phu_notouch.phu_firstfree
1.3 pk 121:
1.1 pk 122: struct pool_item {
1.3 pk 123: #ifdef DIAGNOSTIC
1.82 thorpej 124: u_int pi_magic;
1.33 chs 125: #endif
1.82 thorpej 126: #define PI_MAGIC 0xdeadbeefU
1.3 pk 127: /* Other entries use only this list entry */
128: TAILQ_ENTRY(pool_item) pi_list;
129: };
130:
1.53 thorpej 131: #define POOL_NEEDS_CATCHUP(pp) \
132: ((pp)->pr_nitems < (pp)->pr_minitems)
133:
1.43 thorpej 134: /*
135: * Pool cache management.
136: *
137: * Pool caches provide a way for constructed objects to be cached by the
138: * pool subsystem. This can lead to performance improvements by avoiding
139: * needless object construction/destruction; it is deferred until absolutely
140: * necessary.
141: *
142: * Caches are grouped into cache groups. Each cache group references
143: * up to 16 constructed objects. When a cache allocates an object
144: * from the pool, it calls the object's constructor and places it into
145: * a cache group. When a cache group frees an object back to the pool,
146: * it first calls the object's destructor. This allows the object to
147: * persist in constructed form while freed to the cache.
148: *
149: * Multiple caches may exist for each pool. This allows a single
150: * object type to have multiple constructed forms. The pool references
151: * each cache, so that when a pool is drained by the pagedaemon, it can
152: * drain each individual cache as well. Each time a cache is drained,
153: * the most idle cache group is freed to the pool in its entirety.
154: *
 155:  * Pool caches are laid on top of pools.  By layering them, we can avoid
156: * the complexity of cache management for pools which would not benefit
157: * from it.
158: */
159:
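/*
 * Illustrative sketch (editorial example, not part of this file):
 * caching constructed objects, reusing the hypothetical `struct foo'
 * and `foo_pool' names from the sketch above.
 */
static struct pool_cache foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{
	struct foo *f = obj;

	/* Expensive one-time construction, done only on a cache miss. */
	f->f_state = 0;
	return (0);		/* nonzero fails the allocation */
}

static void
foo_dtor(void *arg, void *obj)
{

	/* Undo foo_ctor(); called only when the object leaves the cache. */
}

void
foo_cache_setup(void)
{

	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
}
/*
 * Consumers then allocate with pool_cache_get(&foo_cache, PR_WAITOK)
 * and release with pool_cache_put(&foo_cache, f); destruction is
 * deferred until the pool itself reclaims the memory.
 */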
160: /* The cache group pool. */
161: static struct pool pcgpool;
1.3 pk 162:
1.43 thorpej 163: static void pool_cache_reclaim(struct pool_cache *);
1.3 pk 164:
1.42 thorpej 165: static int pool_catchup(struct pool *);
1.55 thorpej 166: static void pool_prime_page(struct pool *, caddr_t,
167: struct pool_item_header *);
1.88 chs 168: static void pool_update_curpage(struct pool *);
1.66 thorpej 169:
170: void *pool_allocator_alloc(struct pool *, int);
171: void pool_allocator_free(struct pool *, void *);
1.3 pk 172:
1.97 ! yamt 173: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 174: void (*)(const char *, ...));
1.42 thorpej 175: static void pool_print1(struct pool *, const char *,
176: void (*)(const char *, ...));
1.3 pk 177:
1.88 chs 178: static int pool_chk_page(struct pool *, const char *,
179: struct pool_item_header *);
180:
1.3 pk 181: /*
1.52 thorpej 182: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 183: */
184: struct pool_log {
185: const char *pl_file;
186: long pl_line;
187: int pl_action;
1.25 thorpej 188: #define PRLOG_GET 1
189: #define PRLOG_PUT 2
1.3 pk 190: void *pl_addr;
1.1 pk 191: };
192:
1.86 matt 193: #ifdef POOL_DIAGNOSTIC
1.3 pk 194: /* Number of entries in pool log buffers */
1.17 thorpej 195: #ifndef POOL_LOGSIZE
196: #define POOL_LOGSIZE 10
197: #endif
198:
199: int pool_logsize = POOL_LOGSIZE;
1.1 pk 200:
1.42 thorpej 201: static __inline void
202: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 203: {
204: int n = pp->pr_curlogentry;
205: struct pool_log *pl;
206:
1.20 thorpej 207: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 208: return;
209:
210: /*
211: * Fill in the current entry. Wrap around and overwrite
212: * the oldest entry if necessary.
213: */
214: pl = &pp->pr_log[n];
215: pl->pl_file = file;
216: pl->pl_line = line;
217: pl->pl_action = action;
218: pl->pl_addr = v;
219: if (++n >= pp->pr_logsize)
220: n = 0;
221: pp->pr_curlogentry = n;
222: }
223:
224: static void
1.42 thorpej 225: pr_printlog(struct pool *pp, struct pool_item *pi,
226: void (*pr)(const char *, ...))
1.3 pk 227: {
228: int i = pp->pr_logsize;
229: int n = pp->pr_curlogentry;
230:
1.20 thorpej 231: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 232: return;
233:
234: /*
235: * Print all entries in this pool's log.
236: */
237: while (i-- > 0) {
238: struct pool_log *pl = &pp->pr_log[n];
239: if (pl->pl_action != 0) {
1.25 thorpej 240: if (pi == NULL || pi == pl->pl_addr) {
241: (*pr)("\tlog entry %d:\n", i);
242: (*pr)("\t\taction = %s, addr = %p\n",
243: pl->pl_action == PRLOG_GET ? "get" : "put",
244: pl->pl_addr);
245: (*pr)("\t\tfile: %s at line %lu\n",
246: pl->pl_file, pl->pl_line);
247: }
1.3 pk 248: }
249: if (++n >= pp->pr_logsize)
250: n = 0;
251: }
252: }
1.25 thorpej 253:
1.42 thorpej 254: static __inline void
255: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 256: {
257:
1.34 thorpej 258: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 259: printf("pool %s: reentrancy at file %s line %ld\n",
260: pp->pr_wchan, file, line);
261: printf(" previous entry at file %s line %ld\n",
262: pp->pr_entered_file, pp->pr_entered_line);
263: panic("pr_enter");
264: }
265:
266: pp->pr_entered_file = file;
267: pp->pr_entered_line = line;
268: }
269:
1.42 thorpej 270: static __inline void
271: pr_leave(struct pool *pp)
1.25 thorpej 272: {
273:
1.34 thorpej 274: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 275: printf("pool %s not entered?\n", pp->pr_wchan);
276: panic("pr_leave");
277: }
278:
279: pp->pr_entered_file = NULL;
280: pp->pr_entered_line = 0;
281: }
282:
1.42 thorpej 283: static __inline void
284: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 285: {
286:
287: if (pp->pr_entered_file != NULL)
288: (*pr)("\n\tcurrently entered from file %s line %ld\n",
289: pp->pr_entered_file, pp->pr_entered_line);
290: }
1.3 pk 291: #else
1.25 thorpej 292: #define pr_log(pp, v, action, file, line)
293: #define pr_printlog(pp, pi, pr)
294: #define pr_enter(pp, file, line)
295: #define pr_leave(pp)
296: #define pr_enter_check(pp, pr)
1.59 thorpej 297: #endif /* POOL_DIAGNOSTIC */
1.3 pk 298:
1.88 chs 299: static __inline int
1.97 ! yamt 300: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
! 301: const void *v)
! 302: {
! 303: const char *cp = v;
! 304: int idx;
! 305:
! 306: KASSERT(pp->pr_roflags & PR_NOTOUCH);
! 307: idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
! 308: KASSERT(idx < pp->pr_itemsperpage);
! 309: return idx;
! 310: }
! 311:
! 312: #define PR_FREELIST_ALIGN(p) roundup((uintptr_t)(p), sizeof(uint16_t))
! 313: #define PR_FREELIST(ph) ((uint16_t *)PR_FREELIST_ALIGN((ph) + 1))
! 314: #define PR_INDEX_USED ((uint16_t)-1)
! 315: #define PR_INDEX_EOL ((uint16_t)-2)
! 316:
! 317: static __inline void
! 318: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
! 319: void *obj)
! 320: {
! 321: int idx = pr_item_notouch_index(pp, ph, obj);
! 322: uint16_t *freelist = PR_FREELIST(ph);
! 323:
! 324: KASSERT(freelist[idx] == PR_INDEX_USED);
! 325: freelist[idx] = ph->ph_firstfree;
! 326: ph->ph_firstfree = idx;
! 327: }
! 328:
! 329: static __inline void *
! 330: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
! 331: {
! 332: int idx = ph->ph_firstfree;
! 333: uint16_t *freelist = PR_FREELIST(ph);
! 334:
! 335: KASSERT(freelist[idx] != PR_INDEX_USED);
! 336: ph->ph_firstfree = freelist[idx];
! 337: freelist[idx] = PR_INDEX_USED;
! 338:
! 339: return ph->ph_page + ph->ph_off + idx * pp->pr_size;
! 340: }
! 341:
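/*
 * Worked example: a freshly primed PR_NOTOUCH page with four items has
 * ph_firstfree = 0 and freelist = { 1, 2, 3, PR_INDEX_EOL } (set up in
 * pool_prime_page() below).  pr_item_notouch_get() then hands out item 0,
 * advances ph_firstfree to 1 and marks slot 0 PR_INDEX_USED; putting
 * item 0 back pushes it on the head again.  The free list is a LIFO
 * stack threaded through the index array in the page header, never
 * touching the items' own storage.
 */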
! 342: static __inline int
1.88 chs 343: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
344: {
345: if (a->ph_page < b->ph_page)
346: return (-1);
347: else if (a->ph_page > b->ph_page)
348: return (1);
349: else
350: return (0);
351: }
352:
353: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
354: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
355:
1.3 pk 356: /*
357: * Return the pool page header based on page address.
358: */
1.42 thorpej 359: static __inline struct pool_item_header *
360: pr_find_pagehead(struct pool *pp, caddr_t page)
1.3 pk 361: {
1.88 chs 362: struct pool_item_header *ph, tmp;
1.3 pk 363:
1.20 thorpej 364: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 365: return ((struct pool_item_header *)(page + pp->pr_phoffset));
366:
1.88 chs 367: tmp.ph_page = page;
368: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
369: return ph;
1.3 pk 370: }
371:
372: /*
373: * Remove a page from the pool.
374: */
1.42 thorpej 375: static __inline void
1.61 chs 376: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
377: struct pool_pagelist *pq)
1.3 pk 378: {
1.61 chs 379: int s;
1.3 pk 380:
1.91 yamt 381: LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);
382:
1.3 pk 383: /*
1.7 thorpej 384: * If the page was idle, decrement the idle page count.
1.3 pk 385: */
1.6 thorpej 386: if (ph->ph_nmissing == 0) {
387: #ifdef DIAGNOSTIC
388: if (pp->pr_nidle == 0)
389: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 390: if (pp->pr_nitems < pp->pr_itemsperpage)
391: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 392: #endif
393: pp->pr_nidle--;
394: }
1.7 thorpej 395:
1.20 thorpej 396: pp->pr_nitems -= pp->pr_itemsperpage;
397:
1.7 thorpej 398: /*
1.61 chs 399: * Unlink a page from the pool and release it (or queue it for release).
1.7 thorpej 400: */
1.88 chs 401: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 402: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
403: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.61 chs 404: if (pq) {
1.88 chs 405: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
1.61 chs 406: } else {
1.66 thorpej 407: pool_allocator_free(pp, ph->ph_page);
1.61 chs 408: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
1.85 pk 409: s = splvm();
1.97 ! yamt 410: pool_put(pp->pr_phpool, ph);
1.61 chs 411: splx(s);
412: }
413: }
1.7 thorpej 414: pp->pr_npages--;
415: pp->pr_npagefree++;
1.6 thorpej 416:
1.88 chs 417: pool_update_curpage(pp);
1.3 pk 418: }
419:
420: /*
1.94 simonb 421: * Initialize all the pools listed in the "pools" link set.
422: */
423: void
424: link_pool_init(void)
425: {
426: __link_set_decl(pools, struct link_pool_init);
427: struct link_pool_init * const *pi;
428:
429: __link_set_foreach(pi, pools)
430: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
431: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
432: (*pi)->palloc);
433: }
434:
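/*
 * Illustrative sketch: a static pool can be registered in the "pools"
 * link set so that link_pool_init() sets it up at boot, before malloc()
 * is usable.  sys/pool.h wraps this in a POOL_INIT() macro; expanded by
 * hand it amounts to roughly the following (names hypothetical, and the
 * field order assumed to mirror the pool_init() arguments below).
 */
struct pool foo_pool;
static struct link_pool_init foo_pool_init = {
	&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL
};
__link_set_add_rodata(pools, foo_pool_init);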
435: /*
1.3 pk 436: * Initialize the given pool resource structure.
437: *
438: * We export this routine to allow other kernel parts to declare
439: * static pools that must be initialized before malloc() is available.
440: */
441: void
1.42 thorpej 442: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.66 thorpej 443: const char *wchan, struct pool_allocator *palloc)
1.3 pk 444: {
1.88 chs 445: int off, slack;
1.92 enami 446: size_t trysize, phsize;
1.93 dbj 447: int s;
1.3 pk 448:
1.25 thorpej 449: #ifdef POOL_DIAGNOSTIC
450: /*
451: * Always log if POOL_DIAGNOSTIC is defined.
452: */
453: if (pool_logsize != 0)
454: flags |= PR_LOGGING;
455: #endif
456:
1.66 thorpej 457: #ifdef POOL_SUBPAGE
458: /*
459: * XXX We don't provide a real `nointr' back-end
460: * yet; all sub-pages come from a kmem back-end.
 461: 	 * Maybe some day...
462: */
463: if (palloc == NULL) {
464: extern struct pool_allocator pool_allocator_kmem_subpage;
465: palloc = &pool_allocator_kmem_subpage;
466: }
1.3 pk 467: /*
1.66 thorpej 468: * We'll assume any user-specified back-end allocator
 469: 	 * will deal with sub-pages, or simply doesn't care.
1.3 pk 470: */
1.66 thorpej 471: #else
472: if (palloc == NULL)
473: palloc = &pool_allocator_kmem;
474: #endif /* POOL_SUBPAGE */
475: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
476: if (palloc->pa_pagesz == 0) {
1.62 bjh21 477: #ifdef POOL_SUBPAGE
1.66 thorpej 478: if (palloc == &pool_allocator_kmem)
479: palloc->pa_pagesz = PAGE_SIZE;
480: else
481: palloc->pa_pagesz = POOL_SUBPAGE;
1.62 bjh21 482: #else
1.66 thorpej 483: palloc->pa_pagesz = PAGE_SIZE;
484: #endif /* POOL_SUBPAGE */
485: }
486:
487: TAILQ_INIT(&palloc->pa_list);
488:
489: simple_lock_init(&palloc->pa_slock);
490: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
491: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
492: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 493: }
1.3 pk 494:
495: if (align == 0)
496: align = ALIGN(1);
1.14 thorpej 497:
498: if (size < sizeof(struct pool_item))
499: size = sizeof(struct pool_item);
1.3 pk 500:
1.78 thorpej 501: size = roundup(size, align);
1.66 thorpej 502: #ifdef DIAGNOSTIC
503: if (size > palloc->pa_pagesz)
1.35 pk 504: panic("pool_init: pool item size (%lu) too large",
505: (u_long)size);
1.66 thorpej 506: #endif
1.35 pk 507:
1.3 pk 508: /*
509: * Initialize the pool structure.
510: */
1.88 chs 511: LIST_INIT(&pp->pr_emptypages);
512: LIST_INIT(&pp->pr_fullpages);
513: LIST_INIT(&pp->pr_partpages);
1.43 thorpej 514: TAILQ_INIT(&pp->pr_cachelist);
1.3 pk 515: pp->pr_curpage = NULL;
516: pp->pr_npages = 0;
517: pp->pr_minitems = 0;
518: pp->pr_minpages = 0;
519: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 520: pp->pr_roflags = flags;
521: pp->pr_flags = 0;
1.35 pk 522: pp->pr_size = size;
1.3 pk 523: pp->pr_align = align;
524: pp->pr_wchan = wchan;
1.66 thorpej 525: pp->pr_alloc = palloc;
1.20 thorpej 526: pp->pr_nitems = 0;
527: pp->pr_nout = 0;
528: pp->pr_hardlimit = UINT_MAX;
529: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 530: pp->pr_hardlimit_ratecap.tv_sec = 0;
531: pp->pr_hardlimit_ratecap.tv_usec = 0;
532: pp->pr_hardlimit_warning_last.tv_sec = 0;
533: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 534: pp->pr_drain_hook = NULL;
535: pp->pr_drain_hook_arg = NULL;
1.3 pk 536:
537: /*
538: * Decide whether to put the page header off page to avoid
1.92 enami 539: 	 * wasting too large a part of the page or too big an item.
 540: 	 * Off-page page headers go into a splay tree, so we can match
 541: 	 * a returned item with its header based on the page address.
 542: 	 * We use 1/16 of the page size and about 8 times the item
 543: 	 * size as the threshold (XXX: tune).
544: *
545: * However, we'll put the header into the page if we can put
546: * it without wasting any items.
547: *
548: * Silently enforce `0 <= ioff < align'.
1.3 pk 549: */
1.92 enami 550: pp->pr_itemoffset = ioff %= align;
551: /* See the comment below about reserved bytes. */
552: trysize = palloc->pa_pagesz - ((align - ioff) % align);
553: phsize = ALIGN(sizeof(struct pool_item_header));
1.97 ! yamt 554: if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
! 555: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
! 556: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 557: /* Use the end of the page for the page header */
1.20 thorpej 558: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 559: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 560: } else {
1.3 pk 561: /* The page header will be taken from our page header pool */
562: pp->pr_phoffset = 0;
1.66 thorpej 563: off = palloc->pa_pagesz;
1.88 chs 564: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 565: }
1.1 pk 566:
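/*
 * Worked example of the placement rule above, assuming a 4096-byte
 * page and (for illustration) a 64-byte aligned header: the size
 * threshold is MIN(4096 / 16, 64 << 3) = 256, so items smaller than
 * 256 bytes always keep their header in the page.  A 1000-byte item
 * also qualifies via the second test, since 4096 / 1000 == (4096 - 64)
 * / 1000 == 4: donating the header space costs no item.  A 1024-byte
 * item does not qualify (4 items vs. 3), so its header comes from the
 * header pool instead.
 */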
1.3 pk 567: /*
568: * Alignment is to take place at `ioff' within the item. This means
569: * we must reserve up to `align - 1' bytes on the page to allow
570: * appropriate positioning of each item.
571: */
572: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 573: KASSERT(pp->pr_itemsperpage != 0);
1.97 ! yamt 574: if ((pp->pr_roflags & PR_NOTOUCH)) {
! 575: int idx;
! 576:
! 577: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
! 578: idx++) {
! 579: /* nothing */
! 580: }
! 581: if (idx >= PHPOOL_MAX) {
! 582: /*
        ! 583: 			 * If you see this panic, consider tweaking
! 584: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
! 585: */
! 586: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
! 587: pp->pr_wchan, pp->pr_itemsperpage);
! 588: }
! 589: pp->pr_phpool = &phpool[idx];
! 590: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
! 591: pp->pr_phpool = &phpool[0];
! 592: }
! 593: #if defined(DIAGNOSTIC)
! 594: else {
! 595: pp->pr_phpool = NULL;
! 596: }
! 597: #endif
1.3 pk 598:
599: /*
600: * Use the slack between the chunks and the page header
601: * for "cache coloring".
602: */
603: slack = off - pp->pr_itemsperpage * pp->pr_size;
604: pp->pr_maxcolor = (slack / align) * align;
605: pp->pr_curcolor = 0;
606:
607: pp->pr_nget = 0;
608: pp->pr_nfail = 0;
609: pp->pr_nput = 0;
610: pp->pr_npagealloc = 0;
611: pp->pr_npagefree = 0;
1.1 pk 612: pp->pr_hiwat = 0;
1.8 thorpej 613: pp->pr_nidle = 0;
1.3 pk 614:
1.59 thorpej 615: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 616: if (flags & PR_LOGGING) {
617: if (kmem_map == NULL ||
618: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
619: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 620: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 621: pp->pr_curlogentry = 0;
622: pp->pr_logsize = pool_logsize;
623: }
1.59 thorpej 624: #endif
1.25 thorpej 625:
626: pp->pr_entered_file = NULL;
627: pp->pr_entered_line = 0;
1.3 pk 628:
1.21 thorpej 629: simple_lock_init(&pp->pr_slock);
1.1 pk 630:
1.3 pk 631: /*
1.43 thorpej 632: * Initialize private page header pool and cache magazine pool if we
633: * haven't done so yet.
1.23 thorpej 634: * XXX LOCKING.
1.3 pk 635: */
1.97 ! yamt 636: if (phpool[0].pr_size == 0) {
! 637: struct pool_allocator *pa;
! 638: int idx;
! 639: #ifdef POOL_SUBPAGE
! 640: pa = &pool_allocator_kmem;
! 641: #else
! 642: pa = NULL;
! 643: #endif
! 644: for (idx = 0; idx < PHPOOL_MAX; idx++) {
! 645: static char phpool_names[PHPOOL_MAX][6+1+6+1];
! 646: int nelem;
! 647: size_t sz;
! 648:
! 649: nelem = PHPOOL_FREELIST_NELEM(idx);
! 650: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
! 651: "phpool-%d", nelem);
! 652: sz = sizeof(struct pool_item_header);
! 653: if (nelem) {
! 654: sz = PR_FREELIST_ALIGN(sz)
! 655: + nelem * sizeof(uint16_t);
! 656: }
! 657: pool_init(&phpool[idx], sz, 0, 0, 0,
! 658: phpool_names[idx], pa);
! 659: }
1.62 bjh21 660: #ifdef POOL_SUBPAGE
661: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.66 thorpej 662: PR_RECURSIVE, "psppool", &pool_allocator_kmem);
1.62 bjh21 663: #endif
1.43 thorpej 664: pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.66 thorpej 665: 0, "pcgpool", NULL);
1.1 pk 666: }
667:
1.23 thorpej 668: /* Insert into the list of all pools. */
669: simple_lock(&pool_head_slock);
670: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
671: simple_unlock(&pool_head_slock);
1.66 thorpej 672:
673: /* Insert this into the list of pools using this allocator. */
1.93 dbj 674: s = splvm();
1.66 thorpej 675: simple_lock(&palloc->pa_slock);
676: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
677: simple_unlock(&palloc->pa_slock);
1.93 dbj 678: splx(s);
1.1 pk 679: }
680:
681: /*
 682:  * De-commission a pool resource.
683: */
684: void
1.42 thorpej 685: pool_destroy(struct pool *pp)
1.1 pk 686: {
1.3 pk 687: struct pool_item_header *ph;
1.43 thorpej 688: struct pool_cache *pc;
1.93 dbj 689: int s;
1.43 thorpej 690:
1.66 thorpej 691: /* Locking order: pool_allocator -> pool */
1.93 dbj 692: s = splvm();
1.66 thorpej 693: simple_lock(&pp->pr_alloc->pa_slock);
694: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
695: simple_unlock(&pp->pr_alloc->pa_slock);
1.93 dbj 696: splx(s);
1.66 thorpej 697:
1.43 thorpej 698: /* Destroy all caches for this pool. */
699: while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
700: pool_cache_destroy(pc);
1.3 pk 701:
702: #ifdef DIAGNOSTIC
1.20 thorpej 703: if (pp->pr_nout != 0) {
1.25 thorpej 704: pr_printlog(pp, NULL, printf);
1.80 provos 705: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 706: pp->pr_nout);
1.3 pk 707: }
708: #endif
1.1 pk 709:
1.3 pk 710: /* Remove all pages */
1.88 chs 711: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.70 thorpej 712: pr_rmpage(pp, ph, NULL);
1.88 chs 713: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
714: KASSERT(LIST_EMPTY(&pp->pr_partpages));
1.3 pk 715:
716: /* Remove from global pool list */
1.23 thorpej 717: simple_lock(&pool_head_slock);
1.3 pk 718: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.61 chs 719: if (drainpp == pp) {
720: drainpp = NULL;
721: }
1.23 thorpej 722: simple_unlock(&pool_head_slock);
1.3 pk 723:
1.59 thorpej 724: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 725: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 726: free(pp->pr_log, M_TEMP);
1.59 thorpej 727: #endif
1.1 pk 728: }
729:
1.68 thorpej 730: void
731: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
732: {
733:
734: /* XXX no locking -- must be used just after pool_init() */
735: #ifdef DIAGNOSTIC
736: if (pp->pr_drain_hook != NULL)
737: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
738: #endif
739: pp->pr_drain_hook = fn;
740: pp->pr_drain_hook_arg = arg;
741: }
742:
1.88 chs 743: static struct pool_item_header *
1.55 thorpej 744: pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
745: {
746: struct pool_item_header *ph;
747: int s;
748:
749: LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
750:
751: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
752: ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
753: else {
1.85 pk 754: s = splvm();
1.97 ! yamt 755: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 756: splx(s);
757: }
758:
759: return (ph);
760: }
1.1 pk 761:
762: /*
1.3 pk 763: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 764: */
1.3 pk 765: void *
1.59 thorpej 766: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 767: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 768: #else
769: pool_get(struct pool *pp, int flags)
770: #endif
1.1 pk 771: {
772: struct pool_item *pi;
1.3 pk 773: struct pool_item_header *ph;
1.55 thorpej 774: void *v;
1.1 pk 775:
1.2 pk 776: #ifdef DIAGNOSTIC
1.95 atatat 777: if (__predict_false(pp->pr_itemsperpage == 0))
778: panic("pool_get: pool %p: pr_itemsperpage is zero, "
779: "pool not initialized?", pp);
1.84 thorpej 780: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 781: (flags & PR_WAITOK) != 0))
1.77 matt 782: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 783:
784: #ifdef LOCKDEBUG
785: if (flags & PR_WAITOK)
786: simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 787: #endif
1.58 thorpej 788: #endif /* DIAGNOSTIC */
1.1 pk 789:
1.21 thorpej 790: simple_lock(&pp->pr_slock);
1.25 thorpej 791: pr_enter(pp, file, line);
1.20 thorpej 792:
793: startover:
794: /*
795: * Check to see if we've reached the hard limit. If we have,
796: * and we can wait, then wait until an item has been returned to
797: * the pool.
798: */
799: #ifdef DIAGNOSTIC
1.34 thorpej 800: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 801: pr_leave(pp);
1.21 thorpej 802: simple_unlock(&pp->pr_slock);
1.20 thorpej 803: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
804: }
805: #endif
1.34 thorpej 806: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 807: if (pp->pr_drain_hook != NULL) {
808: /*
809: * Since the drain hook is going to free things
810: * back to the pool, unlock, call the hook, re-lock,
811: * and check the hardlimit condition again.
812: */
813: pr_leave(pp);
814: simple_unlock(&pp->pr_slock);
815: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
816: simple_lock(&pp->pr_slock);
817: pr_enter(pp, file, line);
818: if (pp->pr_nout < pp->pr_hardlimit)
819: goto startover;
820: }
821:
1.29 sommerfe 822: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 823: /*
824: * XXX: A warning isn't logged in this case. Should
825: * it be?
826: */
827: pp->pr_flags |= PR_WANTED;
1.25 thorpej 828: pr_leave(pp);
1.40 sommerfe 829: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 830: pr_enter(pp, file, line);
1.20 thorpej 831: goto startover;
832: }
1.31 thorpej 833:
834: /*
835: * Log a message that the hard limit has been hit.
836: */
837: if (pp->pr_hardlimit_warning != NULL &&
838: ratecheck(&pp->pr_hardlimit_warning_last,
839: &pp->pr_hardlimit_ratecap))
840: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 841:
842: pp->pr_nfail++;
843:
1.25 thorpej 844: pr_leave(pp);
1.21 thorpej 845: simple_unlock(&pp->pr_slock);
1.20 thorpej 846: return (NULL);
847: }
848:
1.3 pk 849: /*
850: * The convention we use is that if `curpage' is not NULL, then
851: * it points at a non-empty bucket. In particular, `curpage'
852: * never points at a page header which has PR_PHINPAGE set and
853: * has no items in its bucket.
854: */
1.20 thorpej 855: if ((ph = pp->pr_curpage) == NULL) {
856: #ifdef DIAGNOSTIC
857: if (pp->pr_nitems != 0) {
1.21 thorpej 858: simple_unlock(&pp->pr_slock);
1.20 thorpej 859: printf("pool_get: %s: curpage NULL, nitems %u\n",
860: pp->pr_wchan, pp->pr_nitems);
1.80 provos 861: panic("pool_get: nitems inconsistent");
1.20 thorpej 862: }
863: #endif
864:
1.21 thorpej 865: /*
866: * Call the back-end page allocator for more memory.
867: * Release the pool lock, as the back-end page allocator
868: * may block.
869: */
1.25 thorpej 870: pr_leave(pp);
1.21 thorpej 871: simple_unlock(&pp->pr_slock);
1.66 thorpej 872: v = pool_allocator_alloc(pp, flags);
1.55 thorpej 873: if (__predict_true(v != NULL))
874: ph = pool_alloc_item_header(pp, v, flags);
1.15 pk 875:
1.55 thorpej 876: if (__predict_false(v == NULL || ph == NULL)) {
877: if (v != NULL)
1.66 thorpej 878: pool_allocator_free(pp, v);
1.55 thorpej 879:
1.91 yamt 880: simple_lock(&pp->pr_slock);
881: pr_enter(pp, file, line);
882:
1.21 thorpej 883: /*
1.55 thorpej 884: * We were unable to allocate a page or item
885: * header, but we released the lock during
886: * allocation, so perhaps items were freed
887: * back to the pool. Check for this case.
1.21 thorpej 888: */
889: if (pp->pr_curpage != NULL)
890: goto startover;
1.15 pk 891:
1.3 pk 892: if ((flags & PR_WAITOK) == 0) {
893: pp->pr_nfail++;
1.25 thorpej 894: pr_leave(pp);
1.21 thorpej 895: simple_unlock(&pp->pr_slock);
1.1 pk 896: return (NULL);
1.3 pk 897: }
898:
1.15 pk 899: /*
900: * Wait for items to be returned to this pool.
1.21 thorpej 901: *
1.20 thorpej 902: * XXX: maybe we should wake up once a second and
903: * try again?
1.15 pk 904: */
1.1 pk 905: pp->pr_flags |= PR_WANTED;
1.66 thorpej 906: /* PA_WANTED is already set on the allocator. */
1.25 thorpej 907: pr_leave(pp);
1.40 sommerfe 908: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 909: pr_enter(pp, file, line);
1.20 thorpej 910: goto startover;
1.1 pk 911: }
1.3 pk 912:
1.15 pk 913: /* We have more memory; add it to the pool */
1.91 yamt 914: simple_lock(&pp->pr_slock);
915: pr_enter(pp, file, line);
1.55 thorpej 916: pool_prime_page(pp, v, ph);
1.15 pk 917: pp->pr_npagealloc++;
918:
1.20 thorpej 919: /* Start the allocation process over. */
920: goto startover;
1.3 pk 921: }
1.97 ! yamt 922: if (pp->pr_roflags & PR_NOTOUCH) {
! 923: #ifdef DIAGNOSTIC
! 924: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
! 925: pr_leave(pp);
! 926: simple_unlock(&pp->pr_slock);
! 927: panic("pool_get: %s: page empty", pp->pr_wchan);
! 928: }
! 929: #endif
! 930: v = pr_item_notouch_get(pp, ph);
! 931: #ifdef POOL_DIAGNOSTIC
! 932: pr_log(pp, v, PRLOG_GET, file, line);
! 933: #endif
! 934: } else {
! 935: v = pi = TAILQ_FIRST(&ph->ph_itemlist);
! 936: if (__predict_false(v == NULL)) {
! 937: pr_leave(pp);
! 938: simple_unlock(&pp->pr_slock);
! 939: panic("pool_get: %s: page empty", pp->pr_wchan);
! 940: }
1.20 thorpej 941: #ifdef DIAGNOSTIC
1.97 ! yamt 942: if (__predict_false(pp->pr_nitems == 0)) {
! 943: pr_leave(pp);
! 944: simple_unlock(&pp->pr_slock);
! 945: printf("pool_get: %s: items on itemlist, nitems %u\n",
! 946: pp->pr_wchan, pp->pr_nitems);
! 947: panic("pool_get: nitems inconsistent");
! 948: }
1.65 enami 949: #endif
1.56 sommerfe 950:
1.65 enami 951: #ifdef POOL_DIAGNOSTIC
1.97 ! yamt 952: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 953: #endif
1.3 pk 954:
1.65 enami 955: #ifdef DIAGNOSTIC
1.97 ! yamt 956: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
! 957: pr_printlog(pp, pi, printf);
! 958: panic("pool_get(%s): free list modified: "
! 959: "magic=%x; page %p; item addr %p\n",
! 960: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
! 961: }
1.3 pk 962: #endif
963:
1.97 ! yamt 964: /*
! 965: * Remove from item list.
! 966: */
! 967: TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
! 968: }
1.20 thorpej 969: pp->pr_nitems--;
970: pp->pr_nout++;
1.6 thorpej 971: if (ph->ph_nmissing == 0) {
972: #ifdef DIAGNOSTIC
1.34 thorpej 973: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 974: panic("pool_get: nidle inconsistent");
975: #endif
976: pp->pr_nidle--;
1.88 chs 977:
978: /*
979: * This page was previously empty. Move it to the list of
980: * partially-full pages. This page is already curpage.
981: */
982: LIST_REMOVE(ph, ph_pagelist);
983: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 984: }
1.3 pk 985: ph->ph_nmissing++;
1.97 ! yamt 986: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 987: #ifdef DIAGNOSTIC
1.97 ! yamt 988: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
! 989: !TAILQ_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 990: pr_leave(pp);
1.21 thorpej 991: simple_unlock(&pp->pr_slock);
992: panic("pool_get: %s: nmissing inconsistent",
993: pp->pr_wchan);
994: }
995: #endif
1.3 pk 996: /*
1.88 chs 997: * This page is now full. Move it to the full list
998: * and select a new current page.
1.3 pk 999: */
1.88 chs 1000: LIST_REMOVE(ph, ph_pagelist);
1001: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1002: pool_update_curpage(pp);
1.1 pk 1003: }
1.3 pk 1004:
1005: pp->pr_nget++;
1.20 thorpej 1006:
1007: /*
1008: * If we have a low water mark and we are now below that low
1009: * water mark, add more items to the pool.
1010: */
1.53 thorpej 1011: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1012: /*
1013: * XXX: Should we log a warning? Should we set up a timeout
1014: * to try again in a second or so? The latter could break
1015: * a caller's assumptions about interrupt protection, etc.
1016: */
1017: }
1018:
1.25 thorpej 1019: pr_leave(pp);
1.21 thorpej 1020: simple_unlock(&pp->pr_slock);
1.1 pk 1021: return (v);
1022: }
1023:
1024: /*
1.43 thorpej 1025: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1026: */
1.43 thorpej 1027: static void
1.56 sommerfe 1028: pool_do_put(struct pool *pp, void *v)
1.1 pk 1029: {
1030: struct pool_item *pi = v;
1.3 pk 1031: struct pool_item_header *ph;
1032: caddr_t page;
1.21 thorpej 1033: int s;
1.3 pk 1034:
1.61 chs 1035: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1036:
1.66 thorpej 1037: page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1.1 pk 1038:
1.30 thorpej 1039: #ifdef DIAGNOSTIC
1.34 thorpej 1040: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1041: printf("pool %s: putting with none out\n",
1042: pp->pr_wchan);
1043: panic("pool_put");
1044: }
1045: #endif
1.3 pk 1046:
1.34 thorpej 1047: if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25 thorpej 1048: pr_printlog(pp, NULL, printf);
1.3 pk 1049: panic("pool_put: %s: page header missing", pp->pr_wchan);
1050: }
1.28 thorpej 1051:
1052: #ifdef LOCKDEBUG
1053: /*
1054: * Check if we're freeing a locked simple lock.
1055: */
1056: simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
1057: #endif
1.3 pk 1058:
1059: /*
1060: * Return to item list.
1061: */
1.97 ! yamt 1062: if (pp->pr_roflags & PR_NOTOUCH) {
! 1063: pr_item_notouch_put(pp, ph, v);
! 1064: } else {
1.2 pk 1065: #ifdef DIAGNOSTIC
1.97 ! yamt 1066: pi->pi_magic = PI_MAGIC;
1.3 pk 1067: #endif
1.32 chs 1068: #ifdef DEBUG
1.97 ! yamt 1069: {
! 1070: int i, *ip = v;
1.32 chs 1071:
1.97 ! yamt 1072: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
! 1073: *ip++ = PI_MAGIC;
! 1074: }
1.32 chs 1075: }
1076: #endif
1077:
1.97 ! yamt 1078: TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
! 1079: }
1.79 thorpej 1080: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1081: ph->ph_nmissing--;
1082: pp->pr_nput++;
1.20 thorpej 1083: pp->pr_nitems++;
1084: pp->pr_nout--;
1.3 pk 1085:
1086: /* Cancel "pool empty" condition if it exists */
1087: if (pp->pr_curpage == NULL)
1088: pp->pr_curpage = ph;
1089:
1090: if (pp->pr_flags & PR_WANTED) {
1091: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1092: if (ph->ph_nmissing == 0)
1093: pp->pr_nidle++;
1.3 pk 1094: wakeup((caddr_t)pp);
1095: return;
1096: }
1097:
1098: /*
1.88 chs 1099: * If this page is now empty, do one of two things:
1.21 thorpej 1100: *
1.88 chs 1101: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1102: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1103: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1104: * CLAIM.
1.21 thorpej 1105: *
1.88 chs 1106: * (2) Otherwise, move the page to the empty page list.
1107: *
1108: * Either way, select a new current page (so we use a partially-full
1109: * page if one is available).
1.3 pk 1110: */
1111: if (ph->ph_nmissing == 0) {
1.6 thorpej 1112: pp->pr_nidle++;
1.90 thorpej 1113: if (pp->pr_npages > pp->pr_minpages &&
1114: (pp->pr_npages > pp->pr_maxpages ||
1115: (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
1.91 yamt 1116: simple_unlock(&pp->pr_slock);
1.61 chs 1117: pr_rmpage(pp, ph, NULL);
1.91 yamt 1118: simple_lock(&pp->pr_slock);
1.3 pk 1119: } else {
1.88 chs 1120: LIST_REMOVE(ph, ph_pagelist);
1121: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1122:
1.21 thorpej 1123: /*
1124: * Update the timestamp on the page. A page must
1125: * be idle for some period of time before it can
1126: * be reclaimed by the pagedaemon. This minimizes
1127: * ping-pong'ing for memory.
1128: */
1129: s = splclock();
1130: ph->ph_time = mono_time;
1131: splx(s);
1.1 pk 1132: }
1.88 chs 1133: pool_update_curpage(pp);
1.1 pk 1134: }
1.88 chs 1135:
1.21 thorpej 1136: /*
1.88 chs 1137: * If the page was previously completely full, move it to the
1138: * partially-full list and make it the current page. The next
1139: * allocation will get the item from this page, instead of
1140: * further fragmenting the pool.
1.21 thorpej 1141: */
1142: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1143: LIST_REMOVE(ph, ph_pagelist);
1144: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1145: pp->pr_curpage = ph;
1146: }
1.43 thorpej 1147: }
1148:
1149: /*
1150: * Return resource to the pool; must be called at appropriate spl level
1151: */
1.59 thorpej 1152: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1153: void
1154: _pool_put(struct pool *pp, void *v, const char *file, long line)
1155: {
1156:
1157: simple_lock(&pp->pr_slock);
1158: pr_enter(pp, file, line);
1159:
1.56 sommerfe 1160: pr_log(pp, v, PRLOG_PUT, file, line);
1161:
1162: pool_do_put(pp, v);
1.21 thorpej 1163:
1.25 thorpej 1164: pr_leave(pp);
1.21 thorpej 1165: simple_unlock(&pp->pr_slock);
1.1 pk 1166: }
1.57 sommerfe 1167: #undef pool_put
1.59 thorpej 1168: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1169:
1.56 sommerfe 1170: void
1171: pool_put(struct pool *pp, void *v)
1172: {
1173:
1174: simple_lock(&pp->pr_slock);
1175:
1176: pool_do_put(pp, v);
1177:
1178: simple_unlock(&pp->pr_slock);
1179: }
1.57 sommerfe 1180:
1.59 thorpej 1181: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1182: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1183: #endif
1.74 thorpej 1184:
1185: /*
1186: * Add N items to the pool.
1187: */
1188: int
1189: pool_prime(struct pool *pp, int n)
1190: {
1.83 scw 1191: struct pool_item_header *ph = NULL;
1.74 thorpej 1192: caddr_t cp;
1.75 simonb 1193: int newpages;
1.74 thorpej 1194:
1195: simple_lock(&pp->pr_slock);
1196:
1197: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1198:
1199: while (newpages-- > 0) {
1200: simple_unlock(&pp->pr_slock);
1201: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1202: if (__predict_true(cp != NULL))
1203: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1204:
1205: if (__predict_false(cp == NULL || ph == NULL)) {
1206: if (cp != NULL)
1207: pool_allocator_free(pp, cp);
1.91 yamt 1208: simple_lock(&pp->pr_slock);
1.74 thorpej 1209: break;
1210: }
1211:
1.91 yamt 1212: simple_lock(&pp->pr_slock);
1.74 thorpej 1213: pool_prime_page(pp, cp, ph);
1214: pp->pr_npagealloc++;
1215: pp->pr_minpages++;
1216: }
1217:
1218: if (pp->pr_minpages >= pp->pr_maxpages)
1219: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1220:
1221: simple_unlock(&pp->pr_slock);
1222: return (0);
1223: }
1.55 thorpej 1224:
1225: /*
1.3 pk 1226: * Add a page worth of items to the pool.
1.21 thorpej 1227: *
1228: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1229: */
1.55 thorpej 1230: static void
1231: pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1.3 pk 1232: {
1233: struct pool_item *pi;
1234: caddr_t cp = storage;
1235: unsigned int align = pp->pr_align;
1236: unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1237: int n;
1.89 yamt 1238: int s;
1.36 pk 1239:
1.91 yamt 1240: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1241:
1.66 thorpej 1242: #ifdef DIAGNOSTIC
1243: if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1244: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1245: #endif
1.3 pk 1246:
1247: /*
1248: * Insert page header.
1249: */
1.88 chs 1250: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1251: TAILQ_INIT(&ph->ph_itemlist);
1252: ph->ph_page = storage;
1253: ph->ph_nmissing = 0;
1.89 yamt 1254: s = splclock();
1255: ph->ph_time = mono_time;
1256: splx(s);
1.88 chs 1257: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1258: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1259:
1.6 thorpej 1260: pp->pr_nidle++;
1261:
1.3 pk 1262: /*
1263: * Color this page.
1264: */
1265: cp = (caddr_t)(cp + pp->pr_curcolor);
1266: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1267: pp->pr_curcolor = 0;
1268:
1269: /*
 1270: 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1271: */
1272: if (ioff != 0)
1273: cp = (caddr_t)(cp + (align - ioff));
1274:
1275: /*
1276: * Insert remaining chunks on the bucket list.
1277: */
1278: n = pp->pr_itemsperpage;
1.20 thorpej 1279: pp->pr_nitems += n;
1.3 pk 1280:
1.97 ! yamt 1281: ph->ph_off = cp - storage;
! 1282:
! 1283: if (pp->pr_roflags & PR_NOTOUCH) {
! 1284: uint16_t *freelist = PR_FREELIST(ph);
! 1285: int i;
! 1286:
! 1287: ph->ph_firstfree = 0;
! 1288: for (i = 0; i < n - 1; i++)
! 1289: freelist[i] = i + 1;
! 1290: freelist[n - 1] = PR_INDEX_EOL;
! 1291: } else {
! 1292: while (n--) {
! 1293: pi = (struct pool_item *)cp;
1.78 thorpej 1294:
1.97 ! yamt 1295: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1296:
1.97 ! yamt 1297: /* Insert on page list */
! 1298: TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1299: #ifdef DIAGNOSTIC
1.97 ! yamt 1300: pi->pi_magic = PI_MAGIC;
1.3 pk 1301: #endif
1.97 ! yamt 1302: cp = (caddr_t)(cp + pp->pr_size);
! 1303: }
1.3 pk 1304: }
1305:
1306: /*
1307: * If the pool was depleted, point at the new page.
1308: */
1309: if (pp->pr_curpage == NULL)
1310: pp->pr_curpage = ph;
1311:
1312: if (++pp->pr_npages > pp->pr_hiwat)
1313: pp->pr_hiwat = pp->pr_npages;
1314: }
1315:
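/*
 * Coloring example: with 64-byte alignment and 192 bytes of slack,
 * successive pages start their item runs at offsets 0, 64, 128, 192,
 * then wrap back to 0, so equally-indexed items on different pages
 * land in different cache lines.
 */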
1.20 thorpej 1316: /*
1.52 thorpej 1317:  * Used by pool_get() when nitems drops below the low water mark; it
1.88 chs 1318:  * brings pr_nitems back up to the low water mark.
1.20 thorpej 1319: *
1.21 thorpej 1320:  * Note 1: we never wait for memory here; we let the caller decide what to do.
1.20 thorpej 1321: *
1.73 thorpej 1322:  * Note 2: we must be called with the pool already locked, and we return
1.20 thorpej 1323: * with it locked.
1324: */
1325: static int
1.42 thorpej 1326: pool_catchup(struct pool *pp)
1.20 thorpej 1327: {
1.83 scw 1328: struct pool_item_header *ph = NULL;
1.20 thorpej 1329: caddr_t cp;
1330: int error = 0;
1331:
1.54 thorpej 1332: while (POOL_NEEDS_CATCHUP(pp)) {
1.20 thorpej 1333: /*
1.21 thorpej 1334: * Call the page back-end allocator for more memory.
1335: *
1336: * XXX: We never wait, so should we bother unlocking
1337: * the pool descriptor?
1.20 thorpej 1338: */
1.21 thorpej 1339: simple_unlock(&pp->pr_slock);
1.66 thorpej 1340: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1.55 thorpej 1341: if (__predict_true(cp != NULL))
1342: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1343: if (__predict_false(cp == NULL || ph == NULL)) {
1344: if (cp != NULL)
1.66 thorpej 1345: pool_allocator_free(pp, cp);
1.20 thorpej 1346: error = ENOMEM;
1.91 yamt 1347: simple_lock(&pp->pr_slock);
1.20 thorpej 1348: break;
1349: }
1.91 yamt 1350: simple_lock(&pp->pr_slock);
1.55 thorpej 1351: pool_prime_page(pp, cp, ph);
1.26 thorpej 1352: pp->pr_npagealloc++;
1.20 thorpej 1353: }
1354:
1355: return (error);
1356: }
1357:
1.88 chs 1358: static void
1359: pool_update_curpage(struct pool *pp)
1360: {
1361:
1362: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1363: if (pp->pr_curpage == NULL) {
1364: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1365: }
1366: }
1367:
1.3 pk 1368: void
1.42 thorpej 1369: pool_setlowat(struct pool *pp, int n)
1.3 pk 1370: {
1.15 pk 1371:
1.21 thorpej 1372: simple_lock(&pp->pr_slock);
1373:
1.3 pk 1374: pp->pr_minitems = n;
1.15 pk 1375: pp->pr_minpages = (n == 0)
1376: ? 0
1.18 thorpej 1377: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1378:
1379: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1380: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1381: /*
1382: * XXX: Should we log a warning? Should we set up a timeout
1383: * to try again in a second or so? The latter could break
1384: * a caller's assumptions about interrupt protection, etc.
1385: */
1386: }
1.21 thorpej 1387:
1388: simple_unlock(&pp->pr_slock);
1.3 pk 1389: }
1390:
1391: void
1.42 thorpej 1392: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1393: {
1.15 pk 1394:
1.21 thorpej 1395: simple_lock(&pp->pr_slock);
1396:
1.15 pk 1397: pp->pr_maxpages = (n == 0)
1398: ? 0
1.18 thorpej 1399: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1400:
1401: simple_unlock(&pp->pr_slock);
1.3 pk 1402: }
1403:
1.20 thorpej 1404: void
1.42 thorpej 1405: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1406: {
1407:
1.21 thorpej 1408: simple_lock(&pp->pr_slock);
1.20 thorpej 1409:
1410: pp->pr_hardlimit = n;
1411: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1412: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1413: pp->pr_hardlimit_warning_last.tv_sec = 0;
1414: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1415:
1416: /*
1.21 thorpej 1417: * In-line version of pool_sethiwat(), because we don't want to
1418: * release the lock.
1.20 thorpej 1419: */
1420: pp->pr_maxpages = (n == 0)
1421: ? 0
1422: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1423:
1424: simple_unlock(&pp->pr_slock);
1.20 thorpej 1425: }
1.3 pk 1426:
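/*
 * Illustrative sketch: typical tuning calls for the hypothetical
 * foo_pool, usually issued right after pool_init().
 */
void
foo_tune(void)
{

	pool_setlowat(&foo_pool, 16);	/* keep pages for >= 16 items */
	pool_sethiwat(&foo_pool, 256);	/* free idle pages beyond this */
	pool_sethardlimit(&foo_pool, 1024,
	    "WARNING: foo_pool limit reached", 60);
}
/*
 * Once 1024 items are outstanding, PR_NOWAIT (or PR_WAITOK|PR_LIMITFAIL)
 * allocations fail and the warning is logged at most once per 60
 * seconds; plain PR_WAITOK callers sleep until items are returned.
 */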
1427: /*
1428: * Release all complete pages that have not been used recently.
1429: */
1.66 thorpej 1430: int
1.59 thorpej 1431: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1432: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1433: #else
1434: pool_reclaim(struct pool *pp)
1435: #endif
1.3 pk 1436: {
1437: struct pool_item_header *ph, *phnext;
1.43 thorpej 1438: struct pool_cache *pc;
1.21 thorpej 1439: struct timeval curtime;
1.61 chs 1440: struct pool_pagelist pq;
1.88 chs 1441: struct timeval diff;
1.21 thorpej 1442: int s;
1.3 pk 1443:
1.68 thorpej 1444: if (pp->pr_drain_hook != NULL) {
1445: /*
1446: * The drain hook must be called with the pool unlocked.
1447: */
1448: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1449: }
1450:
1.21 thorpej 1451: if (simple_lock_try(&pp->pr_slock) == 0)
1.66 thorpej 1452: return (0);
1.25 thorpej 1453: pr_enter(pp, file, line);
1.68 thorpej 1454:
1.88 chs 1455: LIST_INIT(&pq);
1.3 pk 1456:
1.43 thorpej 1457: /*
1458: * Reclaim items from the pool's caches.
1459: */
1.61 chs 1460: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1.43 thorpej 1461: pool_cache_reclaim(pc);
1462:
1.21 thorpej 1463: s = splclock();
1464: curtime = mono_time;
1465: splx(s);
1466:
1.88 chs 1467: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1468: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1469:
1470: /* Check our minimum page claim */
1471: if (pp->pr_npages <= pp->pr_minpages)
1472: break;
1473:
1.88 chs 1474: KASSERT(ph->ph_nmissing == 0);
1475: timersub(&curtime, &ph->ph_time, &diff);
1476: if (diff.tv_sec < pool_inactive_time)
1477: continue;
1.21 thorpej 1478:
1.88 chs 1479: /*
1480: * If freeing this page would put us below
1481: * the low water mark, stop now.
1482: */
1483: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1484: pp->pr_minitems)
1485: break;
1.21 thorpej 1486:
1.88 chs 1487: pr_rmpage(pp, ph, &pq);
1.3 pk 1488: }
1489:
1.25 thorpej 1490: pr_leave(pp);
1.21 thorpej 1491: simple_unlock(&pp->pr_slock);
1.88 chs 1492: if (LIST_EMPTY(&pq))
1.66 thorpej 1493: return (0);
1494:
1.88 chs 1495: while ((ph = LIST_FIRST(&pq)) != NULL) {
1496: LIST_REMOVE(ph, ph_pagelist);
1.66 thorpej 1497: pool_allocator_free(pp, ph->ph_page);
1.61 chs 1498: if (pp->pr_roflags & PR_PHINPAGE) {
1499: continue;
1500: }
1.85 pk 1501: s = splvm();
1.97 ! yamt 1502: pool_put(pp->pr_phpool, ph);
1.61 chs 1503: splx(s);
1504: }
1.66 thorpej 1505:
1506: return (1);
1.3 pk 1507: }
1508:
1509: /*
1510: * Drain pools, one at a time.
1.21 thorpej 1511: *
1512: * Note, we must never be called from an interrupt context.
1.3 pk 1513: */
1514: void
1.42 thorpej 1515: pool_drain(void *arg)
1.3 pk 1516: {
1517: struct pool *pp;
1.23 thorpej 1518: int s;
1.3 pk 1519:
1.61 chs 1520: pp = NULL;
1.49 thorpej 1521: s = splvm();
1.23 thorpej 1522: simple_lock(&pool_head_slock);
1.61 chs 1523: if (drainpp == NULL) {
1524: drainpp = TAILQ_FIRST(&pool_head);
1525: }
1526: if (drainpp) {
1527: pp = drainpp;
1528: drainpp = TAILQ_NEXT(pp, pr_poollist);
1529: }
1530: simple_unlock(&pool_head_slock);
1.63 chs 1531: pool_reclaim(pp);
1.61 chs 1532: splx(s);
1.3 pk 1533: }
1534:
1535: /*
1536: * Diagnostic helpers.
1537: */
1538: void
1.42 thorpej 1539: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1540: {
1541: int s;
1542:
1.49 thorpej 1543: s = splvm();
1.25 thorpej 1544: if (simple_lock_try(&pp->pr_slock) == 0) {
1545: printf("pool %s is locked; try again later\n",
1546: pp->pr_wchan);
1547: splx(s);
1548: return;
1549: }
1550: pool_print1(pp, modif, printf);
1.21 thorpej 1551: simple_unlock(&pp->pr_slock);
1552: splx(s);
1553: }
1554:
1.25 thorpej 1555: void
1.42 thorpej 1556: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1557: {
1558: int didlock = 0;
1559:
1560: if (pp == NULL) {
1561: (*pr)("Must specify a pool to print.\n");
1562: return;
1563: }
1564:
1565: /*
1566: * Called from DDB; interrupts should be blocked, and all
1567: * other processors should be paused. We can skip locking
1568: * the pool in this case.
1569: *
1570: * We do a simple_lock_try() just to print the lock
1571: * status, however.
1572: */
1573:
1574: if (simple_lock_try(&pp->pr_slock) == 0)
1575: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1576: else
1577: didlock = 1;
1578:
1579: pool_print1(pp, modif, pr);
1580:
1581: if (didlock)
1582: simple_unlock(&pp->pr_slock);
1583: }
1584:
1.21 thorpej 1585: static void
1.97 ! yamt 1586: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
! 1587: void (*pr)(const char *, ...))
1.88 chs 1588: {
1589: struct pool_item_header *ph;
1590: #ifdef DIAGNOSTIC
1591: struct pool_item *pi;
1592: #endif
1593:
1594: LIST_FOREACH(ph, pl, ph_pagelist) {
1595: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1596: ph->ph_page, ph->ph_nmissing,
1597: (u_long)ph->ph_time.tv_sec,
1598: (u_long)ph->ph_time.tv_usec);
1599: #ifdef DIAGNOSTIC
1.97 ! yamt 1600: if (!(pp->pr_roflags & PR_NOTOUCH)) {
! 1601: TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
! 1602: if (pi->pi_magic != PI_MAGIC) {
! 1603: (*pr)("\t\t\titem %p, magic 0x%x\n",
! 1604: pi, pi->pi_magic);
! 1605: }
1.88 chs 1606: }
1607: }
1608: #endif
1609: }
1610: }
1611:
1612: static void
1.42 thorpej 1613: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1614: {
1.25 thorpej 1615: struct pool_item_header *ph;
1.44 thorpej 1616: struct pool_cache *pc;
1617: struct pool_cache_group *pcg;
1618: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1619: char c;
1620:
1621: while ((c = *modif++) != '\0') {
1622: if (c == 'l')
1623: print_log = 1;
1624: if (c == 'p')
1625: print_pagelist = 1;
1.44 thorpej 1626: if (c == 'c')
1627: print_cache = 1;
1.25 thorpej 1628: }
1629:
1630: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1631: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1632: pp->pr_roflags);
1.66 thorpej 1633: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1634: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1635: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1636: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1637: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1638:
1639: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1640: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1641: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1642: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1643:
1644: if (print_pagelist == 0)
1645: goto skip_pagelist;
1646:
1.88 chs 1647: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1648: (*pr)("\n\tempty page list:\n");
1.97 ! yamt 1649: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1650: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1651: (*pr)("\n\tfull page list:\n");
1.97 ! yamt 1652: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1653: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1654: (*pr)("\n\tpartial-page list:\n");
1.97 ! yamt 1655: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1656:
1.25 thorpej 1657: if (pp->pr_curpage == NULL)
1658: (*pr)("\tno current page\n");
1659: else
1660: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1661:
1662: skip_pagelist:
1663: if (print_log == 0)
1664: goto skip_log;
1665:
1666: (*pr)("\n");
1667: if ((pp->pr_roflags & PR_LOGGING) == 0)
1668: (*pr)("\tno log\n");
1669: else
1670: pr_printlog(pp, NULL, pr);
1.3 pk 1671:
1.25 thorpej 1672: skip_log:
1.44 thorpej 1673: if (print_cache == 0)
1674: goto skip_cache;
1675:
1.61 chs 1676: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.44 thorpej 1677: (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1678: pc->pc_allocfrom, pc->pc_freeto);
1.48 thorpej 1679: (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1680: pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.61 chs 1681: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.44 thorpej 1682: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1.87 thorpej 1683: for (i = 0; i < PCG_NOBJECTS; i++) {
1684: if (pcg->pcg_objects[i].pcgo_pa !=
1685: POOL_PADDR_INVALID) {
1686: (*pr)("\t\t\t%p, 0x%llx\n",
1687: pcg->pcg_objects[i].pcgo_va,
1688: (unsigned long long)
1689: pcg->pcg_objects[i].pcgo_pa);
1690: } else {
1691: (*pr)("\t\t\t%p\n",
1692: pcg->pcg_objects[i].pcgo_va);
1693: }
1694: }
1.44 thorpej 1695: }
1696: }
1697:
1698: skip_cache:
1.88 chs 1699: pr_enter_check(pp, pr);
1700: }
1701:
1702: static int
1703: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1704: {
1705: struct pool_item *pi;
1706: caddr_t page;
1707: int n;
1708:
1709: page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1710: if (page != ph->ph_page &&
1711: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1712: if (label != NULL)
1713: printf("%s: ", label);
1714: printf("pool(%p:%s): page inconsistency: page %p;"
1715: " at page head addr %p (p %p)\n", pp,
1716: pp->pr_wchan, ph->ph_page,
1717: ph, page);
1718: return 1;
1719: }
1.3 pk 1720:
1.97 ! yamt 1721: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
! 1722: return 0;
! 1723:
1.88 chs 1724: for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1725: pi != NULL;
1726: pi = TAILQ_NEXT(pi,pi_list), n++) {
1727:
1728: #ifdef DIAGNOSTIC
1729: if (pi->pi_magic != PI_MAGIC) {
1730: if (label != NULL)
1731: printf("%s: ", label);
1732: printf("pool(%s): free list modified: magic=%x;"
1733: " page %p; item ordinal %d;"
1734: " addr %p (p %p)\n",
1735: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1736: n, pi, page);
1737: panic("pool");
1738: }
1739: #endif
1740: page =
1741: (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1742: if (page == ph->ph_page)
1743: continue;
1744:
1745: if (label != NULL)
1746: printf("%s: ", label);
1747: printf("pool(%p:%s): page inconsistency: page %p;"
1748: " item ordinal %d; addr %p (p %p)\n", pp,
1749: pp->pr_wchan, ph->ph_page,
1750: n, pi, page);
1751: return 1;
1752: }
1753: return 0;
1.3 pk 1754: }
1755:
1.88 chs 1756:
1.3 pk 1757: int
1.42 thorpej 1758: pool_chk(struct pool *pp, const char *label)
1.3 pk 1759: {
1760: struct pool_item_header *ph;
1761: int r = 0;
1762:
1.21 thorpej 1763: simple_lock(&pp->pr_slock);
1.88 chs 1764: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1765: r = pool_chk_page(pp, label, ph);
1766: if (r) {
1767: goto out;
1768: }
1769: }
1770: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1771: r = pool_chk_page(pp, label, ph);
1772: if (r) {
1.3 pk 1773: goto out;
1774: }
1.88 chs 1775: }
1776: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1777: r = pool_chk_page(pp, label, ph);
1778: if (r) {
1.3 pk 1779: goto out;
1780: }
1781: }
1.88 chs 1782:
1.3 pk 1783: out:
1.21 thorpej 1784: simple_unlock(&pp->pr_slock);
1.3 pk 1785: return (r);
1.43 thorpej 1786: }
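
/*
 * Usage sketch (hypothetical pool, not from this revision): a debug
 * build can run the consistency check after a batch of pool operations.
 * The label only prefixes the diagnostics printed by pool_chk_page();
 * a non-zero return indicates a page inconsistency was found.
 *
 *	if (pool_chk(&foo_pool, "foo after reclaim") != 0)
 *		panic("foo_pool: page lists inconsistent");
 */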
1787:
1788: /*
1789: * pool_cache_init:
1790: *
1791: * Initialize a pool cache.
1792: *
1793: * NOTE: If the pool must be protected from interrupts, we expect
1794: * to be called at the appropriate interrupt priority level.
1795: */
1796: void
1797: pool_cache_init(struct pool_cache *pc, struct pool *pp,
1798: int (*ctor)(void *, void *, int),
1799: void (*dtor)(void *, void *),
1800: void *arg)
1801: {
1802:
1803: TAILQ_INIT(&pc->pc_grouplist);
1804: simple_lock_init(&pc->pc_slock);
1805:
1806: pc->pc_allocfrom = NULL;
1807: pc->pc_freeto = NULL;
1808: pc->pc_pool = pp;
1809:
1810: pc->pc_ctor = ctor;
1811: pc->pc_dtor = dtor;
1812: pc->pc_arg = arg;
1813:
1.48 thorpej 1814: pc->pc_hits = 0;
1815: pc->pc_misses = 0;
1816:
1817: pc->pc_ngroups = 0;
1818:
1819: pc->pc_nitems = 0;
1820:
1.43 thorpej 1821: simple_lock(&pp->pr_slock);
1822: TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1823: simple_unlock(&pp->pr_slock);
1824: }
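
/*
 * Usage sketch (hypothetical "struct foo" subsystem): the backing pool
 * is initialized first, then the cache is layered in front of it.  The
 * constructor runs when a cache miss allocates a fresh object from the
 * pool; returning non-zero makes pool_cache_get() fail with NULL.
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	static int
 *	foo_ctor(void *arg, void *object, int flags)
 *	{
 *		struct foo *f = object;
 *
 *		memset(f, 0, sizeof(*f));
 *		return (0);
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, NULL, NULL);
 */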
1825:
1826: /*
1827: * pool_cache_destroy:
1828: *
1829: * Destroy a pool cache.
1830: */
1831: void
1832: pool_cache_destroy(struct pool_cache *pc)
1833: {
1834: struct pool *pp = pc->pc_pool;
1835:
1836: /* First, invalidate the entire cache. */
1837: pool_cache_invalidate(pc);
1838:
1839: /* ...and remove it from the pool's cache list. */
1840: simple_lock(&pp->pr_slock);
1841: TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1842: simple_unlock(&pp->pr_slock);
1843: }
1844:
1845: static __inline void *
1.87 thorpej 1846: pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1.43 thorpej 1847: {
1848: void *object;
1849: u_int idx;
1850:
1851: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 1852: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 1853: idx = --pcg->pcg_avail;
1854:
1.87 thorpej 1855: KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1856: object = pcg->pcg_objects[idx].pcgo_va;
1857: if (pap != NULL)
1858: *pap = pcg->pcg_objects[idx].pcgo_pa;
1859: pcg->pcg_objects[idx].pcgo_va = NULL;
1.43 thorpej 1860:
1861: return (object);
1862: }
1863:
1864: static __inline void
1.87 thorpej 1865: pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1.43 thorpej 1866: {
1867: u_int idx;
1868:
1869: KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1870: idx = pcg->pcg_avail++;
1871:
1.87 thorpej 1872: KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1873: pcg->pcg_objects[idx].pcgo_va = object;
1874: pcg->pcg_objects[idx].pcgo_pa = pa;
1.43 thorpej 1875: }
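
/*
 * Note: pcg_get() and pcg_put() treat pcg_objects[] as a small LIFO
 * stack of (virtual, physical) address pairs; pcg_avail is the number
 * of filled slots and therefore indexes the first free one.  A NULL
 * pcgo_va marks an empty slot, which the KASSERTs above rely on.
 */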
1876:
1877: /*
1.87 thorpej 1878: * pool_cache_get{,_paddr}:
1.43 thorpej 1879: *
1.87 thorpej 1880: * Get an object from a pool cache (optionally returning
1881: * the physical address of the object).
1.43 thorpej 1882: */
1883: void *
1.87 thorpej 1884: pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1.43 thorpej 1885: {
1886: struct pool_cache_group *pcg;
1887: void *object;
1.58 thorpej 1888:
1889: #ifdef LOCKDEBUG
1890: if (flags & PR_WAITOK)
1891: simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1892: #endif
1.43 thorpej 1893:
1894: simple_lock(&pc->pc_slock);
1895:
1896: if ((pcg = pc->pc_allocfrom) == NULL) {
1.61 chs 1897: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1898: if (pcg->pcg_avail != 0) {
1899: pc->pc_allocfrom = pcg;
1900: goto have_group;
1901: }
1902: }
1903:
1904: /*
1905: * No groups with any available objects. Allocate
1906: * a new object, construct it, and return it to
1907: * the caller. We will allocate a group, if necessary,
1908: * when the object is freed back to the cache.
1909: */
1.48 thorpej 1910: pc->pc_misses++;
1.43 thorpej 1911: simple_unlock(&pc->pc_slock);
1912: object = pool_get(pc->pc_pool, flags);
1913: if (object != NULL && pc->pc_ctor != NULL) {
1914: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1915: pool_put(pc->pc_pool, object);
1916: return (NULL);
1917: }
1918: }
1.87 thorpej 1919: if (object != NULL && pap != NULL) {
1920: #ifdef POOL_VTOPHYS
1921: *pap = POOL_VTOPHYS(object);
1922: #else
1923: *pap = POOL_PADDR_INVALID;
1924: #endif
1925: }
1.43 thorpej 1926: return (object);
1927: }
1928:
1929: have_group:
1.48 thorpej 1930: pc->pc_hits++;
1931: pc->pc_nitems--;
1.87 thorpej 1932: object = pcg_get(pcg, pap);
1.43 thorpej 1933:
1934: if (pcg->pcg_avail == 0)
1935: pc->pc_allocfrom = NULL;
1.45 thorpej 1936:
1.43 thorpej 1937: simple_unlock(&pc->pc_slock);
1938:
1939: return (object);
1940: }
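
/*
 * Usage sketch (hypothetical cache and helper): callers that do not
 * need the physical address use the pool_cache_get() wrapper from
 * <sys/pool.h>, which passes a NULL "pap".  A driver feeding objects
 * to DMA hardware might use the _paddr form directly:
 *
 *	paddr_t pa;
 *	void *obj;
 *
 *	obj = pool_cache_get_paddr(&foo_cache, PR_NOWAIT, &pa);
 *	if (obj != NULL && pa != POOL_PADDR_INVALID)
 *		foo_dma_load(obj, pa);
 */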
1941:
1942: /*
1.87 thorpej 1943: * pool_cache_put{,_paddr}:
1.43 thorpej 1944: *
1.87 thorpej 1945: * Put an object back to the pool cache (optionally caching the
1946: * physical address of the object).
1.43 thorpej 1947: */
1948: void
1.87 thorpej 1949: pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1.43 thorpej 1950: {
1951: struct pool_cache_group *pcg;
1.60 thorpej 1952: int s;
1.43 thorpej 1953:
1954: simple_lock(&pc->pc_slock);
1955:
1956: if ((pcg = pc->pc_freeto) == NULL) {
1.61 chs 1957: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1958: if (pcg->pcg_avail != PCG_NOBJECTS) {
1959: pc->pc_freeto = pcg;
1960: goto have_group;
1961: }
1962: }
1963:
1964: /*
1965: * No empty groups to free the object to. Attempt to
1.47 thorpej 1966: * allocate one.
1.43 thorpej 1967: */
1.47 thorpej 1968: simple_unlock(&pc->pc_slock);
1.60 thorpej 1969: s = splvm();
1.43 thorpej 1970: pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60 thorpej 1971: splx(s);
1.43 thorpej 1972: if (pcg != NULL) {
1973: memset(pcg, 0, sizeof(*pcg));
1.47 thorpej 1974: simple_lock(&pc->pc_slock);
1.48 thorpej 1975: pc->pc_ngroups++;
1.43 thorpej 1976: TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1.47 thorpej 1977: if (pc->pc_freeto == NULL)
1978: pc->pc_freeto = pcg;
1.43 thorpej 1979: goto have_group;
1980: }
1981:
1982: /*
1983: * Unable to allocate a cache group; destruct the object
1984: * and free it back to the pool.
1985: */
1.51 thorpej 1986: pool_cache_destruct_object(pc, object);
1.43 thorpej 1987: return;
1988: }
1989:
1990: have_group:
1.48 thorpej 1991: pc->pc_nitems++;
1.87 thorpej 1992: pcg_put(pcg, object, pa);
1.43 thorpej 1993:
1994: if (pcg->pcg_avail == PCG_NOBJECTS)
1995: pc->pc_freeto = NULL;
1996:
1997: simple_unlock(&pc->pc_slock);
1.51 thorpej 1998: }
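
/*
 * The common case is the pool_cache_put() wrapper from <sys/pool.h>,
 * which passes POOL_PADDR_INVALID here.  The _paddr form lets callers
 * that already know the physical address cache it alongside the
 * virtual one, so a later pool_cache_get_paddr() of the same object
 * avoids a fresh translation.
 */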
1999:
2000: /*
2001: * pool_cache_destruct_object:
2002: *
2003: * Force destruction of an object and its release back into
2004: * the pool.
2005: */
2006: void
2007: pool_cache_destruct_object(struct pool_cache *pc, void *object)
2008: {
2009:
2010: if (pc->pc_dtor != NULL)
2011: (*pc->pc_dtor)(pc->pc_arg, object);
2012: pool_put(pc->pc_pool, object);
1.43 thorpej 2013: }
2014:
2015: /*
2016: * pool_cache_do_invalidate:
2017: *
2018: * This internal function implements pool_cache_invalidate() and
2019: * pool_cache_reclaim().
2020: */
2021: static void
2022: pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1.56 sommerfe 2023: void (*putit)(struct pool *, void *))
1.43 thorpej 2024: {
2025: struct pool_cache_group *pcg, *npcg;
2026: void *object;
1.60 thorpej 2027: int s;
1.43 thorpej 2028:
2029: for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
2030: pcg = npcg) {
2031: npcg = TAILQ_NEXT(pcg, pcg_list);
2032: while (pcg->pcg_avail != 0) {
1.48 thorpej 2033: pc->pc_nitems--;
1.87 thorpej 2034: object = pcg_get(pcg, NULL);
1.45 thorpej 2035: if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
2036: pc->pc_allocfrom = NULL;
1.43 thorpej 2037: if (pc->pc_dtor != NULL)
2038: (*pc->pc_dtor)(pc->pc_arg, object);
1.56 sommerfe 2039: (*putit)(pc->pc_pool, object);
1.43 thorpej 2040: }
2041: if (free_groups) {
1.48 thorpej 2042: pc->pc_ngroups--;
1.43 thorpej 2043: TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1.46 thorpej 2044: if (pc->pc_freeto == pcg)
2045: pc->pc_freeto = NULL;
1.60 thorpej 2046: s = splvm();
1.43 thorpej 2047: pool_put(&pcgpool, pcg);
1.60 thorpej 2048: splx(s);
1.43 thorpej 2049: }
2050: }
2051: }
2052:
2053: /*
2054: * pool_cache_invalidate:
2055: *
2056: * Invalidate a pool cache (destruct and release all of the
2057: * cached objects).
2058: */
2059: void
2060: pool_cache_invalidate(struct pool_cache *pc)
2061: {
2062:
2063: simple_lock(&pc->pc_slock);
1.56 sommerfe 2064: pool_cache_do_invalidate(pc, 0, pool_put);
1.43 thorpej 2065: simple_unlock(&pc->pc_slock);
2066: }
2067:
2068: /*
2069: * pool_cache_reclaim:
2070: *
2071: * Reclaim a pool cache for pool_reclaim().
2072: */
2073: static void
2074: pool_cache_reclaim(struct pool_cache *pc)
2075: {
2076:
1.47 thorpej 2077: simple_lock(&pc->pc_slock);
1.43 thorpej 2078: pool_cache_do_invalidate(pc, 1, pool_do_put);
2079: simple_unlock(&pc->pc_slock);
1.3 pk 2080: }
1.66 thorpej 2081:
2082: /*
2083: * Pool backend allocators.
2084: *
2085: * Each pool has a backend allocator that handles allocation, deallocation,
2086: * and any additional draining that might be needed.
2087: *
2088: * We provide two standard allocators:
2089: *
2090: * pool_allocator_kmem - the default when no allocator is specified
2091: *
2092: * pool_allocator_nointr - used for pools that will not be accessed
2093: * in interrupt context.
2094: */
2095: void *pool_page_alloc(struct pool *, int);
2096: void pool_page_free(struct pool *, void *);
2097:
2098: struct pool_allocator pool_allocator_kmem = {
2099: pool_page_alloc, pool_page_free, 0,
2100: };
2101:
2102: void *pool_page_alloc_nointr(struct pool *, int);
2103: void pool_page_free_nointr(struct pool *, void *);
2104:
2105: struct pool_allocator pool_allocator_nointr = {
2106: pool_page_alloc_nointr, pool_page_free_nointr, 0,
2107: };
2108:
2109: #ifdef POOL_SUBPAGE
2110: void *pool_subpage_alloc(struct pool *, int);
2111: void pool_subpage_free(struct pool *, void *);
2112:
2113: struct pool_allocator pool_allocator_kmem_subpage = {
2114: pool_subpage_alloc, pool_subpage_free, 0,
2115: };
2116: #endif /* POOL_SUBPAGE */
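
/*
 * Clients may also supply their own allocator.  Sketch (hypothetical
 * functions): fill in pa_alloc, pa_free and a page size (0 selects
 * the platform default), then hand the allocator to pool_init(); the
 * remaining pool_allocator fields are set up by the pool code itself.
 *
 *	void *foo_page_alloc(struct pool *, int);
 *	void foo_page_free(struct pool *, void *);
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &foo_allocator);
 */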
2117:
2118: /*
2119: * We have at least three different resources for the same allocation and
2120: * each resource can be depleted. First, we have the ready elements in the
2121: * pool. Then we have the resource (typically a vm_map) for this allocator.
2122: * Finally, we have physical memory. Waiting for any of these can be
2123: * unnecessary when any other is freed, but the kernel doesn't support
2124: * sleeping on multiple wait channels, so we have to employ another strategy.
2125: *
2126: * The caller sleeps on the pool (so that it can be awakened when an item
2127: * is returned to the pool), but we set PA_WANT on the allocator. When a
2128: * page is returned to the allocator and PA_WANT is set, pool_allocator_free
2129: * will wake up all sleeping pools belonging to this allocator.
2130: *
2131: * XXX Thundering herd.
2132: */
2133: void *
2134: pool_allocator_alloc(struct pool *org, int flags)
2135: {
2136: struct pool_allocator *pa = org->pr_alloc;
2137: struct pool *pp, *start;
2138: int s, freed;
2139: void *res;
2140:
1.91 yamt 2141: LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
2142:
1.66 thorpej 2143: do {
2144: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2145: return (res);
1.68 thorpej 2146: if ((flags & PR_WAITOK) == 0) {
2147: /*
2148: * We only run the drain hook here if PR_NOWAIT.
2149: * In other cases, the hook will be run in
2150: * pool_reclaim().
2151: */
2152: if (org->pr_drain_hook != NULL) {
2153: (*org->pr_drain_hook)(org->pr_drain_hook_arg,
2154: flags);
2155: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2156: return (res);
2157: }
1.66 thorpej 2158: break;
1.68 thorpej 2159: }
1.66 thorpej 2160:
2161: /*
2162: * Drain all pools, except "org", that use this
2163: * allocator. We do this to reclaim VA space.
2164: * pa_alloc is responsible for waiting for
2165: * physical memory.
2166: *
2167: * XXX We risk looping forever if someone
2168: * calls pool_destroy on "start". But there is no
2169: * other way to have potentially sleeping pool_reclaim,
2170: * non-sleeping locks on pool_allocator, and some
2171: * stirring of drained pools in the allocator.
1.68 thorpej 2172: *
2173: * XXX Maybe we should use pool_head_slock for locking
2174: * the allocators?
1.66 thorpej 2175: */
2176: freed = 0;
2177:
2178: s = splvm();
2179: simple_lock(&pa->pa_slock);
2180: pp = start = TAILQ_FIRST(&pa->pa_list);
2181: do {
2182: TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
2183: TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
2184: if (pp == org)
2185: continue;
1.73 thorpej 2186: simple_unlock(&pa->pa_slock);
1.66 thorpej 2187: freed = pool_reclaim(pp);
1.73 thorpej 2188: simple_lock(&pa->pa_slock);
1.66 thorpej 2189: } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
2190: freed == 0);
2191:
2192: if (freed == 0) {
2193: /*
2194: * We set PA_WANT here, the caller will most likely
2195: * sleep waiting for pages (if not, this won't hurt
2196: * that much), and there is no way to set this in
2197: * the caller without violating locking order.
2198: */
2199: pa->pa_flags |= PA_WANT;
2200: }
2201: simple_unlock(&pa->pa_slock);
2202: splx(s);
2203: } while (freed);
2204: return (NULL);
2205: }
2206:
2207: void
2208: pool_allocator_free(struct pool *pp, void *v)
2209: {
2210: struct pool_allocator *pa = pp->pr_alloc;
2211: int s;
2212:
1.91 yamt 2213: LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
2214:
1.66 thorpej 2215: (*pa->pa_free)(pp, v);
2216:
2217: s = splvm();
2218: simple_lock(&pa->pa_slock);
2219: if ((pa->pa_flags & PA_WANT) == 0) {
2220: simple_unlock(&pa->pa_slock);
2221: splx(s);
2222: return;
2223: }
2224:
2225: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2226: simple_lock(&pp->pr_slock);
2227: if ((pp->pr_flags & PR_WANTED) != 0) {
2228: pp->pr_flags &= ~PR_WANTED;
2229: wakeup(pp);
2230: }
1.69 thorpej 2231: simple_unlock(&pp->pr_slock);
1.66 thorpej 2232: }
2233: pa->pa_flags &= ~PA_WANT;
2234: simple_unlock(&pa->pa_slock);
2235: splx(s);
2236: }
2237:
2238: void *
2239: pool_page_alloc(struct pool *pp, int flags)
2240: {
2241: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2242:
2243: return ((void *) uvm_km_alloc_poolpage(waitok));
2244: }
2245:
2246: void
2247: pool_page_free(struct pool *pp, void *v)
2248: {
2249:
2250: uvm_km_free_poolpage((vaddr_t) v);
2251: }
2252:
2253: #ifdef POOL_SUBPAGE
2254: /* Sub-page allocator, for machines with large hardware pages. */
2255: void *
2256: pool_subpage_alloc(struct pool *pp, int flags)
2257: {
1.93 dbj 2258: void *v;
2259: int s;
2260: s = splvm();
2261: v = pool_get(&psppool, flags);
2262: splx(s);
2263: return v;
1.66 thorpej 2264: }
2265:
2266: void
2267: pool_subpage_free(struct pool *pp, void *v)
2268: {
1.93 dbj 2269: int s;
2270: s = splvm();
1.66 thorpej 2271: pool_put(&psppool, v);
1.93 dbj 2272: splx(s);
1.66 thorpej 2273: }
2274:
2275: /* We don't provide a real nointr allocator. Maybe later. */
2276: void *
2277: pool_page_alloc_nointr(struct pool *pp, int flags)
2278: {
2279:
2280: return (pool_subpage_alloc(pp, flags));
2281: }
2282:
2283: void
2284: pool_page_free_nointr(struct pool *pp, void *v)
2285: {
2286:
2287: pool_subpage_free(pp, v);
2288: }
2289: #else
2290: void *
2291: pool_page_alloc_nointr(struct pool *pp, int flags)
2292: {
2293: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2294:
2295: return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2296: uvm.kernel_object, waitok));
2297: }
2298:
2299: void
2300: pool_page_free_nointr(struct pool *pp, void *v)
2301: {
2302:
2303: uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2304: }
2305: #endif /* POOL_SUBPAGE */