Annotation of src/sys/kern/subr_pool.c, Revision 1.99.8.1.2.1
1.99.8.1 tron 1: /* $NetBSD$ */
1.1 pk 2:
3: /*-
1.43 thorpej 4: * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.99.8.1 tron 41: __KERNEL_RCSID(0, "$NetBSD$");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.3 pk 56:
57: #include <uvm/uvm.h>
58:
1.1 pk 59: /*
60: * Pool resource management utility.
1.3 pk 61: *
1.88 chs 62: * Memory is allocated in pages which are split into pieces according to
63: * the pool item size. Each page is kept on one of three lists in the
64: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
65: * for empty, full and partially-full pages respectively. The individual
66: * pool items are on a linked list headed by `ph_itemlist' in each page
67: * header. The memory for building the page list is either taken from
68: * the allocated pages themselves (for small pool items) or taken from
69: * an internal pool of page headers (`phpool').
1.1 pk 70: */
71:
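/*
 * A minimal usage sketch (the `struct foo' type and the `foo_pool'
 * name are illustrative, not part of this file):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL);	(NULL selects the default kmem allocator)
 *	...
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */
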
1.3 pk 72: /* List of all pools */
1.5 thorpej 73: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.3 pk 74:
75: /* Private pool for page header structures */
1.97 yamt 76: #define PHPOOL_MAX 8
77: static struct pool phpool[PHPOOL_MAX];
78: #define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx)))
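/*
 * phpool[0] serves plain page headers with no item freelist; phpool[1]
 * through phpool[PHPOOL_MAX - 1] carry PR_NOTOUCH freelists of 2, 4,
 * ..., 128 entries respectively, per PHPOOL_FREELIST_NELEM() above.
 */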
1.3 pk 79:
1.62 bjh21 80: #ifdef POOL_SUBPAGE
81: /* Pool of subpages for use by normal pools. */
82: static struct pool psppool;
83: #endif
84:
1.98 yamt 85: static void *pool_page_alloc_meta(struct pool *, int);
86: static void pool_page_free_meta(struct pool *, void *);
87:
88: /* allocator for pool metadata */
89: static struct pool_allocator pool_allocator_meta = {
90: pool_page_alloc_meta, pool_page_free_meta
91: };
92:
1.3 pk 93: /* # of seconds to retain page after last use */
94: int pool_inactive_time = 10;
95:
96: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 97: static struct pool *drainpp;
98:
99: /* This spin lock protects both pool_head and drainpp. */
100: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 101:
1.99 yamt 102: typedef uint8_t pool_item_freelist_t;
103:
1.3 pk 104: struct pool_item_header {
105: /* Page headers */
1.88 chs 106: LIST_ENTRY(pool_item_header)
1.3 pk 107: ph_pagelist; /* pool page list */
1.88 chs 108: SPLAY_ENTRY(pool_item_header)
109: ph_node; /* Off-page page headers */
1.3 pk 110: caddr_t ph_page; /* this page's address */
111: struct timeval ph_time; /* last referenced */
1.97 yamt 112: union {
113: /* !PR_NOTOUCH */
114: struct {
115: TAILQ_HEAD(, pool_item)
116: phu_itemlist; /* chunk list for this page */
117: } phu_normal;
118: /* PR_NOTOUCH */
119: struct {
120: uint16_t
121: phu_off; /* start offset in page */
1.99 yamt 122: pool_item_freelist_t
1.97 yamt 123: phu_firstfree; /* first free item */
1.99 yamt 124: /*
125: * XXX it might be better to use
126: * a simple bitmap and ffs(3)
127: */
1.97 yamt 128: } phu_notouch;
129: } ph_u;
130: uint16_t ph_nmissing; /* # of chunks in use */
1.3 pk 131: };
1.97 yamt 132: #define ph_itemlist ph_u.phu_normal.phu_itemlist
133: #define ph_off ph_u.phu_notouch.phu_off
134: #define ph_firstfree ph_u.phu_notouch.phu_firstfree
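/*
 * Normal pages thread their free items through `struct pool_item'
 * links written into the free memory itself; PR_NOTOUCH pages never
 * touch item memory, and instead keep a small index freelist just
 * after the header (see PR_FREELIST() below).
 */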
1.3 pk 135:
1.1 pk 136: struct pool_item {
1.3 pk 137: #ifdef DIAGNOSTIC
1.82 thorpej 138: u_int pi_magic;
1.33 chs 139: #endif
1.82 thorpej 140: #define PI_MAGIC 0xdeadbeefU
1.3 pk 141: /* Other entries use only this list entry */
142: TAILQ_ENTRY(pool_item) pi_list;
143: };
144:
1.53 thorpej 145: #define POOL_NEEDS_CATCHUP(pp) \
146: ((pp)->pr_nitems < (pp)->pr_minitems)
147:
1.43 thorpej 148: /*
149: * Pool cache management.
150: *
151: * Pool caches provide a way for constructed objects to be cached by the
152: * pool subsystem. This can lead to performance improvements by avoiding
153: * needless object construction/destruction; it is deferred until absolutely
154: * necessary.
155: *
156: * Caches are grouped into cache groups. Each cache group references
157: * up to 16 constructed objects. When a cache allocates an object
158: * from the pool, it calls the object's constructor and places it into
159: * a cache group. When a cache group frees an object back to the pool,
160: * it first calls the object's destructor. This allows the object to
161: * persist in constructed form while freed to the cache.
162: *
163: * Multiple caches may exist for each pool. This allows a single
164: * object type to have multiple constructed forms. The pool references
165: * each cache, so that when a pool is drained by the pagedaemon, it can
166: * drain each individual cache as well. Each time a cache is drained,
167: * the most idle cache group is freed to the pool in its entirety.
168: *
 169:  * Pool caches are laid on top of pools.  By layering them, we can avoid
170: * the complexity of cache management for pools which would not benefit
171: * from it.
172: */
173:
174: /* The cache group pool. */
175: static struct pool pcgpool;
1.3 pk 176:
1.99.8.1 tron 177: static void pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *);
1.3 pk 178:
1.42 thorpej 179: static int pool_catchup(struct pool *);
1.55 thorpej 180: static void pool_prime_page(struct pool *, caddr_t,
181: struct pool_item_header *);
1.88 chs 182: static void pool_update_curpage(struct pool *);
1.66 thorpej 183:
184: void *pool_allocator_alloc(struct pool *, int);
185: void pool_allocator_free(struct pool *, void *);
1.3 pk 186:
1.97 yamt 187: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 188: void (*)(const char *, ...));
1.42 thorpej 189: static void pool_print1(struct pool *, const char *,
190: void (*)(const char *, ...));
1.3 pk 191:
1.88 chs 192: static int pool_chk_page(struct pool *, const char *,
193: struct pool_item_header *);
194:
1.3 pk 195: /*
1.52 thorpej 196: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 197: */
198: struct pool_log {
199: const char *pl_file;
200: long pl_line;
201: int pl_action;
1.25 thorpej 202: #define PRLOG_GET 1
203: #define PRLOG_PUT 2
1.3 pk 204: void *pl_addr;
1.1 pk 205: };
206:
1.86 matt 207: #ifdef POOL_DIAGNOSTIC
1.3 pk 208: /* Number of entries in pool log buffers */
1.17 thorpej 209: #ifndef POOL_LOGSIZE
210: #define POOL_LOGSIZE 10
211: #endif
212:
213: int pool_logsize = POOL_LOGSIZE;
1.1 pk 214:
1.42 thorpej 215: static __inline void
216: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 217: {
218: int n = pp->pr_curlogentry;
219: struct pool_log *pl;
220:
1.20 thorpej 221: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 222: return;
223:
224: /*
225: * Fill in the current entry. Wrap around and overwrite
226: * the oldest entry if necessary.
227: */
228: pl = &pp->pr_log[n];
229: pl->pl_file = file;
230: pl->pl_line = line;
231: pl->pl_action = action;
232: pl->pl_addr = v;
233: if (++n >= pp->pr_logsize)
234: n = 0;
235: pp->pr_curlogentry = n;
236: }
237:
238: static void
1.42 thorpej 239: pr_printlog(struct pool *pp, struct pool_item *pi,
240: void (*pr)(const char *, ...))
1.3 pk 241: {
242: int i = pp->pr_logsize;
243: int n = pp->pr_curlogentry;
244:
1.20 thorpej 245: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 246: return;
247:
248: /*
249: * Print all entries in this pool's log.
250: */
251: while (i-- > 0) {
252: struct pool_log *pl = &pp->pr_log[n];
253: if (pl->pl_action != 0) {
1.25 thorpej 254: if (pi == NULL || pi == pl->pl_addr) {
255: (*pr)("\tlog entry %d:\n", i);
256: (*pr)("\t\taction = %s, addr = %p\n",
257: pl->pl_action == PRLOG_GET ? "get" : "put",
258: pl->pl_addr);
259: (*pr)("\t\tfile: %s at line %lu\n",
260: pl->pl_file, pl->pl_line);
261: }
1.3 pk 262: }
263: if (++n >= pp->pr_logsize)
264: n = 0;
265: }
266: }
1.25 thorpej 267:
1.42 thorpej 268: static __inline void
269: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 270: {
271:
1.34 thorpej 272: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 273: printf("pool %s: reentrancy at file %s line %ld\n",
274: pp->pr_wchan, file, line);
275: printf(" previous entry at file %s line %ld\n",
276: pp->pr_entered_file, pp->pr_entered_line);
277: panic("pr_enter");
278: }
279:
280: pp->pr_entered_file = file;
281: pp->pr_entered_line = line;
282: }
283:
1.42 thorpej 284: static __inline void
285: pr_leave(struct pool *pp)
1.25 thorpej 286: {
287:
1.34 thorpej 288: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 289: printf("pool %s not entered?\n", pp->pr_wchan);
290: panic("pr_leave");
291: }
292:
293: pp->pr_entered_file = NULL;
294: pp->pr_entered_line = 0;
295: }
296:
1.42 thorpej 297: static __inline void
298: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 299: {
300:
301: if (pp->pr_entered_file != NULL)
302: (*pr)("\n\tcurrently entered from file %s line %ld\n",
303: pp->pr_entered_file, pp->pr_entered_line);
304: }
1.3 pk 305: #else
1.25 thorpej 306: #define pr_log(pp, v, action, file, line)
307: #define pr_printlog(pp, pi, pr)
308: #define pr_enter(pp, file, line)
309: #define pr_leave(pp)
310: #define pr_enter_check(pp, pr)
1.59 thorpej 311: #endif /* POOL_DIAGNOSTIC */
1.3 pk 312:
1.88 chs 313: static __inline int
1.97 yamt 314: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
315: const void *v)
316: {
317: const char *cp = v;
318: int idx;
319:
320: KASSERT(pp->pr_roflags & PR_NOTOUCH);
321: idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
322: KASSERT(idx < pp->pr_itemsperpage);
323: return idx;
324: }
325:
1.99 yamt 326: #define PR_FREELIST_ALIGN(p) \
327: roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
328: #define PR_FREELIST(ph) ((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
329: #define PR_INDEX_USED ((pool_item_freelist_t)-1)
330: #define PR_INDEX_EOL ((pool_item_freelist_t)-2)
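/*
 * Worked example: a freshly primed PR_NOTOUCH page holding four items
 * has ph_firstfree = 0 and freelist = { 1, 2, 3, PR_INDEX_EOL }
 * (see pool_prime_page()); a get pops the chain head and stamps its
 * slot PR_INDEX_USED, and a put pushes the slot back onto the chain.
 */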
1.97 yamt 331:
332: static __inline void
333: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
334: void *obj)
335: {
336: int idx = pr_item_notouch_index(pp, ph, obj);
1.99 yamt 337: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 338:
339: KASSERT(freelist[idx] == PR_INDEX_USED);
340: freelist[idx] = ph->ph_firstfree;
341: ph->ph_firstfree = idx;
342: }
343:
344: static __inline void *
345: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
346: {
347: int idx = ph->ph_firstfree;
1.99 yamt 348: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 349:
350: KASSERT(freelist[idx] != PR_INDEX_USED);
351: ph->ph_firstfree = freelist[idx];
352: freelist[idx] = PR_INDEX_USED;
353:
354: return ph->ph_page + ph->ph_off + idx * pp->pr_size;
355: }
356:
357: static __inline int
1.88 chs 358: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
359: {
360: if (a->ph_page < b->ph_page)
361: return (-1);
362: else if (a->ph_page > b->ph_page)
363: return (1);
364: else
365: return (0);
366: }
367:
368: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
369: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
370:
1.3 pk 371: /*
372: * Return the pool page header based on page address.
373: */
1.42 thorpej 374: static __inline struct pool_item_header *
375: pr_find_pagehead(struct pool *pp, caddr_t page)
1.3 pk 376: {
1.88 chs 377: struct pool_item_header *ph, tmp;
1.3 pk 378:
1.20 thorpej 379: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 380: return ((struct pool_item_header *)(page + pp->pr_phoffset));
381:
1.88 chs 382: tmp.ph_page = page;
383: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
384: return ph;
1.3 pk 385: }
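
/*
 * With PR_PHINPAGE the header sits at a fixed offset inside the page
 * itself; otherwise it is looked up in the pr_phtree splay tree keyed
 * by page address.  Callers obtain `page' by masking the item address
 * with pa_pagemask, as pool_do_put() does.
 */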
386:
1.99.8.1 tron 387: static void
388: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
389: {
390: struct pool_item_header *ph;
391: int s;
392:
393: while ((ph = LIST_FIRST(pq)) != NULL) {
394: LIST_REMOVE(ph, ph_pagelist);
395: pool_allocator_free(pp, ph->ph_page);
396: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
397: s = splvm();
398: pool_put(pp->pr_phpool, ph);
399: splx(s);
400: }
401: }
402: }
403:
1.3 pk 404: /*
405: * Remove a page from the pool.
406: */
1.42 thorpej 407: static __inline void
1.61 chs 408: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
409: struct pool_pagelist *pq)
1.3 pk 410: {
411:
1.99.8.1 tron 412: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1.91 yamt 413:
1.3 pk 414: /*
1.7 thorpej 415: * If the page was idle, decrement the idle page count.
1.3 pk 416: */
1.6 thorpej 417: if (ph->ph_nmissing == 0) {
418: #ifdef DIAGNOSTIC
419: if (pp->pr_nidle == 0)
420: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 421: if (pp->pr_nitems < pp->pr_itemsperpage)
422: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 423: #endif
424: pp->pr_nidle--;
425: }
1.7 thorpej 426:
1.20 thorpej 427: pp->pr_nitems -= pp->pr_itemsperpage;
428:
1.7 thorpej 429: /*
1.99.8.1 tron 430: * Unlink the page from the pool and queue it for release.
1.7 thorpej 431: */
1.88 chs 432: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 433: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
434: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.99.8.1 tron 435: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
436:
1.7 thorpej 437: pp->pr_npages--;
438: pp->pr_npagefree++;
1.6 thorpej 439:
1.88 chs 440: pool_update_curpage(pp);
1.3 pk 441: }
442:
443: /*
1.94 simonb 444: * Initialize all the pools listed in the "pools" link set.
445: */
446: void
447: link_pool_init(void)
448: {
449: __link_set_decl(pools, struct link_pool_init);
450: struct link_pool_init * const *pi;
451:
452: __link_set_foreach(pi, pools)
453: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
454: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
455: (*pi)->palloc);
456: }
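
/*
 * A static pool is typically placed in the "pools" link set with the
 * POOL_INIT() macro from <sys/pool.h>; a sketch (the `foo' names are
 * illustrative):
 *
 *	POOL_INIT(foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 */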
457:
458: /*
1.3 pk 459: * Initialize the given pool resource structure.
460: *
461: * We export this routine to allow other kernel parts to declare
462: * static pools that must be initialized before malloc() is available.
463: */
464: void
1.42 thorpej 465: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.66 thorpej 466: const char *wchan, struct pool_allocator *palloc)
1.3 pk 467: {
1.88 chs 468: int off, slack;
1.92 enami 469: size_t trysize, phsize;
1.93 dbj 470: int s;
1.3 pk 471:
1.99 yamt 472: KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
473: PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
474:
1.25 thorpej 475: #ifdef POOL_DIAGNOSTIC
476: /*
477: * Always log if POOL_DIAGNOSTIC is defined.
478: */
479: if (pool_logsize != 0)
480: flags |= PR_LOGGING;
481: #endif
482:
1.66 thorpej 483: if (palloc == NULL)
484: palloc = &pool_allocator_kmem;
1.99.8.1.2.1! tron 485: #ifdef POOL_SUBPAGE
! 486: if (size > palloc->pa_pagesz) {
! 487: if (palloc == &pool_allocator_kmem)
! 488: palloc = &pool_allocator_kmem_fullpage;
! 489: else if (palloc == &pool_allocator_nointr)
! 490: palloc = &pool_allocator_nointr_fullpage;
! 491: }
1.66 thorpej 492: #endif /* POOL_SUBPAGE */
493: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.99.8.1.2.1! tron 494: if (palloc->pa_pagesz == 0)
1.66 thorpej 495: palloc->pa_pagesz = PAGE_SIZE;
496:
497: TAILQ_INIT(&palloc->pa_list);
498:
499: simple_lock_init(&palloc->pa_slock);
500: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
501: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
502: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 503: }
1.3 pk 504:
505: if (align == 0)
506: align = ALIGN(1);
1.14 thorpej 507:
508: if (size < sizeof(struct pool_item))
509: size = sizeof(struct pool_item);
1.3 pk 510:
1.78 thorpej 511: size = roundup(size, align);
1.66 thorpej 512: #ifdef DIAGNOSTIC
513: if (size > palloc->pa_pagesz)
1.35 pk 514: panic("pool_init: pool item size (%lu) too large",
515: (u_long)size);
1.66 thorpej 516: #endif
1.35 pk 517:
1.3 pk 518: /*
519: * Initialize the pool structure.
520: */
1.88 chs 521: LIST_INIT(&pp->pr_emptypages);
522: LIST_INIT(&pp->pr_fullpages);
523: LIST_INIT(&pp->pr_partpages);
1.43 thorpej 524: TAILQ_INIT(&pp->pr_cachelist);
1.3 pk 525: pp->pr_curpage = NULL;
526: pp->pr_npages = 0;
527: pp->pr_minitems = 0;
528: pp->pr_minpages = 0;
529: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 530: pp->pr_roflags = flags;
531: pp->pr_flags = 0;
1.35 pk 532: pp->pr_size = size;
1.3 pk 533: pp->pr_align = align;
534: pp->pr_wchan = wchan;
1.66 thorpej 535: pp->pr_alloc = palloc;
1.20 thorpej 536: pp->pr_nitems = 0;
537: pp->pr_nout = 0;
538: pp->pr_hardlimit = UINT_MAX;
539: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 540: pp->pr_hardlimit_ratecap.tv_sec = 0;
541: pp->pr_hardlimit_ratecap.tv_usec = 0;
542: pp->pr_hardlimit_warning_last.tv_sec = 0;
543: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 544: pp->pr_drain_hook = NULL;
545: pp->pr_drain_hook_arg = NULL;
1.3 pk 546:
547: /*
                    548: 	 * Decide whether to put the page header off-page, to avoid
1.92       enami    549: 	 * wasting too large a part of the page or too big an item.
                    550: 	 * Off-page page headers are kept in a splay tree, so we can match
                    551: 	 * a returned item with its header based on the page address.
                    552: 	 * We use 1/16 of the page size and about 8 times the header
                    553: 	 * size as thresholds for the item size (XXX: tune).
554: *
555: * However, we'll put the header into the page if we can put
556: * it without wasting any items.
557: *
558: * Silently enforce `0 <= ioff < align'.
1.3 pk 559: */
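	/*
	 * Worked example, assuming a 4096-byte page and a 32-byte
	 * aligned header (phsize): a 64-byte item passes the size test
	 * (64 < MIN(4096/16, 32*8) == 256), so its header stays
	 * in-page; a 1024-byte item fails it, and since 4096/1024 == 4
	 * while (4096-32)/1024 == 3, an in-page header would cost an
	 * item, so the header goes off-page.
	 */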
1.92 enami 560: pp->pr_itemoffset = ioff %= align;
561: /* See the comment below about reserved bytes. */
562: trysize = palloc->pa_pagesz - ((align - ioff) % align);
563: phsize = ALIGN(sizeof(struct pool_item_header));
1.97 yamt 564: if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
565: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
566: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 567: /* Use the end of the page for the page header */
1.20 thorpej 568: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 569: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 570: } else {
1.3 pk 571: /* The page header will be taken from our page header pool */
572: pp->pr_phoffset = 0;
1.66 thorpej 573: off = palloc->pa_pagesz;
1.88 chs 574: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 575: }
1.1 pk 576:
1.3 pk 577: /*
578: * Alignment is to take place at `ioff' within the item. This means
579: * we must reserve up to `align - 1' bytes on the page to allow
580: * appropriate positioning of each item.
581: */
582: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 583: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 584: if ((pp->pr_roflags & PR_NOTOUCH)) {
585: int idx;
586:
587: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
588: idx++) {
589: /* nothing */
590: }
591: if (idx >= PHPOOL_MAX) {
592: /*
                    593: 			 * if you see this panic, consider tweaking
594: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
595: */
596: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
597: pp->pr_wchan, pp->pr_itemsperpage);
598: }
599: pp->pr_phpool = &phpool[idx];
600: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
601: pp->pr_phpool = &phpool[0];
602: }
603: #if defined(DIAGNOSTIC)
604: else {
605: pp->pr_phpool = NULL;
606: }
607: #endif
1.3 pk 608:
609: /*
610: * Use the slack between the chunks and the page header
611: * for "cache coloring".
612: */
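	/*
	 * Successive pages start their first item at offsets 0, align,
	 * 2*align, ... wrapping at pr_maxcolor (see pool_prime_page()),
	 * so the same item index on different pages lands on different
	 * cache lines.
	 */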
613: slack = off - pp->pr_itemsperpage * pp->pr_size;
614: pp->pr_maxcolor = (slack / align) * align;
615: pp->pr_curcolor = 0;
616:
617: pp->pr_nget = 0;
618: pp->pr_nfail = 0;
619: pp->pr_nput = 0;
620: pp->pr_npagealloc = 0;
621: pp->pr_npagefree = 0;
1.1 pk 622: pp->pr_hiwat = 0;
1.8 thorpej 623: pp->pr_nidle = 0;
1.3 pk 624:
1.59 thorpej 625: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 626: if (flags & PR_LOGGING) {
627: if (kmem_map == NULL ||
628: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
629: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 630: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 631: pp->pr_curlogentry = 0;
632: pp->pr_logsize = pool_logsize;
633: }
1.59 thorpej 634: #endif
1.25 thorpej 635:
636: pp->pr_entered_file = NULL;
637: pp->pr_entered_line = 0;
1.3 pk 638:
1.21 thorpej 639: simple_lock_init(&pp->pr_slock);
1.1 pk 640:
1.3 pk 641: /*
1.43 thorpej 642: * Initialize private page header pool and cache magazine pool if we
643: * haven't done so yet.
1.23 thorpej 644: * XXX LOCKING.
1.3 pk 645: */
1.97 yamt 646: if (phpool[0].pr_size == 0) {
647: int idx;
648: for (idx = 0; idx < PHPOOL_MAX; idx++) {
649: static char phpool_names[PHPOOL_MAX][6+1+6+1];
650: int nelem;
651: size_t sz;
652:
653: nelem = PHPOOL_FREELIST_NELEM(idx);
654: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
655: "phpool-%d", nelem);
656: sz = sizeof(struct pool_item_header);
657: if (nelem) {
658: sz = PR_FREELIST_ALIGN(sz)
1.99 yamt 659: + nelem * sizeof(pool_item_freelist_t);
1.97 yamt 660: }
661: pool_init(&phpool[idx], sz, 0, 0, 0,
1.98 yamt 662: phpool_names[idx], &pool_allocator_meta);
1.97 yamt 663: }
1.62 bjh21 664: #ifdef POOL_SUBPAGE
665: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.98 yamt 666: PR_RECURSIVE, "psppool", &pool_allocator_meta);
1.62 bjh21 667: #endif
1.43 thorpej 668: pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.98 yamt 669: 0, "pcgpool", &pool_allocator_meta);
1.1 pk 670: }
671:
1.23 thorpej 672: /* Insert into the list of all pools. */
673: simple_lock(&pool_head_slock);
674: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
675: simple_unlock(&pool_head_slock);
1.66 thorpej 676:
677: /* Insert this into the list of pools using this allocator. */
1.93 dbj 678: s = splvm();
1.66 thorpej 679: simple_lock(&palloc->pa_slock);
680: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
681: simple_unlock(&palloc->pa_slock);
1.93 dbj 682: splx(s);
1.1 pk 683: }
684:
685: /*
                    686:  * De-commission a pool resource.
687: */
688: void
1.42 thorpej 689: pool_destroy(struct pool *pp)
1.1 pk 690: {
1.99.8.1 tron 691: struct pool_pagelist pq;
1.3 pk 692: struct pool_item_header *ph;
1.93 dbj 693: int s;
1.43 thorpej 694:
1.99.8.1 tron 695: /* Remove from global pool list */
696: simple_lock(&pool_head_slock);
697: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
698: if (drainpp == pp)
699: drainpp = NULL;
700: simple_unlock(&pool_head_slock);
701:
702: /* Remove this pool from its allocator's list of pools. */
1.93 dbj 703: s = splvm();
1.66 thorpej 704: simple_lock(&pp->pr_alloc->pa_slock);
705: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
706: simple_unlock(&pp->pr_alloc->pa_slock);
1.93 dbj 707: splx(s);
1.66 thorpej 708:
1.99.8.1 tron 709: s = splvm();
710: simple_lock(&pp->pr_slock);
711:
712: KASSERT(TAILQ_EMPTY(&pp->pr_cachelist));
1.3 pk 713:
714: #ifdef DIAGNOSTIC
1.20 thorpej 715: if (pp->pr_nout != 0) {
1.25 thorpej 716: pr_printlog(pp, NULL, printf);
1.80 provos 717: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 718: pp->pr_nout);
1.3 pk 719: }
720: #endif
1.1 pk 721:
1.88 chs 722: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
723: KASSERT(LIST_EMPTY(&pp->pr_partpages));
1.3 pk 724:
1.99.8.1 tron 725: /* Remove all pages */
726: LIST_INIT(&pq);
727: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
728: pr_rmpage(pp, ph, &pq);
729:
730: simple_unlock(&pp->pr_slock);
731: splx(s);
732:
733: pr_pagelist_free(pp, &pq);
1.3 pk 734:
1.59 thorpej 735: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 736: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 737: free(pp->pr_log, M_TEMP);
1.59 thorpej 738: #endif
1.1 pk 739: }
740:
1.68 thorpej 741: void
742: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
743: {
744:
745: /* XXX no locking -- must be used just after pool_init() */
746: #ifdef DIAGNOSTIC
747: if (pp->pr_drain_hook != NULL)
748: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
749: #endif
750: pp->pr_drain_hook = fn;
751: pp->pr_drain_hook_arg = arg;
752: }
753:
1.88 chs 754: static struct pool_item_header *
1.55 thorpej 755: pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
756: {
757: struct pool_item_header *ph;
758: int s;
759:
760: LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
761:
762: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
763: ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
764: else {
1.85 pk 765: s = splvm();
1.97 yamt 766: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 767: splx(s);
768: }
769:
770: return (ph);
771: }
1.1 pk 772:
773: /*
1.3 pk 774: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 775: */
1.3 pk 776: void *
1.59 thorpej 777: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 778: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 779: #else
780: pool_get(struct pool *pp, int flags)
781: #endif
1.1 pk 782: {
783: struct pool_item *pi;
1.3 pk 784: struct pool_item_header *ph;
1.55 thorpej 785: void *v;
1.1 pk 786:
1.2 pk 787: #ifdef DIAGNOSTIC
1.95 atatat 788: if (__predict_false(pp->pr_itemsperpage == 0))
789: panic("pool_get: pool %p: pr_itemsperpage is zero, "
790: "pool not initialized?", pp);
1.84 thorpej 791: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 792: (flags & PR_WAITOK) != 0))
1.77 matt 793: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 794:
795: #ifdef LOCKDEBUG
796: if (flags & PR_WAITOK)
797: simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 798: #endif
1.58 thorpej 799: #endif /* DIAGNOSTIC */
1.1 pk 800:
1.21 thorpej 801: simple_lock(&pp->pr_slock);
1.25 thorpej 802: pr_enter(pp, file, line);
1.20 thorpej 803:
804: startover:
805: /*
806: * Check to see if we've reached the hard limit. If we have,
807: * and we can wait, then wait until an item has been returned to
808: * the pool.
809: */
810: #ifdef DIAGNOSTIC
1.34 thorpej 811: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 812: pr_leave(pp);
1.21 thorpej 813: simple_unlock(&pp->pr_slock);
1.20 thorpej 814: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
815: }
816: #endif
1.34 thorpej 817: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 818: if (pp->pr_drain_hook != NULL) {
819: /*
820: * Since the drain hook is going to free things
821: * back to the pool, unlock, call the hook, re-lock,
822: * and check the hardlimit condition again.
823: */
824: pr_leave(pp);
825: simple_unlock(&pp->pr_slock);
826: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
827: simple_lock(&pp->pr_slock);
828: pr_enter(pp, file, line);
829: if (pp->pr_nout < pp->pr_hardlimit)
830: goto startover;
831: }
832:
1.29 sommerfe 833: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 834: /*
835: * XXX: A warning isn't logged in this case. Should
836: * it be?
837: */
838: pp->pr_flags |= PR_WANTED;
1.25 thorpej 839: pr_leave(pp);
1.40 sommerfe 840: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 841: pr_enter(pp, file, line);
1.20 thorpej 842: goto startover;
843: }
1.31 thorpej 844:
845: /*
846: * Log a message that the hard limit has been hit.
847: */
848: if (pp->pr_hardlimit_warning != NULL &&
849: ratecheck(&pp->pr_hardlimit_warning_last,
850: &pp->pr_hardlimit_ratecap))
851: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 852:
853: pp->pr_nfail++;
854:
1.25 thorpej 855: pr_leave(pp);
1.21 thorpej 856: simple_unlock(&pp->pr_slock);
1.20 thorpej 857: return (NULL);
858: }
859:
1.3 pk 860: /*
861: * The convention we use is that if `curpage' is not NULL, then
862: * it points at a non-empty bucket. In particular, `curpage'
863: * never points at a page header which has PR_PHINPAGE set and
864: * has no items in its bucket.
865: */
1.20 thorpej 866: if ((ph = pp->pr_curpage) == NULL) {
867: #ifdef DIAGNOSTIC
868: if (pp->pr_nitems != 0) {
1.21 thorpej 869: simple_unlock(&pp->pr_slock);
1.20 thorpej 870: printf("pool_get: %s: curpage NULL, nitems %u\n",
871: pp->pr_wchan, pp->pr_nitems);
1.80 provos 872: panic("pool_get: nitems inconsistent");
1.20 thorpej 873: }
874: #endif
875:
1.21 thorpej 876: /*
877: * Call the back-end page allocator for more memory.
878: * Release the pool lock, as the back-end page allocator
879: * may block.
880: */
1.25 thorpej 881: pr_leave(pp);
1.21 thorpej 882: simple_unlock(&pp->pr_slock);
1.66 thorpej 883: v = pool_allocator_alloc(pp, flags);
1.55 thorpej 884: if (__predict_true(v != NULL))
885: ph = pool_alloc_item_header(pp, v, flags);
1.15 pk 886:
1.55 thorpej 887: if (__predict_false(v == NULL || ph == NULL)) {
888: if (v != NULL)
1.66 thorpej 889: pool_allocator_free(pp, v);
1.55 thorpej 890:
1.91 yamt 891: simple_lock(&pp->pr_slock);
892: pr_enter(pp, file, line);
893:
1.21 thorpej 894: /*
1.55 thorpej 895: * We were unable to allocate a page or item
896: * header, but we released the lock during
897: * allocation, so perhaps items were freed
898: * back to the pool. Check for this case.
1.21 thorpej 899: */
900: if (pp->pr_curpage != NULL)
901: goto startover;
1.15 pk 902:
1.3 pk 903: if ((flags & PR_WAITOK) == 0) {
904: pp->pr_nfail++;
1.25 thorpej 905: pr_leave(pp);
1.21 thorpej 906: simple_unlock(&pp->pr_slock);
1.1 pk 907: return (NULL);
1.3 pk 908: }
909:
1.15 pk 910: /*
911: * Wait for items to be returned to this pool.
1.21 thorpej 912: *
1.20 thorpej 913: * XXX: maybe we should wake up once a second and
914: * try again?
1.15 pk 915: */
1.1 pk 916: pp->pr_flags |= PR_WANTED;
1.66 thorpej 917: /* PA_WANTED is already set on the allocator. */
1.25 thorpej 918: pr_leave(pp);
1.40 sommerfe 919: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 920: pr_enter(pp, file, line);
1.20 thorpej 921: goto startover;
1.1 pk 922: }
1.3 pk 923:
1.15 pk 924: /* We have more memory; add it to the pool */
1.91 yamt 925: simple_lock(&pp->pr_slock);
926: pr_enter(pp, file, line);
1.55 thorpej 927: pool_prime_page(pp, v, ph);
1.15 pk 928: pp->pr_npagealloc++;
929:
1.20 thorpej 930: /* Start the allocation process over. */
931: goto startover;
1.3 pk 932: }
1.97 yamt 933: if (pp->pr_roflags & PR_NOTOUCH) {
934: #ifdef DIAGNOSTIC
935: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
936: pr_leave(pp);
937: simple_unlock(&pp->pr_slock);
938: panic("pool_get: %s: page empty", pp->pr_wchan);
939: }
940: #endif
941: v = pr_item_notouch_get(pp, ph);
942: #ifdef POOL_DIAGNOSTIC
943: pr_log(pp, v, PRLOG_GET, file, line);
944: #endif
945: } else {
946: v = pi = TAILQ_FIRST(&ph->ph_itemlist);
947: if (__predict_false(v == NULL)) {
948: pr_leave(pp);
949: simple_unlock(&pp->pr_slock);
950: panic("pool_get: %s: page empty", pp->pr_wchan);
951: }
1.20 thorpej 952: #ifdef DIAGNOSTIC
1.97 yamt 953: if (__predict_false(pp->pr_nitems == 0)) {
954: pr_leave(pp);
955: simple_unlock(&pp->pr_slock);
956: printf("pool_get: %s: items on itemlist, nitems %u\n",
957: pp->pr_wchan, pp->pr_nitems);
958: panic("pool_get: nitems inconsistent");
959: }
1.65 enami 960: #endif
1.56 sommerfe 961:
1.65 enami 962: #ifdef POOL_DIAGNOSTIC
1.97 yamt 963: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 964: #endif
1.3 pk 965:
1.65 enami 966: #ifdef DIAGNOSTIC
1.97 yamt 967: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
968: pr_printlog(pp, pi, printf);
969: panic("pool_get(%s): free list modified: "
970: "magic=%x; page %p; item addr %p\n",
971: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
972: }
1.3 pk 973: #endif
974:
1.97 yamt 975: /*
976: * Remove from item list.
977: */
978: TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
979: }
1.20 thorpej 980: pp->pr_nitems--;
981: pp->pr_nout++;
1.6 thorpej 982: if (ph->ph_nmissing == 0) {
983: #ifdef DIAGNOSTIC
1.34 thorpej 984: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 985: panic("pool_get: nidle inconsistent");
986: #endif
987: pp->pr_nidle--;
1.88 chs 988:
989: /*
990: * This page was previously empty. Move it to the list of
991: * partially-full pages. This page is already curpage.
992: */
993: LIST_REMOVE(ph, ph_pagelist);
994: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 995: }
1.3 pk 996: ph->ph_nmissing++;
1.97 yamt 997: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 998: #ifdef DIAGNOSTIC
1.97 yamt 999: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1000: !TAILQ_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1001: pr_leave(pp);
1.21 thorpej 1002: simple_unlock(&pp->pr_slock);
1003: panic("pool_get: %s: nmissing inconsistent",
1004: pp->pr_wchan);
1005: }
1006: #endif
1.3 pk 1007: /*
1.88 chs 1008: * This page is now full. Move it to the full list
1009: * and select a new current page.
1.3 pk 1010: */
1.88 chs 1011: LIST_REMOVE(ph, ph_pagelist);
1012: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1013: pool_update_curpage(pp);
1.1 pk 1014: }
1.3 pk 1015:
1016: pp->pr_nget++;
1.20 thorpej 1017:
1018: /*
1019: * If we have a low water mark and we are now below that low
1020: * water mark, add more items to the pool.
1021: */
1.53 thorpej 1022: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1023: /*
1024: * XXX: Should we log a warning? Should we set up a timeout
1025: * to try again in a second or so? The latter could break
1026: * a caller's assumptions about interrupt protection, etc.
1027: */
1028: }
1029:
1.25 thorpej 1030: pr_leave(pp);
1.21 thorpej 1031: simple_unlock(&pp->pr_slock);
1.1 pk 1032: return (v);
1033: }
1034:
1035: /*
1.43 thorpej 1036: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1037: */
1.43 thorpej 1038: static void
1.99.8.1 tron 1039: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1040: {
1041: struct pool_item *pi = v;
1.3 pk 1042: struct pool_item_header *ph;
1043: caddr_t page;
1.21 thorpej 1044: int s;
1.3 pk 1045:
1.61 chs 1046: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1047:
1.66 thorpej 1048: page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1.1 pk 1049:
1.30 thorpej 1050: #ifdef DIAGNOSTIC
1.34 thorpej 1051: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1052: printf("pool %s: putting with none out\n",
1053: pp->pr_wchan);
1054: panic("pool_put");
1055: }
1056: #endif
1.3 pk 1057:
1.34 thorpej 1058: if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25 thorpej 1059: pr_printlog(pp, NULL, printf);
1.3 pk 1060: panic("pool_put: %s: page header missing", pp->pr_wchan);
1061: }
1.28 thorpej 1062:
1063: #ifdef LOCKDEBUG
1064: /*
1065: * Check if we're freeing a locked simple lock.
1066: */
1067: simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
1068: #endif
1.3 pk 1069:
1070: /*
1071: * Return to item list.
1072: */
1.97 yamt 1073: if (pp->pr_roflags & PR_NOTOUCH) {
1074: pr_item_notouch_put(pp, ph, v);
1075: } else {
1.2 pk 1076: #ifdef DIAGNOSTIC
1.97 yamt 1077: pi->pi_magic = PI_MAGIC;
1.3 pk 1078: #endif
1.32 chs 1079: #ifdef DEBUG
1.97 yamt 1080: {
1081: int i, *ip = v;
1.32 chs 1082:
1.97 yamt 1083: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1084: *ip++ = PI_MAGIC;
1085: }
1.32 chs 1086: }
1087: #endif
1088:
1.97 yamt 1089: TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1090: }
1.79 thorpej 1091: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1092: ph->ph_nmissing--;
1093: pp->pr_nput++;
1.20 thorpej 1094: pp->pr_nitems++;
1095: pp->pr_nout--;
1.3 pk 1096:
1097: /* Cancel "pool empty" condition if it exists */
1098: if (pp->pr_curpage == NULL)
1099: pp->pr_curpage = ph;
1100:
1101: if (pp->pr_flags & PR_WANTED) {
1102: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1103: if (ph->ph_nmissing == 0)
1104: pp->pr_nidle++;
1.3 pk 1105: wakeup((caddr_t)pp);
1106: return;
1107: }
1108:
1109: /*
1.88 chs 1110: * If this page is now empty, do one of two things:
1.21 thorpej 1111: *
1.88 chs 1112: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1113: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1114: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1115: * CLAIM.
1.21 thorpej 1116: *
1.88 chs 1117: * (2) Otherwise, move the page to the empty page list.
1118: *
1119: * Either way, select a new current page (so we use a partially-full
1120: * page if one is available).
1.3 pk 1121: */
1122: if (ph->ph_nmissing == 0) {
1.6 thorpej 1123: pp->pr_nidle++;
1.90 thorpej 1124: if (pp->pr_npages > pp->pr_minpages &&
1125: (pp->pr_npages > pp->pr_maxpages ||
1126: (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
1.99.8.1 tron 1127: pr_rmpage(pp, ph, pq);
1.3 pk 1128: } else {
1.88 chs 1129: LIST_REMOVE(ph, ph_pagelist);
1130: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1131:
1.21 thorpej 1132: /*
1133: * Update the timestamp on the page. A page must
1134: * be idle for some period of time before it can
1135: * be reclaimed by the pagedaemon. This minimizes
1136: * ping-pong'ing for memory.
1137: */
1138: s = splclock();
1139: ph->ph_time = mono_time;
1140: splx(s);
1.1 pk 1141: }
1.88 chs 1142: pool_update_curpage(pp);
1.1 pk 1143: }
1.88 chs 1144:
1.21 thorpej 1145: /*
1.88 chs 1146: * If the page was previously completely full, move it to the
1147: * partially-full list and make it the current page. The next
1148: * allocation will get the item from this page, instead of
1149: * further fragmenting the pool.
1.21 thorpej 1150: */
1151: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1152: LIST_REMOVE(ph, ph_pagelist);
1153: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1154: pp->pr_curpage = ph;
1155: }
1.43 thorpej 1156: }
1157:
1158: /*
1159: * Return resource to the pool; must be called at appropriate spl level
1160: */
1.59 thorpej 1161: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1162: void
1163: _pool_put(struct pool *pp, void *v, const char *file, long line)
1164: {
1.99.8.1 tron 1165: struct pool_pagelist pq;
1166:
1167: LIST_INIT(&pq);
1.43 thorpej 1168:
1169: simple_lock(&pp->pr_slock);
1170: pr_enter(pp, file, line);
1171:
1.56 sommerfe 1172: pr_log(pp, v, PRLOG_PUT, file, line);
1173:
1.99.8.1 tron 1174: pool_do_put(pp, v, &pq);
1.21 thorpej 1175:
1.25 thorpej 1176: pr_leave(pp);
1.21 thorpej 1177: simple_unlock(&pp->pr_slock);
1.99.8.1 tron 1178:
1179: if (! LIST_EMPTY(&pq))
1180: pr_pagelist_free(pp, &pq);
1.1 pk 1181: }
1.57 sommerfe 1182: #undef pool_put
1.59 thorpej 1183: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1184:
1.56 sommerfe 1185: void
1186: pool_put(struct pool *pp, void *v)
1187: {
1.99.8.1 tron 1188: struct pool_pagelist pq;
1.56 sommerfe 1189:
1.99.8.1 tron 1190: LIST_INIT(&pq);
1.56 sommerfe 1191:
1.99.8.1 tron 1192: simple_lock(&pp->pr_slock);
1193: pool_do_put(pp, v, &pq);
1.56 sommerfe 1194: simple_unlock(&pp->pr_slock);
1.99.8.1 tron 1195:
1196: if (! LIST_EMPTY(&pq))
1197: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1198: }
1.57 sommerfe 1199:
1.59 thorpej 1200: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1201: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1202: #endif
1.74 thorpej 1203:
1204: /*
1205: * Add N items to the pool.
1206: */
1207: int
1208: pool_prime(struct pool *pp, int n)
1209: {
1.83 scw 1210: struct pool_item_header *ph = NULL;
1.74 thorpej 1211: caddr_t cp;
1.75 simonb 1212: int newpages;
1.74 thorpej 1213:
1214: simple_lock(&pp->pr_slock);
1215:
1216: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1217:
1218: while (newpages-- > 0) {
1219: simple_unlock(&pp->pr_slock);
1220: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1221: if (__predict_true(cp != NULL))
1222: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1223:
1224: if (__predict_false(cp == NULL || ph == NULL)) {
1225: if (cp != NULL)
1226: pool_allocator_free(pp, cp);
1.91 yamt 1227: simple_lock(&pp->pr_slock);
1.74 thorpej 1228: break;
1229: }
1230:
1.91 yamt 1231: simple_lock(&pp->pr_slock);
1.74 thorpej 1232: pool_prime_page(pp, cp, ph);
1233: pp->pr_npagealloc++;
1234: pp->pr_minpages++;
1235: }
1236:
1237: if (pp->pr_minpages >= pp->pr_maxpages)
1238: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1239:
1240: simple_unlock(&pp->pr_slock);
1241: return (0);
1242: }
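
/*
 * E.g., pool_prime(&foo_pool, 32) (name illustrative) allocates enough
 * pages to hold at least 32 items and raises pr_minpages so that
 * pool_reclaim() will not release them again.
 */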
1.55 thorpej 1243:
1244: /*
1.3 pk 1245: * Add a page worth of items to the pool.
1.21 thorpej 1246: *
1247: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1248: */
1.55 thorpej 1249: static void
1250: pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1.3 pk 1251: {
1252: struct pool_item *pi;
1253: caddr_t cp = storage;
1254: unsigned int align = pp->pr_align;
1255: unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1256: int n;
1.89 yamt 1257: int s;
1.36 pk 1258:
1.91 yamt 1259: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1260:
1.66 thorpej 1261: #ifdef DIAGNOSTIC
1262: if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1263: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1264: #endif
1.3 pk 1265:
1266: /*
1267: * Insert page header.
1268: */
1.88 chs 1269: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1270: TAILQ_INIT(&ph->ph_itemlist);
1271: ph->ph_page = storage;
1272: ph->ph_nmissing = 0;
1.89 yamt 1273: s = splclock();
1274: ph->ph_time = mono_time;
1275: splx(s);
1.88 chs 1276: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1277: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1278:
1.6 thorpej 1279: pp->pr_nidle++;
1280:
1.3 pk 1281: /*
1282: * Color this page.
1283: */
1284: cp = (caddr_t)(cp + pp->pr_curcolor);
1285: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1286: pp->pr_curcolor = 0;
1287:
1288: /*
                    1289: 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1290: */
1291: if (ioff != 0)
1292: cp = (caddr_t)(cp + (align - ioff));
1293:
1294: /*
1295: * Insert remaining chunks on the bucket list.
1296: */
1297: n = pp->pr_itemsperpage;
1.20 thorpej 1298: pp->pr_nitems += n;
1.3 pk 1299:
1.97 yamt 1300: if (pp->pr_roflags & PR_NOTOUCH) {
1.99 yamt 1301: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 1302: int i;
1303:
1.99 yamt 1304: ph->ph_off = cp - storage;
1.97 yamt 1305: ph->ph_firstfree = 0;
1306: for (i = 0; i < n - 1; i++)
1307: freelist[i] = i + 1;
1308: freelist[n - 1] = PR_INDEX_EOL;
1309: } else {
1310: while (n--) {
1311: pi = (struct pool_item *)cp;
1.78 thorpej 1312:
1.97 yamt 1313: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1314:
1.97 yamt 1315: /* Insert on page list */
1316: TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1317: #ifdef DIAGNOSTIC
1.97 yamt 1318: pi->pi_magic = PI_MAGIC;
1.3 pk 1319: #endif
1.97 yamt 1320: cp = (caddr_t)(cp + pp->pr_size);
1321: }
1.3 pk 1322: }
1323:
1324: /*
1325: * If the pool was depleted, point at the new page.
1326: */
1327: if (pp->pr_curpage == NULL)
1328: pp->pr_curpage = ph;
1329:
1330: if (++pp->pr_npages > pp->pr_hiwat)
1331: pp->pr_hiwat = pp->pr_npages;
1332: }
1333:
1.20 thorpej 1334: /*
1.52      thorpej  1335:  * Used by pool_get() when nitems drops below the low water mark.  It
1.88      chs      1336:  * catches pr_nitems up with the low water mark.
1.20 thorpej 1337: *
1.21 thorpej 1338: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1339: *
1.73 thorpej 1340: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1341: * with it locked.
1342: */
1343: static int
1.42 thorpej 1344: pool_catchup(struct pool *pp)
1.20 thorpej 1345: {
1.83 scw 1346: struct pool_item_header *ph = NULL;
1.20 thorpej 1347: caddr_t cp;
1348: int error = 0;
1349:
1.54 thorpej 1350: while (POOL_NEEDS_CATCHUP(pp)) {
1.20 thorpej 1351: /*
1.21 thorpej 1352: * Call the page back-end allocator for more memory.
1353: *
1354: * XXX: We never wait, so should we bother unlocking
1355: * the pool descriptor?
1.20 thorpej 1356: */
1.21 thorpej 1357: simple_unlock(&pp->pr_slock);
1.66 thorpej 1358: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1.55 thorpej 1359: if (__predict_true(cp != NULL))
1360: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1361: if (__predict_false(cp == NULL || ph == NULL)) {
1362: if (cp != NULL)
1.66 thorpej 1363: pool_allocator_free(pp, cp);
1.20 thorpej 1364: error = ENOMEM;
1.91 yamt 1365: simple_lock(&pp->pr_slock);
1.20 thorpej 1366: break;
1367: }
1.91 yamt 1368: simple_lock(&pp->pr_slock);
1.55 thorpej 1369: pool_prime_page(pp, cp, ph);
1.26 thorpej 1370: pp->pr_npagealloc++;
1.20 thorpej 1371: }
1372:
1373: return (error);
1374: }
1375:
1.88 chs 1376: static void
1377: pool_update_curpage(struct pool *pp)
1378: {
1379:
1380: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1381: if (pp->pr_curpage == NULL) {
1382: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1383: }
1384: }
1385:
1.3 pk 1386: void
1.42 thorpej 1387: pool_setlowat(struct pool *pp, int n)
1.3 pk 1388: {
1.15 pk 1389:
1.21 thorpej 1390: simple_lock(&pp->pr_slock);
1391:
1.3 pk 1392: pp->pr_minitems = n;
1.15 pk 1393: pp->pr_minpages = (n == 0)
1394: ? 0
1.18 thorpej 1395: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1396:
1397: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1398: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1399: /*
1400: * XXX: Should we log a warning? Should we set up a timeout
1401: * to try again in a second or so? The latter could break
1402: * a caller's assumptions about interrupt protection, etc.
1403: */
1404: }
1.21 thorpej 1405:
1406: simple_unlock(&pp->pr_slock);
1.3 pk 1407: }
1408:
1409: void
1.42 thorpej 1410: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1411: {
1.15 pk 1412:
1.21 thorpej 1413: simple_lock(&pp->pr_slock);
1414:
1.15 pk 1415: pp->pr_maxpages = (n == 0)
1416: ? 0
1.18 thorpej 1417: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1418:
1419: simple_unlock(&pp->pr_slock);
1.3 pk 1420: }
1421:
1.20 thorpej 1422: void
1.42 thorpej 1423: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1424: {
1425:
1.21 thorpej 1426: simple_lock(&pp->pr_slock);
1.20 thorpej 1427:
1428: pp->pr_hardlimit = n;
1429: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1430: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1431: pp->pr_hardlimit_warning_last.tv_sec = 0;
1432: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1433:
1434: /*
1.21 thorpej 1435: * In-line version of pool_sethiwat(), because we don't want to
1436: * release the lock.
1.20 thorpej 1437: */
1438: pp->pr_maxpages = (n == 0)
1439: ? 0
1440: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1441:
1442: simple_unlock(&pp->pr_slock);
1.20 thorpej 1443: }
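
/*
 * A water-mark tuning sketch (pool name and values illustrative):
 *
 *	pool_setlowat(&foo_pool, 16);	keep pages for >= 16 items
 *	pool_sethiwat(&foo_pool, 1024);	free idle pages beyond this
 *	pool_sethardlimit(&foo_pool, 2048,
 *	    "WARNING: foo_pool limit reached", 60);
 */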
1.3 pk 1444:
1445: /*
1446: * Release all complete pages that have not been used recently.
1447: */
1.66 thorpej 1448: int
1.59 thorpej 1449: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1450: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1451: #else
1452: pool_reclaim(struct pool *pp)
1453: #endif
1.3 pk 1454: {
1455: struct pool_item_header *ph, *phnext;
1.43 thorpej 1456: struct pool_cache *pc;
1.21 thorpej 1457: struct timeval curtime;
1.61 chs 1458: struct pool_pagelist pq;
1.88 chs 1459: struct timeval diff;
1.21 thorpej 1460: int s;
1.3 pk 1461:
1.68 thorpej 1462: if (pp->pr_drain_hook != NULL) {
1463: /*
1464: * The drain hook must be called with the pool unlocked.
1465: */
1466: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1467: }
1468:
1.21 thorpej 1469: if (simple_lock_try(&pp->pr_slock) == 0)
1.66 thorpej 1470: return (0);
1.25 thorpej 1471: pr_enter(pp, file, line);
1.68 thorpej 1472:
1.88 chs 1473: LIST_INIT(&pq);
1.3 pk 1474:
1.43 thorpej 1475: /*
1476: * Reclaim items from the pool's caches.
1477: */
1.61 chs 1478: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1.99.8.1 tron 1479: pool_cache_reclaim(pc, &pq);
1.43 thorpej 1480:
1.21 thorpej 1481: s = splclock();
1482: curtime = mono_time;
1483: splx(s);
1484:
1.88 chs 1485: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1486: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1487:
1488: /* Check our minimum page claim */
1489: if (pp->pr_npages <= pp->pr_minpages)
1490: break;
1491:
1.88 chs 1492: KASSERT(ph->ph_nmissing == 0);
1493: timersub(&curtime, &ph->ph_time, &diff);
1494: if (diff.tv_sec < pool_inactive_time)
1495: continue;
1.21 thorpej 1496:
1.88 chs 1497: /*
1498: * If freeing this page would put us below
1499: * the low water mark, stop now.
1500: */
1501: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1502: pp->pr_minitems)
1503: break;
1.21 thorpej 1504:
1.88 chs 1505: pr_rmpage(pp, ph, &pq);
1.3 pk 1506: }
1507:
1.25 thorpej 1508: pr_leave(pp);
1.21 thorpej 1509: simple_unlock(&pp->pr_slock);
1.88 chs 1510: if (LIST_EMPTY(&pq))
1.66 thorpej 1511: return (0);
1512:
1.99.8.1 tron 1513: pr_pagelist_free(pp, &pq);
1.66 thorpej 1514: return (1);
1.3 pk 1515: }
1516:
1517: /*
1518: * Drain pools, one at a time.
1.21 thorpej 1519: *
1520: * Note, we must never be called from an interrupt context.
1.3 pk 1521: */
1522: void
1.42 thorpej 1523: pool_drain(void *arg)
1.3 pk 1524: {
1525: struct pool *pp;
1.23 thorpej 1526: int s;
1.3 pk 1527:
1.61 chs 1528: pp = NULL;
1.49 thorpej 1529: s = splvm();
1.23 thorpej 1530: simple_lock(&pool_head_slock);
1.61 chs 1531: if (drainpp == NULL) {
1532: drainpp = TAILQ_FIRST(&pool_head);
1533: }
1534: if (drainpp) {
1535: pp = drainpp;
1536: drainpp = TAILQ_NEXT(pp, pr_poollist);
1537: }
1538: simple_unlock(&pool_head_slock);
1.63 chs 1539: pool_reclaim(pp);
1.61 chs 1540: splx(s);
1.3 pk 1541: }
1542:
1543: /*
1544: * Diagnostic helpers.
1545: */
1546: void
1.42 thorpej 1547: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1548: {
1549: int s;
1550:
1.49 thorpej 1551: s = splvm();
1.25 thorpej 1552: if (simple_lock_try(&pp->pr_slock) == 0) {
1553: printf("pool %s is locked; try again later\n",
1554: pp->pr_wchan);
1555: splx(s);
1556: return;
1557: }
1558: pool_print1(pp, modif, printf);
1.21 thorpej 1559: simple_unlock(&pp->pr_slock);
1560: splx(s);
1561: }
1562:
1.25 thorpej 1563: void
1.42 thorpej 1564: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1565: {
1566: int didlock = 0;
1567:
1568: if (pp == NULL) {
1569: (*pr)("Must specify a pool to print.\n");
1570: return;
1571: }
1572:
1573: /*
1574: * Called from DDB; interrupts should be blocked, and all
1575: * other processors should be paused. We can skip locking
1576: * the pool in this case.
1577: *
1578: * We do a simple_lock_try() just to print the lock
1579: * status, however.
1580: */
1581:
1582: if (simple_lock_try(&pp->pr_slock) == 0)
1583: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1584: else
1585: didlock = 1;
1586:
1587: pool_print1(pp, modif, pr);
1588:
1589: if (didlock)
1590: simple_unlock(&pp->pr_slock);
1591: }
1592:
1.21 thorpej 1593: static void
1.97 yamt 1594: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1595: void (*pr)(const char *, ...))
1.88 chs 1596: {
1597: struct pool_item_header *ph;
1598: #ifdef DIAGNOSTIC
1599: struct pool_item *pi;
1600: #endif
1601:
1602: LIST_FOREACH(ph, pl, ph_pagelist) {
1603: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1604: ph->ph_page, ph->ph_nmissing,
1605: (u_long)ph->ph_time.tv_sec,
1606: (u_long)ph->ph_time.tv_usec);
1607: #ifdef DIAGNOSTIC
1.97 yamt 1608: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1609: TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1610: if (pi->pi_magic != PI_MAGIC) {
1611: (*pr)("\t\t\titem %p, magic 0x%x\n",
1612: pi, pi->pi_magic);
1613: }
1.88 chs 1614: }
1615: }
1616: #endif
1617: }
1618: }
1619:
1620: static void
1.42 thorpej 1621: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1622: {
1.25 thorpej 1623: struct pool_item_header *ph;
1.44 thorpej 1624: struct pool_cache *pc;
1625: struct pool_cache_group *pcg;
1626: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1627: char c;
1628:
1629: while ((c = *modif++) != '\0') {
1630: if (c == 'l')
1631: print_log = 1;
1632: if (c == 'p')
1633: print_pagelist = 1;
1.44 thorpej 1634: if (c == 'c')
1635: print_cache = 1;
1.25 thorpej 1636: }
1637:
1638: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1639: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1640: pp->pr_roflags);
1.66 thorpej 1641: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1642: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1643: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1644: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1645: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1646:
1647: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1648: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1649: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1650: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1651:
1652: if (print_pagelist == 0)
1653: goto skip_pagelist;
1654:
1.88 chs 1655: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1656: (*pr)("\n\tempty page list:\n");
1.97 yamt 1657: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1658: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1659: (*pr)("\n\tfull page list:\n");
1.97 yamt 1660: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1661: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1662: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1663: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1664:
1.25 thorpej 1665: if (pp->pr_curpage == NULL)
1666: (*pr)("\tno current page\n");
1667: else
1668: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1669:
1670: skip_pagelist:
1671: if (print_log == 0)
1672: goto skip_log;
1673:
1674: (*pr)("\n");
1675: if ((pp->pr_roflags & PR_LOGGING) == 0)
1676: (*pr)("\tno log\n");
1677: else
1678: pr_printlog(pp, NULL, pr);
1.3 pk 1679:
1.25 thorpej 1680: skip_log:
1.44 thorpej 1681: if (print_cache == 0)
1682: goto skip_cache;
1683:
1.61 chs 1684: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.44 thorpej 1685: (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1686: pc->pc_allocfrom, pc->pc_freeto);
1.48 thorpej 1687: (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1688: pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.61 chs 1689: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.44 thorpej 1690: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1.87 thorpej 1691: for (i = 0; i < PCG_NOBJECTS; i++) {
1692: if (pcg->pcg_objects[i].pcgo_pa !=
1693: POOL_PADDR_INVALID) {
1694: (*pr)("\t\t\t%p, 0x%llx\n",
1695: pcg->pcg_objects[i].pcgo_va,
1696: (unsigned long long)
1697: pcg->pcg_objects[i].pcgo_pa);
1698: } else {
1699: (*pr)("\t\t\t%p\n",
1700: pcg->pcg_objects[i].pcgo_va);
1701: }
1702: }
1.44 thorpej 1703: }
1704: }
1705:
1706: skip_cache:
1.88 chs 1707: pr_enter_check(pp, pr);
1708: }
1709:
1710: static int
1711: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1712: {
1713: struct pool_item *pi;
1714: caddr_t page;
1715: int n;
1716:
1717: page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1718: if (page != ph->ph_page &&
1719: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1720: if (label != NULL)
1721: printf("%s: ", label);
1722: printf("pool(%p:%s): page inconsistency: page %p;"
1723: " at page head addr %p (p %p)\n", pp,
1724: pp->pr_wchan, ph->ph_page,
1725: ph, page);
1726: return 1;
1727: }
1.3 pk 1728:
1.97 yamt 1729: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1730: return 0;
1731:
1.88 chs 1732: for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1733: pi != NULL;
1734: pi = TAILQ_NEXT(pi,pi_list), n++) {
1735:
1736: #ifdef DIAGNOSTIC
1737: if (pi->pi_magic != PI_MAGIC) {
1738: if (label != NULL)
1739: printf("%s: ", label);
1740: printf("pool(%s): free list modified: magic=%x;"
1741: " page %p; item ordinal %d;"
1742: " addr %p (p %p)\n",
1743: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1744: n, pi, page);
1745: panic("pool");
1746: }
1747: #endif
1748: page =
1749: (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1750: if (page == ph->ph_page)
1751: continue;
1752:
1753: if (label != NULL)
1754: printf("%s: ", label);
1755: printf("pool(%p:%s): page inconsistency: page %p;"
1756: " item ordinal %d; addr %p (p %p)\n", pp,
1757: pp->pr_wchan, ph->ph_page,
1758: n, pi, page);
1759: return 1;
1760: }
1761: return 0;
1.3 pk 1762: }
1763:
1.88 chs 1764:
1.3 pk 1765: int
1.42 thorpej 1766: pool_chk(struct pool *pp, const char *label)
1.3 pk 1767: {
1768: struct pool_item_header *ph;
1769: int r = 0;
1770:
1.21 thorpej 1771: simple_lock(&pp->pr_slock);
1.88 chs 1772: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1773: r = pool_chk_page(pp, label, ph);
1774: if (r) {
1775: goto out;
1776: }
1777: }
1778: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1779: r = pool_chk_page(pp, label, ph);
1780: if (r) {
1.3 pk 1781: goto out;
1782: }
1.88 chs 1783: }
1784: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1785: r = pool_chk_page(pp, label, ph);
1786: if (r) {
1.3 pk 1787: goto out;
1788: }
1789: }
1.88 chs 1790:
1.3 pk 1791: out:
1.21 thorpej 1792: simple_unlock(&pp->pr_slock);
1.3 pk 1793: return (r);
1.43 thorpej 1794: }
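/*
 * Illustrative sketch (not part of this file): debugging code can run
 * the consistency check after a suspect operation; "foo_pool" and the
 * panic message are hypothetical:
 *
 *	if (pool_chk(&foo_pool, __func__) != 0)
 *		panic("foo_pool corrupt");
 */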
1795:
1796: /*
1797: * pool_cache_init:
1798: *
1799: * Initialize a pool cache.
1800: *
1801: * NOTE: If the pool must be protected from interrupts, we expect
1802: * to be called at the appropriate interrupt priority level.
1803: */
1804: void
1805: pool_cache_init(struct pool_cache *pc, struct pool *pp,
1806: int (*ctor)(void *, void *, int),
1807: void (*dtor)(void *, void *),
1808: void *arg)
1809: {
1810:
1811: TAILQ_INIT(&pc->pc_grouplist);
1812: simple_lock_init(&pc->pc_slock);
1813:
1814: pc->pc_allocfrom = NULL;
1815: pc->pc_freeto = NULL;
1816: pc->pc_pool = pp;
1817:
1818: pc->pc_ctor = ctor;
1819: pc->pc_dtor = dtor;
1820: pc->pc_arg = arg;
1821:
1.48 thorpej 1822: pc->pc_hits = 0;
1823: pc->pc_misses = 0;
1824:
1825: pc->pc_ngroups = 0;
1826:
1827: pc->pc_nitems = 0;
1828:
1.43 thorpej 1829: simple_lock(&pp->pr_slock);
1830: TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1831: simple_unlock(&pp->pr_slock);
1832: }
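/*
 * Illustrative sketch (not part of this file): a subsystem managing a
 * hypothetical "struct foo" would typically pair a pool with a cache,
 * where foo_ctor()/foo_dtor() are its own constructor and destructor:
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &pool_allocator_nointr);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 */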
1833:
1834: /*
1835: * pool_cache_destroy:
1836: *
1837: * Destroy a pool cache.
1838: */
1839: void
1840: pool_cache_destroy(struct pool_cache *pc)
1841: {
1842: struct pool *pp = pc->pc_pool;
1843:
1844: /* First, invalidate the entire cache. */
1845: pool_cache_invalidate(pc);
1846:
1847: /* ...and remove it from the pool's cache list. */
1848: simple_lock(&pp->pr_slock);
1849: TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1850: simple_unlock(&pp->pr_slock);
1851: }
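/*
 * Illustrative sketch (not part of this file): teardown mirrors the
 * setup above; every cache must be destroyed before its pool:
 *
 *	pool_cache_destroy(&foo_cache);
 *	pool_destroy(&foo_pool);
 */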
1852:
1853: static __inline void *
1.87 thorpej 1854: pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1.43 thorpej 1855: {
1856: void *object;
1857: u_int idx;
1858:
1859: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 1860: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 1861: idx = --pcg->pcg_avail;
1862:
1.87 thorpej 1863: KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1864: object = pcg->pcg_objects[idx].pcgo_va;
1865: if (pap != NULL)
1866: *pap = pcg->pcg_objects[idx].pcgo_pa;
1867: pcg->pcg_objects[idx].pcgo_va = NULL;
1.43 thorpej 1868:
1869: return (object);
1870: }
1871:
1872: static __inline void
1.87 thorpej 1873: pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1.43 thorpej 1874: {
1875: u_int idx;
1876:
1877: KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1878: idx = pcg->pcg_avail++;
1879:
1.87 thorpej 1880: KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1881: pcg->pcg_objects[idx].pcgo_va = object;
1882: pcg->pcg_objects[idx].pcgo_pa = pa;
1.43 thorpej 1883: }
1884:
1885: /*
1.87 thorpej 1886: * pool_cache_get{,_paddr}:
1.43 thorpej 1887: *
1.87 thorpej 1888: * Get an object from a pool cache (optionally returning
1889: * the physical address of the object).
1.43 thorpej 1890: */
1891: void *
1.87 thorpej 1892: pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1.43 thorpej 1893: {
1894: struct pool_cache_group *pcg;
1895: void *object;
1.58 thorpej 1896:
1897: #ifdef LOCKDEBUG
1898: if (flags & PR_WAITOK)
1899: simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1900: #endif
1.43 thorpej 1901:
1902: simple_lock(&pc->pc_slock);
1903:
1904: if ((pcg = pc->pc_allocfrom) == NULL) {
1.61 chs 1905: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1906: if (pcg->pcg_avail != 0) {
1907: pc->pc_allocfrom = pcg;
1908: goto have_group;
1909: }
1910: }
1911:
1912: /*
1913: * No groups with any available objects. Allocate
1914: * a new object, construct it, and return it to
1915: * the caller. We will allocate a group, if necessary,
1916: * when the object is freed back to the cache.
1917: */
1.48 thorpej 1918: pc->pc_misses++;
1.43 thorpej 1919: simple_unlock(&pc->pc_slock);
1920: object = pool_get(pc->pc_pool, flags);
1921: if (object != NULL && pc->pc_ctor != NULL) {
1922: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1923: pool_put(pc->pc_pool, object);
1924: return (NULL);
1925: }
1926: }
1.87 thorpej 1927: if (object != NULL && pap != NULL) {
1928: #ifdef POOL_VTOPHYS
1929: *pap = POOL_VTOPHYS(object);
1930: #else
1931: *pap = POOL_PADDR_INVALID;
1932: #endif
1933: }
1.43 thorpej 1934: return (object);
1935: }
1936:
1937: have_group:
1.48 thorpej 1938: pc->pc_hits++;
1939: pc->pc_nitems--;
1.87 thorpej 1940: object = pcg_get(pcg, pap);
1.43 thorpej 1941:
1942: if (pcg->pcg_avail == 0)
1943: pc->pc_allocfrom = NULL;
1.45 thorpej 1944:
1.43 thorpej 1945: simple_unlock(&pc->pc_slock);
1946:
1947: return (object);
1948: }
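/*
 * Illustrative sketch (not part of this file): most consumers use the
 * pool_cache_get()/pool_cache_put() wrappers, which don't track the
 * physical address; "foo_cache" is the hypothetical cache from above:
 *
 *	struct foo *f;
 *
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	if (f == NULL)
 *		return (ENOMEM);
 *	... use f ...
 *	pool_cache_put(&foo_cache, f);
 */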
1949:
1950: /*
1.87 thorpej 1951: * pool_cache_put{,_paddr}:
1.43 thorpej 1952: *
1.87 thorpej 1953: * Put an object back to the pool cache (optionally caching the
1954: * physical address of the object).
1.43 thorpej 1955: */
1956: void
1.87 thorpej 1957: pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1.43 thorpej 1958: {
1959: struct pool_cache_group *pcg;
1.60 thorpej 1960: int s;
1.43 thorpej 1961:
1962: simple_lock(&pc->pc_slock);
1963:
1964: if ((pcg = pc->pc_freeto) == NULL) {
1.61 chs 1965: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1966: if (pcg->pcg_avail != PCG_NOBJECTS) {
1967: pc->pc_freeto = pcg;
1968: goto have_group;
1969: }
1970: }
1971:
1972: /*
 1973: 		 * No groups with room to free the object to.  Attempt to
1.47 thorpej 1974: * allocate one.
1.43 thorpej 1975: */
1.47 thorpej 1976: simple_unlock(&pc->pc_slock);
1.60 thorpej 1977: s = splvm();
1.43 thorpej 1978: pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60 thorpej 1979: splx(s);
1.43 thorpej 1980: if (pcg != NULL) {
1981: memset(pcg, 0, sizeof(*pcg));
1.47 thorpej 1982: simple_lock(&pc->pc_slock);
1.48 thorpej 1983: pc->pc_ngroups++;
1.43 thorpej 1984: TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1.47 thorpej 1985: if (pc->pc_freeto == NULL)
1986: pc->pc_freeto = pcg;
1.43 thorpej 1987: goto have_group;
1988: }
1989:
1990: /*
1991: * Unable to allocate a cache group; destruct the object
1992: * and free it back to the pool.
1993: */
1.51 thorpej 1994: pool_cache_destruct_object(pc, object);
1.43 thorpej 1995: return;
1996: }
1997:
1998: have_group:
1.48 thorpej 1999: pc->pc_nitems++;
1.87 thorpej 2000: pcg_put(pcg, object, pa);
1.43 thorpej 2001:
2002: if (pcg->pcg_avail == PCG_NOBJECTS)
2003: pc->pc_freeto = NULL;
2004:
2005: simple_unlock(&pc->pc_slock);
1.51 thorpej 2006: }
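/*
 * Illustrative sketch (not part of this file): the _paddr variants
 * serve callers that also need the physical address, e.g. to hand a
 * buffer to a DMA engine; "foo_cache" is hypothetical:
 *
 *	paddr_t pa;
 *	void *va;
 *
 *	va = pool_cache_get_paddr(&foo_cache, PR_NOWAIT, &pa);
 *	if (va == NULL)
 *		return (ENOMEM);
 *	if (pa != POOL_PADDR_INVALID)
 *		... program the device with pa ...
 *	pool_cache_put_paddr(&foo_cache, va, pa);
 */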
2007:
2008: /*
2009: * pool_cache_destruct_object:
2010: *
2011: * Force destruction of an object and its release back into
2012: * the pool.
2013: */
2014: void
2015: pool_cache_destruct_object(struct pool_cache *pc, void *object)
2016: {
2017:
2018: if (pc->pc_dtor != NULL)
2019: (*pc->pc_dtor)(pc->pc_arg, object);
2020: pool_put(pc->pc_pool, object);
1.43 thorpej 2021: }
2022:
2023: /*
1.99.8.1 tron 2024: * pool_cache_invalidate:
1.43 thorpej 2025: *
1.99.8.1 tron 2026: * Invalidate a pool cache (destruct and release all of the
2027: * cached objects).
1.43 thorpej 2028: */
1.99.8.1 tron 2029: void
2030: pool_cache_invalidate(struct pool_cache *pc)
1.43 thorpej 2031: {
1.99.8.1 tron 2032: struct pool_pagelist pq;
1.43 thorpej 2033: struct pool_cache_group *pcg, *npcg;
2034: void *object;
1.99.8.1 tron 2035:
2036: LIST_INIT(&pq);
2037:
2038: simple_lock(&pc->pc_slock);
2039: simple_lock(&pc->pc_pool->pr_slock);
1.43 thorpej 2040:
2041: for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
2042: pcg = npcg) {
2043: npcg = TAILQ_NEXT(pcg, pcg_list);
2044: while (pcg->pcg_avail != 0) {
1.48 thorpej 2045: pc->pc_nitems--;
1.87 thorpej 2046: object = pcg_get(pcg, NULL);
1.45 thorpej 2047: if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
2048: pc->pc_allocfrom = NULL;
1.43 thorpej 2049: if (pc->pc_dtor != NULL)
2050: (*pc->pc_dtor)(pc->pc_arg, object);
1.99.8.1 tron 2051: pool_do_put(pc->pc_pool, object, &pq);
1.43 thorpej 2052: }
2053: }
2054:
1.99.8.1 tron 2055: simple_unlock(&pc->pc_pool->pr_slock);
1.43 thorpej 2056: simple_unlock(&pc->pc_slock);
1.99.8.1 tron 2057:
2058: if (! LIST_EMPTY(&pq))
2059: pr_pagelist_free(pc->pc_pool, &pq);
1.43 thorpej 2060: }
2061:
2062: /*
2063: * pool_cache_reclaim:
2064: *
2065: * Reclaim a pool cache for pool_reclaim().
2066: */
2067: static void
1.99.8.1 tron 2068: pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq)
1.43 thorpej 2069: {
1.99.8.1 tron 2070: struct pool_cache_group *pcg, *npcg;
2071: void *object;
2072: int s;
2073:
2074: /*
2075: * We're locking in the wrong order (normally pool_cache -> pool,
2076: * but the pool is already locked when we get here), so we have
2077: * to use trylock. If we can't lock the pool_cache, it's not really
2078: * a big deal here.
2079: */
2080: if (simple_lock_try(&pc->pc_slock) == 0)
2081: return;
2082:
2083: for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
2084: pcg = npcg) {
2085: npcg = TAILQ_NEXT(pcg, pcg_list);
2086: while (pcg->pcg_avail != 0) {
2087: pc->pc_nitems--;
2088: object = pcg_get(pcg, NULL);
2089: if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
2090: pc->pc_allocfrom = NULL;
2091: if (pc->pc_dtor != NULL)
2092: (*pc->pc_dtor)(pc->pc_arg, object);
2093: pool_do_put(pc->pc_pool, object, pq);
2094: }
2095: pc->pc_ngroups--;
2096: TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
2097: if (pc->pc_freeto == pcg)
2098: pc->pc_freeto = NULL;
2099: s = splvm();
2100: pool_put(&pcgpool, pcg);
2101: splx(s);
2102: }
1.43 thorpej 2103:
2104: simple_unlock(&pc->pc_slock);
1.3 pk 2105: }
1.66 thorpej 2106:
2107: /*
2108: * Pool backend allocators.
2109: *
2110: * Each pool has a backend allocator that handles allocation, deallocation,
2111: * and any additional draining that might be needed.
2112: *
2113: * We provide two standard allocators:
2114: *
2115: * pool_allocator_kmem - the default when no allocator is specified
2116: *
2117: * pool_allocator_nointr - used for pools that will not be accessed
2118: * in interrupt context.
2119: */
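/*
 * Illustrative sketch (not part of this file): a subsystem can supply
 * its own backend by filling in the alloc hook, the free hook and the
 * page size (0 selects the platform default), then passing the struct
 * to pool_init(); "my_alloc"/"my_free" are hypothetical:
 *
 *	void *my_alloc(struct pool *, int);
 *	void my_free(struct pool *, void *);
 *
 *	struct pool_allocator my_allocator = {
 *		my_alloc, my_free, 0,
 *	};
 */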
2120: void *pool_page_alloc(struct pool *, int);
2121: void pool_page_free(struct pool *, void *);
2122:
1.99.8.1.2.1! tron 2123: #ifdef POOL_SUBPAGE
! 2124: struct pool_allocator pool_allocator_kmem_fullpage = {
! 2125: pool_page_alloc, pool_page_free, 0,
! 2126: };
! 2127: #else
1.66 thorpej 2128: struct pool_allocator pool_allocator_kmem = {
2129: pool_page_alloc, pool_page_free, 0,
2130: };
1.99.8.1.2.1! tron 2131: #endif
1.66 thorpej 2132:
2133: void *pool_page_alloc_nointr(struct pool *, int);
2134: void pool_page_free_nointr(struct pool *, void *);
2135:
1.99.8.1.2.1! tron 2136: #ifdef POOL_SUBPAGE
! 2137: struct pool_allocator pool_allocator_nointr_fullpage = {
! 2138: pool_page_alloc_nointr, pool_page_free_nointr, 0,
! 2139: };
! 2140: #else
1.66 thorpej 2141: struct pool_allocator pool_allocator_nointr = {
2142: pool_page_alloc_nointr, pool_page_free_nointr, 0,
2143: };
1.99.8.1.2.1! tron 2144: #endif
1.66 thorpej 2145:
2146: #ifdef POOL_SUBPAGE
2147: void *pool_subpage_alloc(struct pool *, int);
2148: void pool_subpage_free(struct pool *, void *);
2149:
1.99.8.1.2.1! tron 2150: struct pool_allocator pool_allocator_kmem = {
! 2151: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
! 2152: };
! 2153:
! 2154: void *pool_subpage_alloc_nointr(struct pool *, int);
! 2155: void pool_subpage_free_nointr(struct pool *, void *);
! 2156:
! 2157: struct pool_allocator pool_allocator_nointr = {
! 2158: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.66 thorpej 2159: };
2160: #endif /* POOL_SUBPAGE */
2161:
2162: /*
2163: * We have at least three different resources for the same allocation and
2164: * each resource can be depleted. First, we have the ready elements in the
2165: * pool. Then we have the resource (typically a vm_map) for this allocator.
2166: * Finally, we have physical memory. Waiting for any of these can be
2167: * unnecessary when any other is freed, but the kernel doesn't support
2168: * sleeping on multiple wait channels, so we have to employ another strategy.
2169: *
2170: * The caller sleeps on the pool (so that it can be awakened when an item
2171: * is returned to the pool), but we set PA_WANT on the allocator. When a
2172: * page is returned to the allocator and PA_WANT is set, pool_allocator_free
2173: * will wake up all sleeping pools belonging to this allocator.
2174: *
2175: * XXX Thundering herd.
2176: */
2177: void *
2178: pool_allocator_alloc(struct pool *org, int flags)
2179: {
2180: struct pool_allocator *pa = org->pr_alloc;
2181: struct pool *pp, *start;
2182: int s, freed;
2183: void *res;
2184:
1.91 yamt 2185: LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
2186:
1.66 thorpej 2187: do {
2188: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2189: return (res);
1.68 thorpej 2190: if ((flags & PR_WAITOK) == 0) {
2191: /*
 2192: 			 * We only run the drain hook here if PR_NOWAIT.
2193: * In other cases, the hook will be run in
2194: * pool_reclaim().
2195: */
2196: if (org->pr_drain_hook != NULL) {
2197: (*org->pr_drain_hook)(org->pr_drain_hook_arg,
2198: flags);
2199: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2200: return (res);
2201: }
1.66 thorpej 2202: break;
1.68 thorpej 2203: }
1.66 thorpej 2204:
2205: /*
2206: * Drain all pools, except "org", that use this
2207: * allocator. We do this to reclaim VA space.
2208: * pa_alloc is responsible for waiting for
2209: * physical memory.
2210: *
 2211: 		 * XXX We risk looping forever if someone calls
 2212: 		 * pool_destroy() on "start".  But there is no other
 2213: 		 * way to combine a potentially sleeping pool_reclaim()
 2214: 		 * with non-sleeping locks on the pool_allocator and
 2215: 		 * still stir the drained pools in the allocator.
1.68 thorpej 2216: *
2217: * XXX Maybe we should use pool_head_slock for locking
2218: * the allocators?
1.66 thorpej 2219: */
2220: freed = 0;
2221:
2222: s = splvm();
2223: simple_lock(&pa->pa_slock);
2224: pp = start = TAILQ_FIRST(&pa->pa_list);
2225: do {
2226: TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
2227: TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
2228: if (pp == org)
2229: continue;
1.73 thorpej 2230: simple_unlock(&pa->pa_slock);
1.66 thorpej 2231: freed = pool_reclaim(pp);
1.73 thorpej 2232: simple_lock(&pa->pa_slock);
1.66 thorpej 2233: } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
2234: freed == 0);
2235:
2236: if (freed == 0) {
2237: /*
2238: * We set PA_WANT here, the caller will most likely
2239: * sleep waiting for pages (if not, this won't hurt
2240: * that much), and there is no way to set this in
2241: * the caller without violating locking order.
2242: */
2243: pa->pa_flags |= PA_WANT;
2244: }
2245: simple_unlock(&pa->pa_slock);
2246: splx(s);
2247: } while (freed);
2248: return (NULL);
2249: }
2250:
2251: void
2252: pool_allocator_free(struct pool *pp, void *v)
2253: {
2254: struct pool_allocator *pa = pp->pr_alloc;
2255: int s;
2256:
1.91 yamt 2257: LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
2258:
1.66 thorpej 2259: (*pa->pa_free)(pp, v);
2260:
2261: s = splvm();
2262: simple_lock(&pa->pa_slock);
2263: if ((pa->pa_flags & PA_WANT) == 0) {
2264: simple_unlock(&pa->pa_slock);
2265: splx(s);
2266: return;
2267: }
2268:
2269: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2270: simple_lock(&pp->pr_slock);
2271: if ((pp->pr_flags & PR_WANTED) != 0) {
2272: pp->pr_flags &= ~PR_WANTED;
2273: wakeup(pp);
2274: }
1.69 thorpej 2275: simple_unlock(&pp->pr_slock);
1.66 thorpej 2276: }
2277: pa->pa_flags &= ~PA_WANT;
2278: simple_unlock(&pa->pa_slock);
2279: splx(s);
2280: }
2281:
2282: void *
2283: pool_page_alloc(struct pool *pp, int flags)
2284: {
2285: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2286:
1.98 yamt 2287: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, NULL, waitok));
1.66 thorpej 2288: }
2289:
2290: void
2291: pool_page_free(struct pool *pp, void *v)
2292: {
2293:
1.98 yamt 2294: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2295: }
2296:
2297: static void *
2298: pool_page_alloc_meta(struct pool *pp, int flags)
2299: {
2300: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2301:
2302: return ((void *) uvm_km_alloc_poolpage1(kmem_map, NULL, waitok));
2303: }
2304:
2305: static void
2306: pool_page_free_meta(struct pool *pp, void *v)
2307: {
2308:
2309: uvm_km_free_poolpage1(kmem_map, (vaddr_t) v);
1.66 thorpej 2310: }
2311:
2312: #ifdef POOL_SUBPAGE
2313: /* Sub-page allocator, for machines with large hardware pages. */
2314: void *
2315: pool_subpage_alloc(struct pool *pp, int flags)
2316: {
1.93 dbj 2317: void *v;
2318: int s;
2319: s = splvm();
2320: v = pool_get(&psppool, flags);
2321: splx(s);
2322: return v;
1.66 thorpej 2323: }
2324:
2325: void
2326: pool_subpage_free(struct pool *pp, void *v)
2327: {
1.93 dbj 2328: int s;
2329: s = splvm();
1.66 thorpej 2330: pool_put(&psppool, v);
1.93 dbj 2331: splx(s);
1.66 thorpej 2332: }
2333:
2334: /* We don't provide a real nointr allocator. Maybe later. */
2335: void *
1.99.8.1.2.1! tron 2336: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2337: {
2338:
2339: return (pool_subpage_alloc(pp, flags));
2340: }
2341:
2342: void
1.99.8.1.2.1! tron 2343: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2344: {
2345:
2346: pool_subpage_free(pp, v);
2347: }
1.99.8.1.2.1! tron 2348: #endif /* POOL_SUBPAGE */
1.66 thorpej 2349: void *
2350: pool_page_alloc_nointr(struct pool *pp, int flags)
2351: {
2352: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2353:
1.98 yamt 2354: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map,
1.66 thorpej 2355: uvm.kernel_object, waitok));
2356: }
2357:
2358: void
2359: pool_page_free_nointr(struct pool *pp, void *v)
2360: {
2361:
1.98 yamt 2362: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2363: }